# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from collections import OrderedDict
from contextlib import contextmanager
from metaseq.logging.progress_bar.base_progress_bar import (
BaseProgressBar,
logger,
format_stat,
)
from metaseq.utils import get_precise_epoch
@contextmanager
def rename_logger(logger, new_name):
    old_name = logger.name
    if new_name is not None:
        logger.name = new_name
    try:
        yield logger
    finally:
        # restore the original name even if the body raises
        logger.name = old_name
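# Usage sketch (not part of the original metaseq file): a tag such as "valid"
# temporarily becomes the logger name for a single record.
#
#   with rename_logger(logger, "valid"):
#       logger.info(json.dumps({"loss": 2.31}))
#   # outside the block, logger.name is restored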
class JsonProgressBar(BaseProgressBar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if step > 0 and self.log_interval is not None and step % self.log_interval == 0:
update = get_precise_epoch(self.epoch, self.i, self.size)
stats = self._format_stats(stats, epoch=self.epoch, update=update)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag is not None:
self.stats = OrderedDict(
[(tag + "_" + k, v) for k, v in self.stats.items()]
)
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix["epoch"] = epoch
if update is not None:
postfix["update"] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
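# Example of an emitted record (a sketch; exact value formatting is decided by
# format_stat): with epoch=1 and a mid-epoch update fraction of 1.125, a call
# to log({"loss": 2.31}) produces one JSON line such as
#   {"epoch": 1, "update": 1.125, "loss": "2.31"}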
# === end of flash_metaseq-main :: metaseq/logging/progress_bar/json_progress_bar.py ===
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from numbers import Number
from metaseq.logging.meters import AverageMeter
from metaseq.logging.progress_bar.base_progress_bar import (
BaseProgressBar,
logger,
)
from metaseq.utils import get_precise_epoch
wandb = None
try:
import wandb
except ImportError:
pass
class WandBProgressBarWrapper(BaseProgressBar):
"""Log to Weights & Biases."""
def __init__(self, wrapped_bar, wandb_project, run_name=None):
super().__init__(
wrapped_bar, epoch=wrapped_bar.epoch, prefix=wrapped_bar.prefix
)
self.wrapped_bar = wrapped_bar
if wandb is None:
logger.warning("wandb not found, pip install wandb")
return
        # reinit=False ensures that if wandb.init() is called multiple times
        # within one process, all calls reference the same run
wandb.init(project=wandb_project, reinit=False, name=run_name)
def __len__(self):
return len(self.wrapped_bar)
def __iter__(self):
self.size = len(self.wrapped_bar)
for i, obj in enumerate(self.wrapped_bar, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_wandb(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_wandb(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
if wandb is not None:
wandb.config.update(config, allow_val_change=True)
self.wrapped_bar.update_config(config)
def _log_to_wandb(self, stats, tag=None, step=None):
if wandb is None:
return
if step is None:
step = stats["num_updates"]
prefix = "" if tag is None else tag + "/"
epoch = get_precise_epoch(self.epoch, self.i, self.size)
wandb.log({prefix + "epoch": epoch}, step=step)
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
wandb.log({prefix + key: stats[key].val}, step=step)
elif isinstance(stats[key], Number):
wandb.log({prefix + key: stats[key]}, step=step)
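# Usage sketch (assumes the JsonProgressBar from json_progress_bar.py): the
# wrapper iterates and logs through the wrapped bar while mirroring numeric
# stats to Weights & Biases.
#
#   bar = WandBProgressBarWrapper(
#       JsonProgressBar(iterable, epoch=1), wandb_project="my-project"
#   )
#   for sample in bar:
#       ...
#       bar.log({"loss": 2.31, "num_updates": step})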
# === end of flash_metaseq-main :: metaseq/logging/progress_bar/wandb_progress_bar.py ===
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from metaseq import metrics, utils
from metaseq.criterions import BaseCriterion, register_criterion
def nll_loss(lprobs, target, ignore_index=None, reduction="mean"):
"""Like torch.nn.functional.nll_loss but works for large inputs."""
if lprobs.numel() < 2e9:
return F.nll_loss(
lprobs, target, ignore_index=ignore_index, reduction=reduction
)
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
if reduction == "mean":
nll_loss = nll_loss.mean()
elif reduction == "sum":
nll_loss = nll_loss.sum()
elif reduction == "none":
pass
else:
raise NotImplementedError
return nll_loss
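# Why the 2e9 cutoff above (a sketch of the rationale): some F.nll_loss kernels
# index with 32-bit offsets, so tensors with more than ~2**31 elements can
# overflow; the manual gather path avoids that. On small inputs both paths
# agree:
#
#   lprobs = F.log_softmax(torch.randn(4, 7), dim=-1)
#   target = torch.tensor([0, 1, 2, 3])
#   assert torch.allclose(
#       nll_loss(lprobs, target, reduction="sum"),
#       F.nll_loss(lprobs, target, reduction="sum"),
#   )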
@register_criterion("cross_entropy")
class CrossEntropyCriterion(BaseCriterion):
def __init__(self, task):
super().__init__(task)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = sample["ntokens"]
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if "src_tokens" in sample["net_input"] and hasattr(self.task, "eod"):
logging_output["ndocseps"] = (sample["target"] == self.task.eod).sum()
if (
len(net_output) >= 2
and isinstance(net_output[1], dict)
and "inner_states" in net_output[1]
):
with torch.no_grad():
# yank out the inner states we wish to instrument
# see transformer.py TransformerDecoder.extract_features_scriptable
emb, *_, actv = net_output[1]["inner_states"]
assert isinstance(
emb, dict
), "Expecting the first inner state to be a dict of embedding representations"
emb["actv"] = actv # throw on final for code brevity
                for key, value in emb.items():
                    if value is None:
                        # maybe future-proofing, e.g. relative positional embeddings
                        continue
                    logging_output[f"{key}_norm"] = value.norm(p=2, dim=-1).sum(
                        dtype=torch.float32
                    )
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1)
loss = nll_loss(
lprobs,
target,
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
return loss, loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
for type_ in ("actv", "pos", "tok", "emb"):
key = f"{type_}_norm"
if any(key in log for log in logging_outputs):
actv_norm = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(key, actv_norm / ntokens, round=3)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
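# Worked example (sketch) of the base conversion in reduce_metrics: a summed
# natural-log loss of 693.15 over sample_size = 1000 tokens gives
#   loss = 693.15 / 1000 / math.log(2) ~ 1.0 bit per token,
# and get_perplexity then reports ppl = 2 ** 1.0 = 2.0.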
# === end of flash_metaseq-main :: metaseq/criterions/cross_entropy.py ===
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import Any, Dict, List
from torch.nn.modules.loss import _Loss
from metaseq import metrics, utils
from metaseq.dataclass import MetaseqDataclass
from metaseq.dataclass.utils import gen_parser_from_dataclass
class BaseCriterion(_Loss):
def __init__(self, task):
super().__init__()
self.task = task
if hasattr(task, "target_dictionary"):
tgt_dict = task.target_dictionary
self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
@classmethod
def add_args(cls, parser):
"""Add criterion-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@classmethod
def build_criterion(cls, cfg: MetaseqDataclass, task):
"""Construct a criterion from command-line args."""
        # Infer the criterion's __init__ arguments from its signature.
init_args = {}
for p in inspect.signature(cls).parameters.values():
if (
p.kind == p.POSITIONAL_ONLY
or p.kind == p.VAR_POSITIONAL
or p.kind == p.VAR_KEYWORD
):
# we haven't implemented inference for these argument types,
# but PRs welcome :)
raise NotImplementedError("{} not supported".format(p.kind))
assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
if p.name == "task":
init_args["task"] = task
elif p.name == "cfg":
init_args["cfg"] = cfg
elif hasattr(cfg, p.name):
init_args[p.name] = getattr(cfg, p.name)
elif p.default != p.empty:
pass # we'll use the default value
else:
raise NotImplementedError(
"Unable to infer Criterion arguments, please implement "
"{}.build_criterion".format(cls.__name__)
)
return cls(**init_args)
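    # Illustration (sketch, hypothetical subclass): a criterion whose __init__
    # takes (task, label_smoothing) is built automatically by the loop above,
    # provided cfg has a matching `label_smoothing` field:
    #
    #   class SmoothedCriterion(BaseCriterion):
    #       def __init__(self, task, label_smoothing=0.0):
    #           super().__init__(task)
    #           self.eps = label_smoothing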
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
raise NotImplementedError
@staticmethod
def aggregate_logging_outputs(
logging_outputs: List[Dict[str, Any]]
) -> Dict[str, Any]:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
raise NotImplementedError
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"Criterions should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {"nsentences", "ntokens", "sample_size"}:
continue
metrics.log_scalar(k, v)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
# === end of flash_metaseq-main :: metaseq/criterions/base_criterion.py ===
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from metaseq import registry
from metaseq.criterions.base_criterion import BaseCriterion
from omegaconf import DictConfig
(
build_criterion_,
register_criterion,
CRITERION_REGISTRY,
CRITERION_DATACLASS_REGISTRY,
) = registry.setup_registry(
"--criterion", base_class=BaseCriterion, default="cross_entropy"
)
def build_criterion(cfg: DictConfig, task):
return build_criterion_(cfg, task)
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("metaseq.criterions." + file_name)
# === end of flash_metaseq-main :: metaseq/criterions/__init__.py ===
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
setuptools.setup(
name="cop3d",
version="1.0.0",
author="Meta AI",
author_email="romansh@meta.com",
packages=setuptools.find_packages(),
license="LICENSE",
description="Common Pets in 3D tools",
long_description=open("README.md").read(),
install_requires=[
"co3d @ git+ssh://git@github.com/facebookresearch/co3d.git#egg=co3d-2.1.0"
],
)
# === end of cop3d-main :: setup.py ===
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from co3d.dataset.download_dataset_impl import build_arg_parser, download_dataset
REPO_ROOT = __file__.rsplit(os.sep, 2)[0]
DEFAULT_LINK_LIST_FILE = os.path.join(REPO_ROOT, "links", "links.json")
DEFAULT_SHA256S_FILE = os.path.join(REPO_ROOT, "links", "cop3d_sha256.json")
if __name__ == "__main__":
parser = build_arg_parser("COP3D", DEFAULT_LINK_LIST_FILE, DEFAULT_SHA256S_FILE)
args = parser.parse_args()
download_dataset(
str(args.link_list_file),
str(args.download_folder),
n_download_workers=int(args.n_download_workers),
n_extract_workers=int(args.n_extract_workers),
download_categories=args.download_categories,
checksum_check=bool(args.checksum_check),
single_sequence_subset=False,
clear_archives_after_unpacking=bool(args.clear_archives_after_unpacking),
sha256s_file=str(args.sha256_file),
skip_downloaded_archives=not bool(args.redownload_existing_archives),
)
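# Usage sketch (flag spellings inferred from the argument names above; see
# build_arg_parser in co3d for the authoritative set):
#
#   python cop3d/download_dataset.py --download_folder ./cop3d_data \
#       --n_download_workers 4 --n_extract_workers 4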
# === end of cop3d-main :: cop3d/download_dataset.py ===
import pathlib
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
sgd_n_epochs = 15
n_trials = 10
def plot_objective_difference():
"""Plot objective difference during training
"""
for model_name in ['kernel', 'lenet']:
for transform_name in ['rotation', 'crop', 'blur', 'rotation_crop_blur']:
losses = np.array([np.load(f'saved/all_losses_{model_name}_{transform_name}_{seed}.npy') for seed in range(n_trials)])
diff_og = losses[:, 1] - losses[:, 0]
diff_1st = losses[:, 2] - losses[:, 0]
diff_2nd_no_1st = losses[:, 3] - losses[:, 0]
diff_2nd = losses[:, 4] - losses[:, 0]
plt.clf()
plt.errorbar(range(sgd_n_epochs + 1), diff_og.mean(axis=0), diff_og.std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(sgd_n_epochs + 1), diff_1st.mean(axis=0), diff_1st.std(axis=0), fmt='o-', capsize=5, label='1st-order')
plt.errorbar(range(sgd_n_epochs + 1), diff_2nd_no_1st.mean(axis=0), diff_2nd_no_1st.std(axis=0), fmt='o-', capsize=5, label='2nd-order w/o 1st-order')
plt.errorbar(range(sgd_n_epochs + 1), diff_2nd.mean(axis=0), diff_2nd.std(axis=0), fmt='o-', capsize=5, label='2nd-order')
plt.xlabel('Epoch')
plt.ylabel('Difference in objective')
plt.legend()
plt.axhline(color='k')
plt.savefig(f'figs/objective_difference_{model_name}_{transform_name}.pdf', bbox_inches='tight')
def plot_agreement_kl():
"""Plot training/valid agreements and KL divergence
"""
for model_name in ['kernel', 'lenet']:
for transform_name in ['rotation', 'crop', 'blur', 'rotation_crop_blur']:
saved_arrays = [np.load(f'saved/train_valid_agreement_kl_{model_name}_{transform_name}_{seed}.npz')
for seed in range(n_trials)]
train_agreement = np.array([saved['train_agreement'] for saved in saved_arrays])
valid_agreement = np.array([saved['valid_agreement'] for saved in saved_arrays])
valid_kl = np.array([saved['valid_kl'] for saved in saved_arrays])
valid_acc = np.array([saved['valid_acc'] for saved in saved_arrays])
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 1].mean(axis=0), train_agreement[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 2].mean(axis=0), train_agreement[:, 2].std(axis=0), fmt='o-', capsize=5, label='1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 3].mean(axis=0), train_agreement[:, 3].std(axis=0), fmt='o-', capsize=5, label='2nd-order w/o 1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 4].mean(axis=0), train_agreement[:, 4].std(axis=0), fmt='o-', capsize=5, label='2nd-order')
plt.xlabel('Epoch')
plt.ylabel('Prediction agreement')
plt.legend()
# plt.axhline(color='k')
plt.savefig(f'figs/prediction_agreement_training_{model_name}_{transform_name}.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 1].mean(axis=0), valid_agreement[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 2].mean(axis=0), valid_agreement[:, 2].std(axis=0), fmt='o-', capsize=5, label='1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 3].mean(axis=0), valid_agreement[:, 3].std(axis=0), fmt='o-', capsize=5, label='2nd-order w/o 1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 4].mean(axis=0), valid_agreement[:, 4].std(axis=0), fmt='o-', capsize=5, label='2nd-order')
plt.xlabel('Epoch')
plt.ylabel('Prediction agreement')
plt.legend()
# plt.axhline(color='k')
plt.savefig(f'figs/prediction_agreement_valid_{model_name}_{transform_name}.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 1].mean(axis=0), valid_kl[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 2].mean(axis=0), valid_kl[:, 2].std(axis=0), fmt='o-', capsize=5, label='1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 3].mean(axis=0), valid_kl[:, 3].std(axis=0), fmt='o-', capsize=5, label='2nd-order w/o 1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 4].mean(axis=0), valid_kl[:, 4].std(axis=0), fmt='o-', capsize=5, label='2nd-order')
plt.xlabel('Epoch')
plt.ylabel('Prediction KL')
plt.legend()
plt.axhline(color='k')
plt.savefig(f'figs/kl_valid_{model_name}_{transform_name}.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 1].mean(axis=0), valid_acc[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 2].mean(axis=0), valid_acc[:, 2].std(axis=0), fmt='o-', capsize=5, label='1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 3].mean(axis=0), valid_acc[:, 3].std(axis=0), fmt='o-', capsize=5, label='2nd-order w/o 1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 4].mean(axis=0), valid_acc[:, 4].std(axis=0), fmt='o-', capsize=5, label='2nd-order')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 0].mean(axis=0), valid_acc[:, 0].std(axis=0), fmt='o-', capsize=5, label='Exact (augmented images)')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
# plt.axhline(color='k')
plt.savefig(f'figs/accuracy_valid_{model_name}_{transform_name}.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 1] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 1] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 2] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 2] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 3] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 3] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='2nd-order w/o 1st-order')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 4] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 4] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='2nd-order')
plt.xlabel('Epoch')
plt.ylabel('Accuracy difference')
plt.legend()
plt.axhline(color='k')
plt.savefig(f'figs/accuracy_difference_valid_{model_name}_{transform_name}.pdf', bbox_inches='tight')
def plot_agreement_kl_avg_at_layers():
"""Plot generalization difference when doing feature averaging at different layers
"""
model_name = 'lenet'
transform_name = 'rotation'
saved_arrays = [np.load(f'saved/train_valid_agreement_kl_{model_name}_{transform_name}_{seed}.npz')
for seed in range(n_trials)]
train_agreement = np.array([saved['train_agreement'] for saved in saved_arrays])
valid_agreement = np.array([saved['valid_agreement'] for saved in saved_arrays])
valid_kl = np.array([saved['valid_kl'] for saved in saved_arrays])
valid_acc = np.array([saved['valid_acc'] for saved in saved_arrays])
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 1].mean(axis=0), train_agreement[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 4].mean(axis=0), train_agreement[:, 4].std(axis=0), fmt='o-', capsize=5, label='Averaged at 4th layer')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 5].mean(axis=0), train_agreement[:, 5].std(axis=0), fmt='o-', capsize=5, label='Averaged at 3rd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 6].mean(axis=0), train_agreement[:, 6].std(axis=0), fmt='o-', capsize=5, label='Averaged at 2nd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 7].mean(axis=0), train_agreement[:, 7].std(axis=0), fmt='o-', capsize=5, label='Averaged at 1st layer')
plt.errorbar(range(1, sgd_n_epochs + 1), train_agreement[:, 8].mean(axis=0), train_agreement[:, 8].std(axis=0), fmt='o-', capsize=5, label='Averaged at 0th layer')
plt.xlabel('Epoch')
plt.ylabel('Prediction agreement')
plt.legend()
# plt.axhline(color='k')
plt.savefig(f'figs/prediction_agreement_training_{model_name}_{transform_name}_layers.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 1].mean(axis=0), valid_agreement[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 4].mean(axis=0), valid_agreement[:, 4].std(axis=0), fmt='o-', capsize=5, label='Averaged at 4th layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 5].mean(axis=0), valid_agreement[:, 5].std(axis=0), fmt='o-', capsize=5, label='Averaged at 3rd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 6].mean(axis=0), valid_agreement[:, 6].std(axis=0), fmt='o-', capsize=5, label='Averaged at 2nd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 7].mean(axis=0), valid_agreement[:, 7].std(axis=0), fmt='o-', capsize=5, label='Averaged at 1st layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_agreement[:, 8].mean(axis=0), valid_agreement[:, 8].std(axis=0), fmt='o-', capsize=5, label='Averaged at 0th layer')
plt.xlabel('Epoch')
plt.ylabel('Prediction agreement')
plt.legend()
# plt.axhline(color='k')
plt.savefig(f'figs/prediction_agreement_valid_{model_name}_{transform_name}_layers.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 1].mean(axis=0), valid_kl[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 4].mean(axis=0), valid_kl[:, 4].std(axis=0), fmt='o-', capsize=5, label='Averaged at 4th layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 5].mean(axis=0), valid_kl[:, 5].std(axis=0), fmt='o-', capsize=5, label='Averaged at 3rd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 6].mean(axis=0), valid_kl[:, 6].std(axis=0), fmt='o-', capsize=5, label='Averaged at 2nd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 7].mean(axis=0), valid_kl[:, 7].std(axis=0), fmt='o-', capsize=5, label='Averaged at 1st layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_kl[:, 8].mean(axis=0), valid_kl[:, 8].std(axis=0), fmt='o-', capsize=5, label='Averaged at 0th layer')
plt.xlabel('Epoch')
plt.ylabel('Prediction KL')
plt.legend()
plt.axhline(color='k')
plt.savefig(f'figs/kl_valid_{model_name}_{transform_name}_layers.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 1].mean(axis=0), valid_acc[:, 1].std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 4].mean(axis=0), valid_acc[:, 4].std(axis=0), fmt='o-', capsize=5, label='Averaged at 4th layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 5].mean(axis=0), valid_acc[:, 5].std(axis=0), fmt='o-', capsize=5, label='Averaged at 3rd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 6].mean(axis=0), valid_acc[:, 6].std(axis=0), fmt='o-', capsize=5, label='Averaged at 2nd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 7].mean(axis=0), valid_acc[:, 7].std(axis=0), fmt='o-', capsize=5, label='Averaged at 1st layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 8].mean(axis=0), valid_acc[:, 8].std(axis=0), fmt='o-', capsize=5, label='Averaged at 0th layer')
plt.errorbar(range(1, sgd_n_epochs + 1), valid_acc[:, 0].mean(axis=0), valid_acc[:, 0].std(axis=0), fmt='o-', capsize=5, label='Exact (augmented images)')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
# plt.axhline(color='k')
plt.savefig(f'figs/accuracy_valid_{model_name}_{transform_name}_layers.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 1] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 1] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Original image')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 4] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 4] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Averaged at 4th layer')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 5] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 5] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Averaged at 3rd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 6] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 6] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Averaged at 2nd layer')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 7] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 7] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Averaged at 1st layer')
plt.errorbar(range(1, sgd_n_epochs + 1), (valid_acc[:, 8] - valid_acc[:, 0]).mean(axis=0), (valid_acc[:, 8] - valid_acc[:, 0]).std(axis=0), fmt='o-', capsize=5, label='Averaged at 0th layer')
plt.xlabel('Epoch')
plt.ylabel('Accuracy difference')
plt.legend()
plt.axhline(color='k')
plt.savefig(f'figs/accuracy_difference_valid_{model_name}_{transform_name}_layers.pdf', bbox_inches='tight')
def plot_accuracy_vs_computation():
"""Plot computational savings when doing averaging at earlier layers of LeNet
"""
layers = ['conv1_maxpool1', 'conv2_maxpool2', 'fc1', 'fc2', 'fc3']
# flops = np.array([50 * 24 * 24 * 6 + 4 * 12 * 12 * 6, 24 * 8 * 8 * 16 + 4 * 4 * 4 * 16, 256 * 120 + 120, 120 * 84 + 84, 84 * 10])
# computation_time = np.array([193, 120, 42, 42, 31])
# offset = 3
# computation_time -= offset
computation_time = np.array([123, 94, 41, 40, 30]) # Measured with iPython's %timeit
ratio = computation_time / computation_time.sum()
n_transforms = 16
exact = n_transforms
avg = np.empty(6)
avg[5] = 1.0
avg[4] = (ratio[:4].sum() * n_transforms + ratio[4:].sum()) / exact
avg[3] = (ratio[:3].sum() * n_transforms + ratio[3:].sum()) / exact
avg[2] = (ratio[:2].sum() * n_transforms + ratio[2:].sum()) / exact
avg[1] = (ratio[:1].sum() * n_transforms + ratio[1:].sum()) / exact
avg[0] = (ratio[:0].sum() * n_transforms + ratio[0:].sum()) / exact
model_name = 'lenet'
transform_name = 'rotation'
saved_arrays = [np.load(f'saved/train_valid_agreement_kl_{model_name}_{transform_name}_{seed}.npz')
for seed in range(n_trials)]
valid_acc = np.array([saved['valid_acc'] for saved in saved_arrays])
plt.clf()
plt.errorbar(avg, valid_acc[:, [8, 7, 6, 5, 4, 0], -1].mean(axis=0), valid_acc[:, [8, 7, 6, 5, 4, 0], -1].std(axis=0), fmt='o-', capsize=5)
plt.ylabel('Accuracy')
plt.xlabel('Computation fraction')
plt.savefig(f'figs/accuracy_vs_computation_{model_name}_{transform_name}.pdf', bbox_inches='tight')
# Plot relative accuracy gain
l, u = valid_acc[:, 8, -1].mean(axis=0), valid_acc[:, 0, -1].mean(axis=0)
plt.figure()
plt.errorbar(avg, (valid_acc[:, [8, 7, 6, 5, 4, 0], -1].mean(axis=0) - l) / (u - l), valid_acc[:, [8, 7, 6, 5, 4, 0], -1].std(axis=0) / (u - l), fmt='o-', capsize=10, markersize=10, linewidth=2)
plt.ylabel('Relative accuracy gain', fontsize=16)
plt.xlabel('Computation fraction', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.savefig(f'figs/accuracy_vs_computation_relative_{model_name}_{transform_name}.pdf', bbox_inches='tight')
plt.close()
def plot_accuracy_vs_kernel_alignment():
"""Scatter plot of accuracy vs kernel target alignment
"""
valid_acc = []
for model_name in ['kernel', 'lenet']:
valid_acc_per_model = []
# Accuracy on no transform
saved_arrays = [np.load(f'saved/train_valid_agreement_kl_{model_name}_blur_{seed}.npz')
for seed in range(n_trials)]
valid_acc_per_model.append(np.array([saved['valid_acc'] for saved in saved_arrays])[:, 1, -1])
for transform_name in ['rotation', 'crop', 'blur', 'rotation_crop_blur', 'hflip', 'hflip_vflip', 'brightness', 'contrast']:
saved_arrays = [np.load(f'saved/train_valid_acc_{model_name}_{transform_name}_{seed}.npz')
for seed in range(n_trials)]
valid_acc_per_model.append(np.array([saved['valid_acc'] for saved in saved_arrays])[:, -1])
# print(valid_acc.mean(axis=0)[-1], valid_acc.std(axis=0)[-1])
valid_acc.append(valid_acc_per_model)
valid_acc = np.array(valid_acc)
kernel_alignment = np.load('saved/kernel_alignment.npy')
plt.clf()
plt.errorbar(kernel_alignment, valid_acc[0].mean(axis=-1), valid_acc[0].std(axis=-1), fmt='o', capsize=5)
plt.axhline(valid_acc[0, 0].mean(axis=-1), color='k')
plt.axvline(kernel_alignment[0], color='k')
plt.errorbar(kernel_alignment[0], valid_acc[0, 0].mean(axis=-1), valid_acc[0, 0].std(axis=-1), fmt='o', capsize=5)
plt.ylabel('Accuracy')
plt.xlabel('Kernel target alignment')
plt.savefig(f'figs/accuracy_vs_alignment_kernel.pdf', bbox_inches='tight')
plt.clf()
plt.errorbar(kernel_alignment, valid_acc[1].mean(axis=-1), valid_acc[1].std(axis=-1), fmt='o', capsize=5)
plt.axhline(valid_acc[1, 0].mean(axis=-1), color='k')
plt.axvline(kernel_alignment[0], color='k')
plt.errorbar(kernel_alignment[0], valid_acc[1, 0].mean(axis=-1), valid_acc[1, 0].std(axis=-1), fmt='o', capsize=5)
plt.ylabel('Accuracy')
plt.xlabel('Kernel target alignment')
plt.savefig(f'figs/accuracy_vs_alignment_lenet.pdf', bbox_inches='tight')
plt.clf()
sns.set_style('white')
plt.figure(figsize=(10, 5))
ax = plt.subplot(1, 2, 1)
ax.errorbar(kernel_alignment[0], valid_acc[0, 0].mean(axis=-1), valid_acc[0, 0].std(axis=-1), fmt='x', color='r', capsize=5)
ax.errorbar(kernel_alignment[1], valid_acc[0, 1].mean(axis=-1), valid_acc[0, 1].std(axis=-1), fmt='s', color='b', capsize=5)
ax.errorbar(kernel_alignment[2], valid_acc[0, 2].mean(axis=-1), valid_acc[0, 2].std(axis=-1), fmt='s', color='g', capsize=5)
ax.errorbar(kernel_alignment[3], valid_acc[0, 3].mean(axis=-1), valid_acc[0, 3].std(axis=-1), fmt='o', color='b', capsize=5)
ax.errorbar(kernel_alignment[4], valid_acc[0, 4].mean(axis=-1), valid_acc[0, 4].std(axis=-1), fmt='s', color='tab:orange', capsize=5)
ax.errorbar(kernel_alignment[5], valid_acc[0, 5].mean(axis=-1), valid_acc[0, 5].std(axis=-1), fmt='D', color='g', capsize=5)
ax.errorbar(kernel_alignment[6], valid_acc[0, 6].mean(axis=-1), valid_acc[0, 6].std(axis=-1), fmt='o', color='g', capsize=5)
ax.errorbar(kernel_alignment[7], valid_acc[0, 7].mean(axis=-1), valid_acc[0, 7].std(axis=-1), fmt='D', color='b', capsize=5)
ax.errorbar(kernel_alignment[8], valid_acc[0, 8].mean(axis=-1), valid_acc[0, 8].std(axis=-1), fmt='D', color='m', capsize=5)
ax.axhline(valid_acc[0, 0].mean(axis=-1), color='k')
ax.axvline(kernel_alignment[0], color='k')
ax.set_yticks([0.94, 0.96, 0.98])
ax.tick_params(axis='both', which='major', labelsize=12)
ax.set_ylabel('Accuracy', fontsize=16)
ax.set_title('RBF Kernel', fontsize=16)
ax = plt.subplot(1, 2, 2)
ax.errorbar(kernel_alignment[0], valid_acc[1, 0].mean(axis=-1), valid_acc[1, 0].std(axis=-1), fmt='x', color='r', capsize=5, label='original')
ax.errorbar(kernel_alignment[1], valid_acc[1, 1].mean(axis=-1), valid_acc[1, 1].std(axis=-1), fmt='s', color='b', capsize=5, label='rotation')
ax.errorbar(kernel_alignment[2], valid_acc[1, 2].mean(axis=-1), valid_acc[1, 2].std(axis=-1), fmt='s', color='g', capsize=5, label='crop')
ax.errorbar(kernel_alignment[3], valid_acc[1, 3].mean(axis=-1), valid_acc[1, 3].std(axis=-1), fmt='o', color='b', capsize=5, label='blur')
ax.errorbar(kernel_alignment[4], valid_acc[1, 4].mean(axis=-1), valid_acc[1, 4].std(axis=-1), fmt='s', color='tab:orange', capsize=5, label='rotation, crop, blur')
ax.errorbar(kernel_alignment[5], valid_acc[1, 5].mean(axis=-1), valid_acc[1, 5].std(axis=-1), fmt='D', color='g', capsize=5, label='h. flip')
ax.errorbar(kernel_alignment[6], valid_acc[1, 6].mean(axis=-1), valid_acc[1, 6].std(axis=-1), fmt='o', color='g', capsize=5, label='h. flip, v. flip')
ax.errorbar(kernel_alignment[7], valid_acc[1, 7].mean(axis=-1), valid_acc[1, 7].std(axis=-1), fmt='D', color='b', capsize=5, label='brightness')
ax.errorbar(kernel_alignment[8], valid_acc[1, 8].mean(axis=-1), valid_acc[1, 8].std(axis=-1), fmt='D', color='m', capsize=5, label='contrast')
ax.axhline(valid_acc[1, 0].mean(axis=-1), color='k')
ax.axvline(kernel_alignment[0], color='k')
ax.set_yticks([0.97, 0.98, 0.99])
ax.tick_params(axis='both', which='major', labelsize=12)
ax.set_ylabel('Accuracy', fontsize=16)
ax.set_title('LeNet', fontsize=16)
# sns.despine()
# labels = ['original', 'rotation', 'crop', 'blur', 'rot., crop, blur', 'h. flip', 'h. flip, v. flip', 'brightness', 'contrast']
# plt.legend(labels, loc='upper center', bbox_transform=plt.gcf().transFigure, bbox_to_anchor=(0,0,1,1), ncol=3, fontsize=14)
plt.legend(loc='upper center', bbox_transform=plt.gcf().transFigure, bbox_to_anchor=(0,0.07,1,1), ncol=3, fontsize=16, frameon=True, edgecolor='k')
plt.tight_layout()
plt.subplots_adjust(wspace=0.4, top=0.75, bottom=0.1)
plt.suptitle('Kernel target alignment', x=0.5, y=0.05, fontsize=16)
# ax.set_ylabel('Accuracy')
# plt.xlabel('Kernel target alignment')
plt.savefig(f'figs/accuracy_vs_alignment.pdf', bbox_inches='tight')
def main():
pathlib.Path('figs').mkdir(parents=True, exist_ok=True)
plot_objective_difference()
plot_agreement_kl()
plot_agreement_kl_avg_at_layers()
plot_accuracy_vs_computation()
plot_accuracy_vs_kernel_alignment()
if __name__ == '__main__':
main()
# === end of augmentation_code-master :: plot.py ===
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
class MultinomialLogisticRegression(nn.Module):
"""Abstract class for multinomial logistic regression.
Subclasses need to implement @features and @output_from_features.
"""
def features(self, x):
raise NotImplementedError()
def output_from_features(self, feat):
raise NotImplementedError()
def forward(self, x):
return self.output_from_features(self.features(x))
    @staticmethod
    def loss(output, target, reduce=True):
        # the `reduce` kwarg is deprecated in recent PyTorch; map it to `reduction`
        return F.cross_entropy(output, target,
                               reduction='mean' if reduce else 'none')
@staticmethod
def predict(output):
return output.detach().max(1)[1]
class MultinomialLogisticRegressionAug(MultinomialLogisticRegression):
"""Abstract class for multinomial logistic regression on augmented data.
Input has size B x T x ..., where B is batch size and T is the number of
transformations.
Original i-th data point placed first among the transformed versions, which
is input[i, 0].
    Output has size B x T x C (no. of classes) without the approximation, or B x C with it.
Works exactly like the non-augmented version with default options and normal
loader (where input is of size B x ...).
"""
def __init__(self, approx=True, feature_avg=True, regularization=False):
"""Parameters:
approx: whether to use approximation or train on augmented data points.
If False, ignore @feature_avg and @regularization.
feature_avg: whether to average features or just use the features of the original data point.
regularization: whether to add 2nd order term (variance regularization).
"""
self.approx = approx
self.feature_avg = feature_avg
self.regularization = regularization
self.regularization_2nd_order_general = self.regularization_2nd_order
def forward(self, x):
augmented = x.dim() > 4
if augmented:
n_transforms = x.size(1)
x = combine_transformed_dimension(x)
feat = self.features(x)
if self.approx and augmented:
if not feat.requires_grad:
feat.requires_grad = True
feat = split_transformed_dimension(feat, n_transforms)
if self.feature_avg:
self._avg_features = feat.mean(dim=1)
else:
self._avg_features = feat[:, 0]
if self.regularization: # Storing this every time consumes lots of memory
self._centered_features = feat - self._avg_features[:, None]
feat = self._avg_features
output = self.output_from_features(feat)
if not self.approx and augmented:
output = split_transformed_dimension(output, n_transforms)
return output
@staticmethod
def predict(output):
if output.dim() > 2:
# Average over transformed versions of the same data point
output = output.mean(dim=1)
return output.detach().max(1)[1]
@classmethod
def loss_original(cls, output, target, reduce=True):
"""Original cross entropy loss.
"""
return super().loss(output, target, reduce=reduce)
@classmethod
def loss_on_augmented_data(cls, output, target, reduce=True):
"""Loss averaged over augmented data points (no approximation).
"""
# For each data point, replicate the target then compute the cross
# entropy loss. Finally stack the result.
loss = torch.stack([
cls.loss_original(out, tar.repeat(out.size(0)))
for out, tar in zip(output, target)
])
return loss.mean() if reduce else loss
def regularization_2nd_order(self, output, reduce=True):
"""Compute regularization term from output instead of from loss.
Fast implementation by evaluating the Jacobian directly instead of relying on 2nd order differentiation.
"""
p = F.softmax(output, dim=-1)
# Using autograd.grad(output[:, i]) is slower since it creates new node in graph.
# ones = torch.ones_like(output[:, 0])
# W = torch.stack([autograd.grad(output[:, i], self._avg_features, grad_outputs=ones, create_graph=True)[0]
# for i in range(10)], dim=1)
eye = torch.eye(output.size(1), device=output.device)
eye = eye[None, :].expand(output.size(0), -1, -1)
W = torch.stack([autograd.grad(output, self._avg_features, grad_outputs=eye[:, i], create_graph=True)[0]
for i in range(10)], dim=1)
# t = (W[:, None] * self._centered_features[:, :, None]).view(W.size(0), self._centered_features.size(1), W.size(1), -1).sum(dim=-1)
t = (W.view(W.size(0), 1, W.size(1), -1) @ self._centered_features.view(*self._centered_features.shape[:2], -1, 1)).squeeze(-1)
term_1 = (t**2 * p[:, None]).sum(dim=-1).mean(dim=-1)
# term_1 = (t**2 @ p[:, :, None]).squeeze(2).mean(dim=-1)
term_2 = ((t * p[:, None]).sum(dim=-1)**2).mean(dim=-1)
# term_2 = ((t @ p[:, :, None]).squeeze(2)**2).mean(dim=-1)
reg = (term_1 - term_2) / 2
return reg.mean() if reduce else reg
def regularization_2nd_order_linear(self, output, reduce=True):
"""Variance regularization (2nd order) term when the model is linear.
        Fastest implementation since it doesn't rely on PyTorch's autograd.
Equal to E[(W phi - W psi)^T (diag(p) - p p^T) (W phi - W psi)] / 2,
where W is the weight matrix, phi is the feature, psi is the average
feature, and p is the softmax probability.
In this case @output is W phi + bias, but the bias will be subtracted away.
"""
p = F.softmax(output, dim=-1)
unreduced_output = self.output_from_features(self._centered_features + self._avg_features[:, None])
reduced_output = self.output_from_features(self._avg_features)
centered_output = unreduced_output - reduced_output[:, None]
term_1 = (centered_output**2 * p[:, None]).sum(dim=-1).mean(dim=-1)
term_2 = ((centered_output * p[:, None]).sum(dim=-1)**2).mean(dim=-1)
reg = (term_1 - term_2) / 2
return reg.mean() if reduce else reg
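    # Derivation sketch for the linear case above: with logits z = W phi + b
    # and per-example loss l(z) = -log softmax(z)_y, a 2nd-order Taylor
    # expansion of E[l(W phi + b)] around the average feature psi = E[phi] is
    #   E[l] ~ l(W psi + b) + 1/2 E[(W phi - W psi)^T H (W phi - W psi)],
    # where H = diag(p) - p p^T is the Hessian of l in the logits; the
    # 1st-order term vanishes because E[phi - psi] = 0, and term_1 - term_2
    # above is exactly this quadratic form averaged over transformations.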
def regularization_2nd_order_slow(self, output, reduce=True):
"""Compute regularization term from output, but uses pytorch's 2nd order differentiation.
Slow implementation, only faster than @regularization_2nd_order_from_loss.
"""
p = F.softmax(output, dim=-1)
g, = autograd.grad(output, self._avg_features, grad_outputs=p, create_graph=True)
term_1 = []
for i in range(self._centered_features.size(1)):
gg, = autograd.grad(g, p, grad_outputs=self._centered_features[:, i], create_graph=True)
term_1.append((gg**2 * p).sum(dim=-1))
term_1 = torch.stack(term_1, dim=-1).mean(dim=-1)
term_2 = ((g[:, None] * self._centered_features).view(*self._centered_features.shape[:2], -1).sum(dim=-1)**2).mean(dim=-1)
reg = (term_1 - term_2) / 2
return reg.mean() if reduce else reg
def regularization_2nd_order_from_loss(self, loss, reduce=True):
"""Variance regularization (2nd order) term.
Computed from loss, using Pytorch's 2nd order differentiation.
This is much slower but more likely to be correct. Used to check other implementations.
"""
g, = autograd.grad(loss * self._avg_features.size(0), self._avg_features, create_graph=True)
reg = []
for i in range(self._centered_features.size(1)):
gg, = autograd.grad(g, self._avg_features, grad_outputs=self._centered_features[:, i], create_graph=True)
reg.append((gg * self._centered_features[:, i]).view(gg.size(0), -1).sum(dim=-1))
reg = torch.stack(reg, dim=-1).mean(dim=-1) / 2
return reg.mean() if reduce else reg
def loss(self, output, target, reduce=True):
"""Cross entropy loss, with optional variance regularization.
"""
if not self.approx: # No approximation, loss on all augmented data points
return self.loss_on_augmented_data(output, target, reduce=reduce)
loss = self.loss_original(output, target, reduce=reduce)
if self.regularization:
return loss + self.regularization_2nd_order(output, reduce=reduce)
else:
return loss
def all_losses(self, x, target, reduce=True):
"""All losses: true loss on augmented data, loss on original image, approximate
loss with feature averaging (1st order), approximate loss with
variance regularization and no feature averaging, and approximate
loss with feature averaging and variance regularization (2nd order).
Used to compare the effects of different approximations.
Parameters:
x: the input of size B (batch size) x T (no. of transforms) x ...
target: target of size B (batch size)
"""
        approx, feature_avg, regularization = (
            self.approx, self.feature_avg, self.regularization,
        )
        self.approx, self.feature_avg, self.regularization = True, True, True
output = self(x)
features = self._centered_features + self._avg_features[:, None]
n_transforms = features.size(1)
unreduced_output = self.output_from_features(combine_transformed_dimension(features))
unreduced_output = split_transformed_dimension(unreduced_output, n_transforms)
true_loss = self.loss_on_augmented_data(unreduced_output, target, reduce=reduce)
reduced_output = output
loss_original = self.loss_original(unreduced_output[:, 0], target, reduce=reduce)
loss_1st_order = self.loss_original(reduced_output, target, reduce=reduce)
reg_2nd_order = self.regularization_2nd_order(output, reduce=reduce)
loss_2nd_order = loss_1st_order + reg_2nd_order
loss_2nd_no_1st = loss_original + reg_2nd_order
        self.approx, self.feature_avg, self.regularization = approx, feature_avg, regularization
return true_loss, loss_original, loss_1st_order, loss_2nd_no_1st, loss_2nd_order
class LinearLogisticRegression(MultinomialLogisticRegression):
"""Simple linear logistic regression model.
"""
def __init__(self, n_features, n_classes):
"""Parameters:
n_features: number of input features.
n_classes: number of classes.
"""
super().__init__()
self.fc = nn.Linear(n_features, n_classes)
def features(self, x):
return x.view(x.size(0), x.size(1), -1) if x.dim() > 4 else x.view(x.size(0), -1)
def output_from_features(self, feat):
return self.fc(feat)
class RBFLogisticRegression(MultinomialLogisticRegression):
"""Logistic regression with RBF kernel approximation (random Fourier features).
Equivalent to neural network with 2 layers: first layer is random
projection with sine-cosine nonlinearity, and second trainable linear
layer.
"""
def __init__(self, n_features, n_classes, gamma=1.0, n_components=100):
"""Parameters:
n_features: number of input features.
n_classes: number of classes.
gamma: hyperparameter of the RBF kernel k(x, y) = exp(-gamma*||x-y||^2)
n_components: number of components used to approximate kernel, i.e.
number of hidden units.
"""
super().__init__()
n_components //= 2 # Need 2 slots each for sine and cosine
self.fc = nn.Linear(n_components * 2, n_classes)
self.gamma = nn.Parameter(torch.Tensor([gamma]), requires_grad=False)
self.random_directions = nn.Parameter(
torch.randn(n_features, n_components), requires_grad=False)
def features(self, x):
x = x.view(x.size(0), x.size(1), -1) if x.dim() > 4 else x.view(x.size(0), -1)
projected_x = torch.sqrt(2 * self.gamma) * (x @ self.random_directions)
# Don't normalize by sqrt(self.n_components), it makes the weights too small.
return torch.cat((torch.sin(projected_x), torch.cos(projected_x)), -1)
def output_from_features(self, feat):
return self.fc(feat)
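# Sanity-check sketch for the random Fourier features above: the rescaled
# inner product of two feature maps approximates the RBF kernel
# exp(-gamma * ||x - y||^2) up to Monte Carlo error.
#
#   m = RBFLogisticRegression(n_features=5, n_classes=2, gamma=0.5,
#                             n_components=20000)
#   x, y = torch.randn(1, 5), torch.randn(1, 5)
#   approx = (m.features(x) * m.features(y)).sum() / m.random_directions.size(1)
#   exact = torch.exp(-0.5 * ((x - y) ** 2).sum())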
class LeNet(MultinomialLogisticRegression):
"""LeNet for MNIST, with 2 convolution-max pooling layers and 2 fully connected
layers.
"""
def __init__(self, n_channels=1, size=28):
super().__init__()
self.conv1 = nn.Conv2d(n_channels, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * (size // 4 - 3) * (size // 4 - 3), 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.layers = [self.layer_1, self.layer_2, self.layer_3, self.layer_4]
def layer_1(self, x):
feat = F.relu(self.conv1(x))
return F.max_pool2d(feat, 2)
def layer_2(self, x):
feat = F.relu(self.conv2(x))
return F.max_pool2d(feat, 2).view(feat.size(0), -1)
def layer_3(self, x):
return F.relu(self.fc1(x))
def layer_4(self, x):
return F.relu(self.fc2(x))
def features(self, x):
feat = x
for layer in self.layers:
feat = layer(feat)
return feat
def output_from_features(self, feat):
return self.fc3(feat)
class LinearLogisticRegressionAug(MultinomialLogisticRegressionAug,
LinearLogisticRegression):
"""Linear logistic regression model with augmented data.
Input has size B x T x ..., where B is batch size and T is the number of
transformations.
Original i-th data point placed first among the transformed versions, which
is input[i, 0].
    Output has size B x T x C (no. of classes) without the approximation, or B x C with it.
"""
def __init__(self,
n_features,
n_classes,
approx=True,
feature_avg=True,
regularization=False):
"""Parameters:
n_features: number of input features.
n_classes: number of classes.
approx: whether to use approximation or train on augmented data points.
If False, ignore @feature_avg and @regularization.
feature_avg: whether to average features or just use the features of the original data point.
regularization: whether to add 2nd order term (variance regularization).
"""
LinearLogisticRegression.__init__(self, n_features, n_classes)
MultinomialLogisticRegressionAug.__init__(self, approx, feature_avg,
regularization)
self.regularization_2nd_order = self.regularization_2nd_order_linear
class RBFLogisticRegressionAug(MultinomialLogisticRegressionAug,
RBFLogisticRegression):
"""Logistic regression model with RBF kernel and augmented data.
Input has size B x T x ..., where B is batch size and T is the number of
transformations.
Original i-th data point placed first among the transformed versions, which
is input[i, 0].
    Output has size B x T x C (no. of classes) without the approximation, or B x C with it.
"""
def __init__(self,
n_features,
n_classes,
gamma=1.0,
n_components=100,
approx=True,
feature_avg=True,
regularization=False):
RBFLogisticRegression.__init__(self, n_features, n_classes, gamma,
n_components)
MultinomialLogisticRegressionAug.__init__(self, approx, feature_avg,
regularization)
self.regularization_2nd_order = self.regularization_2nd_order_linear
class LeNetAug(MultinomialLogisticRegressionAug, LeNet):
"""LeNet for MNIST, with 2 convolution-max pooling layers and 2 fully connected
layers.
"""
def __init__(self, n_channels=1, size=28, approx=True, feature_avg=True, regularization=False, layer_to_avg=4):
LeNet.__init__(self, n_channels, size)
MultinomialLogisticRegressionAug.__init__(self, approx, feature_avg,
regularization)
error_msg = "[!] layer_to_avg should be in the range [0, ..., 4]."
assert (layer_to_avg in range(5)), error_msg
self.layer_to_avg = layer_to_avg
if layer_to_avg == 4: # Not a linear model unless averaging at 4th layer
self.regularization_2nd_order = self.regularization_2nd_order_linear
def features(self, x):
feat = x
for layer in self.layers[:self.layer_to_avg]:
feat = layer(feat)
return feat
def output_from_features(self, feat):
for layer in self.layers[self.layer_to_avg:]:
feat = layer(feat)
return self.fc3(feat)
def combine_transformed_dimension(input):
"""Combine the minibatch and the transformation dimensions.
Parameter:
input: Tensor of shape B x T x ..., where B is the batch size and T is
the number of transformations.
Return:
output: Same tensor, now of shape (B * T) x ....
"""
return input.view(-1, *input.shape[2:])
def split_transformed_dimension(input, n_transforms):
"""Split the minibatch and the transformation dimensions.
Parameter:
input: Tensor of shape (B * T) x ..., where B is the batch size and T is
the number of transformations.
Return:
output: Same tensor, now of shape B x T x ....
"""
return input.view(-1, n_transforms, *input.shape[1:])
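# Shape round trip for the two helpers above (sketch):
#
#   x = torch.randn(8, 3, 1, 28, 28)          # B=8 images, T=3 transforms
#   flat = combine_transformed_dimension(x)   # shape (24, 1, 28, 28)
#   back = split_transformed_dimension(flat, n_transforms=3)
#   assert back.shape == x.shape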
# === end of augmentation_code-master :: models.py ===
import copy
from collections import namedtuple
import numpy as np
import torch
from torchvision import transforms
from PIL import Image, ImageFilter, ImageEnhance
# An augmentation object consists of its name, the transform functions of type
# torchvision.transforms, and the resulting augmented dataset of type
# torch.utils.data.Dataset.
Augmentation = namedtuple('Augmentation', ['name', 'transforms', 'dataset'])
def copy_with_new_transform(dataset, transform):
"""A copy of @dataset with its transform set to @transform.
Will work for datasets from torchvision, e.g., MNIST, CIFAR10, etc. Probably
won't work for a generic dataset.
"""
new_dataset = copy.copy(dataset)
new_dataset.transform = transform
return new_dataset
def augment_transforms(augmentations, base_transform, add_id_transform=True):
"""Construct a new transform that stack all the augmentations.
Parameters:
augmentations: list of transforms (e.g. image rotations)
base_transform: transform to be applied after augmentation (e.g. ToTensor)
add_id_transform: whether to include the original image (i.e. identity transform) in the new transform.
Return:
        a new transform that takes in a data point, applies all the
        augmentations, and stacks the results.
"""
if add_id_transform:
fn = lambda x: torch.stack([base_transform(x)] + [base_transform(aug(x))
for aug in augmentations])
else:
fn = lambda x: torch.stack([base_transform(aug(x)) for aug in augmentations])
return transforms.Lambda(fn)
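# Usage sketch: stacking two fixed rotations plus the identity yields a
# (1 + 2) x C x H x W tensor per image, matching the B x T x ... convention
# expected by the Aug models in models.py.
#
#   rots = [transforms.RandomRotation((a, a)) for a in (-10, 10)]
#   t = augment_transforms(rots, transforms.ToTensor())
#   # t(img) has shape (3, 1, 28, 28) for a 28x28 grayscale PIL image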
def rotation(base_dataset, base_transform, angles=range(-15, 16, 2)):
"""Rotations, e.g. between -15 and 15 degrees
"""
rotations = [transforms.RandomRotation((angle, angle)) for angle in angles]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(rotations, base_transform))
return Augmentation('rotation', rotations, aug_dataset)
def resized_crop(base_dataset, base_transform, size=28, scale=(0.64, 1.0), n_random_samples=31):
"""Random crop (with resize)
"""
random_resized_crops = [transforms.RandomResizedCrop(size, scale=scale) for _ in range(n_random_samples)]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(random_resized_crops, base_transform))
return Augmentation('crop', random_resized_crops, aug_dataset)
def blur(base_dataset, base_transform, radii=np.linspace(0.05, 1.0, 20)):
"""Random Gaussian blur
"""
def gaussian_blur_fn(radius):
return transforms.Lambda(lambda img: img.filter(ImageFilter.GaussianBlur(radius)))
blurs = [gaussian_blur_fn(radius) for radius in radii]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(blurs, base_transform))
return Augmentation('blur', blurs, aug_dataset)
def rotation_crop_blur(base_dataset, base_transform, angles=range(-15, 16, 2),
size=28, scale=(0.64, 1.0), n_random_samples=31,
radii=np.linspace(0.05, 1.0, 20)):
"""All 3: rotations, random crops, and blurs
"""
rotations = rotation(base_dataset, base_transform, angles).transforms
random_resized_crops = resized_crop(base_dataset, base_transform, size, scale, n_random_samples).transforms
blurs = blur(base_dataset, base_transform, radii).transforms
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(rotations + random_resized_crops + blurs, base_transform))
    return Augmentation('rotation_crop_blur', rotations + random_resized_crops + blurs, aug_dataset)
def hflip(base_dataset, base_transform):
"""Horizontal flip
"""
flip = [transforms.Lambda(lambda img: img.transpose(Image.FLIP_LEFT_RIGHT))]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(flip, base_transform))
return Augmentation('hflip', flip, aug_dataset)
def hflip_vflip(base_dataset, base_transform):
"""Both horizontal and vertical flips
"""
allflips = [transforms.Lambda(lambda img: img.transpose(Image.FLIP_LEFT_RIGHT)),
transforms.Lambda(lambda img: img.transpose(Image.FLIP_TOP_BOTTOM)),
transforms.Lambda(lambda img: img.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.FLIP_TOP_BOTTOM))]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(allflips, base_transform))
return Augmentation('hflip_vflip', allflips, aug_dataset)
def brightness(base_dataset, base_transform, brightness_factors=np.linspace(1 - 0.25, 1 + 0.25, 11)):
"""Random brightness adjustment
"""
def brightness_fn(brightness_factor):
return transforms.Lambda(lambda img: ImageEnhance.Brightness(img).enhance(brightness_factor))
brightness_transforms = [brightness_fn(factor) for factor in brightness_factors]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(brightness_transforms, base_transform))
return Augmentation('brightness', brightness_transforms, aug_dataset)
def contrast(base_dataset, base_transform, contrast_factors=np.linspace(1 - 0.35, 1 + 0.35, 11)):
"""Random contrast adjustment
"""
def contrast_fn(contrast_factor):
return transforms.Lambda(lambda img: ImageEnhance.Contrast(img).enhance(contrast_factor))
contrast_transforms = [contrast_fn(factor) for factor in contrast_factors]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(contrast_transforms, base_transform))
return Augmentation('contrast', contrast_transforms, aug_dataset)
# === end of augmentation_code-master :: augmentation.py ===
import copy
import numpy as np
import pathlib
import torch
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import LinearLogisticRegression, RBFLogisticRegression, LinearLogisticRegressionAug, RBFLogisticRegressionAug, LeNet, LeNetAug, combine_transformed_dimension, split_transformed_dimension
from augmentation import copy_with_new_transform, augment_transforms, rotation, resized_crop, blur, rotation_crop_blur, hflip, hflip_vflip, brightness, contrast
from utils import get_train_valid_datasets, train, train_all_epochs, accuracy, all_losses, train_models_compute_agreement, agreement_kl_accuracy, kernel_target_alignment, kernel_target_alignment_augmented
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
batch_size = 256
if device.type == 'cuda':
loader_args = {'num_workers': 16, 'pin_memory': True}
else:
loader_args = {'num_workers': 4, 'pin_memory': False}
def loader_from_dataset(dataset):
return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, **loader_args)
# Construct a loader from the MNIST dataset, then construct loaders for the
# augmented datasets (one per set of transformations).
mnist_normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
])
mnist_train = datasets.MNIST(
'../data', train=True, download=True, transform=mnist_normalize)
mnist_test = datasets.MNIST(
'../data', train=False, download=True, transform=mnist_normalize)
mnist_train, mnist_valid = get_train_valid_datasets(mnist_train)
train_loader = loader_from_dataset(mnist_train)
valid_loader = loader_from_dataset(mnist_valid)
test_loader = loader_from_dataset(mnist_test)
augmentations = [rotation(mnist_train, mnist_normalize),
resized_crop(mnist_train, mnist_normalize),
blur(mnist_train, mnist_normalize),
rotation_crop_blur(mnist_train, mnist_normalize),
hflip(mnist_train, mnist_normalize),
hflip_vflip(mnist_train, mnist_normalize),
brightness(mnist_train, mnist_normalize),
contrast(mnist_train, mnist_normalize)]
n_features = 28 * 28
n_classes = 10
gamma = 0.003 # gamma hyperparam for RBF kernel exp(-gamma ||x - y||^2). Best gamma is around 0.001--0.003
n_components = 10000
sgd_n_epochs = 15
n_trials = 10
model_factories = {'linear': lambda: LinearLogisticRegressionAug(n_features, n_classes, approx=False),
'kernel': lambda: RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False),
'lenet': lambda: LeNetAug(approx=False)}
def sgd_opt_from_model(model, learning_rate=0.01, momentum=0.9, weight_decay=0.001):
return optim.SGD((p for p in model.parameters() if p.requires_grad),
lr=learning_rate, momentum=momentum,
weight_decay=weight_decay)
def train_basic_models(train_loader, augmented_loader):
"""Train a few simple models with data augmentation / approximation, as a
sanity check.
"""
models = [
LinearLogisticRegressionAug(n_features, n_classes), # No augmentation, accuracy around 92.5%
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components), # Accuracy around 97.5%
LeNetAug(), # Accuracy around 98.7%
LinearLogisticRegressionAug(n_features, n_classes, approx=False), # Augmented data, exact objective
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False),
LeNetAug(approx=False),
LinearLogisticRegressionAug(n_features, n_classes, regularization=False), # Augmented data, 1st order approx
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, regularization=False),
LeNetAug(),
LinearLogisticRegressionAug(n_features, n_classes, regularization=True), # Augmented data, 2nd order approx
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, regularization=True),
LeNetAug(regularization=True)
]
loaders = [train_loader, train_loader, train_loader,
augmented_loader, augmented_loader, augmented_loader,
augmented_loader, augmented_loader, augmented_loader,
augmented_loader, augmented_loader, augmented_loader]
for model, loader in zip(models, loaders):
model.to(device)
optimizer = sgd_opt_from_model(model)
train_loss, train_acc, valid_acc = train_all_epochs(loader, valid_loader, model,
optimizer, sgd_n_epochs)
correct, total = accuracy(test_loader, model)
print(f'Test set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)\n')
def objective_difference(augmentations):
"""Measure the difference in approximate and true objectives as we train on
the true objective.
"""
for model_name in ['kernel', 'lenet']:
for augmentation in augmentations:
for seed in range(n_trials):
print(f'Seed: {seed}')
torch.manual_seed(seed)
model = model_factories[model_name]().to(device)
optimizer = sgd_opt_from_model(model)
loader = loader_from_dataset(augmentation.dataset)
model.train()
losses = []
losses.append(all_losses(loader, model).mean(axis=0))
train_loss, train_acc, valid_acc = [], [], []
for epoch in range(sgd_n_epochs):
train_loss_epoch, train_acc_epoch = train(loader, model, optimizer)
train_loss += train_loss_epoch
train_acc += train_acc_epoch
print(f'Train Epoch: {epoch}')
correct, total = accuracy(valid_loader, model)
valid_acc.append(correct / total)
print(
f'Validation set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)'
)
losses.append(np.array(all_losses(loader, model)).mean(axis=0))
train_loss, train_acc, valid_acc = np.array(train_loss), np.array(train_acc), np.array(valid_acc)
np.savez(f'saved/train_valid_acc_{model_name}_{augmentation.name}_{seed}.npz',
train_loss=train_loss, train_acc=train_acc, valid_acc=valid_acc)
losses = np.array(losses).T
np.save(f'saved/all_losses_{model_name}_{augmentation.name}_{seed}.npy', losses)
def accuracy_on_true_objective(augmentations):
"""Measure the accuracy when trained on true augmented objective.
"""
for model_name in ['kernel', 'lenet']:
for augmentation in augmentations:
for seed in range(n_trials):
print(f'Seed: {seed}')
torch.manual_seed(seed)
model = model_factories[model_name]().to(device)
optimizer = sgd_opt_from_model(model)
loader = loader_from_dataset(augmentation.dataset)
train_loss, train_acc, valid_acc = train_all_epochs(loader, valid_loader, model, optimizer, sgd_n_epochs)
train_loss, train_acc, valid_acc = np.array(train_loss), np.array(train_acc), np.array(valid_acc)
np.savez(f'saved/train_valid_acc_{model_name}_{augmentation.name}_{seed}.npz',
train_loss=train_loss, train_acc=train_acc, valid_acc=valid_acc)
def exact_to_og_model(model):
"""Convert model training on exact augmented objective to model training on
original data.
"""
model_og = copy.deepcopy(model)
model_og.approx = True
model_og.feature_avg = False
model_og.regularization = False
return model_og
def exact_to_1st_order_model(model):
"""Convert model training on exact augmented objective to model training on
1st order approximation.
"""
model_1st = copy.deepcopy(model)
model_1st.approx = True
model_1st.feature_avg = True
model_1st.regularization = False
return model_1st
def exact_to_2nd_order_no_1st_model(model):
"""Convert model training on exact augmented objective to model training on
2nd order approximation without feature averaging (1st order approx).
"""
model_2nd_no_1st = copy.deepcopy(model)
model_2nd_no_1st.approx = True
model_2nd_no_1st.feature_avg = False
model_2nd_no_1st.regularization = True
return model_2nd_no_1st
def exact_to_2nd_order_model(model):
"""Convert model training on exact augmented objective to model training on
2nd order approximation.
"""
model_2nd = copy.deepcopy(model)
model_2nd.approx = True
model_2nd.feature_avg = True
model_2nd.regularization = True
return model_2nd
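# Summary of the converters above (flags set on the deep copy, as read off the
# function bodies):
#   exact_to_og_model:               approx=True, feature_avg=False, regularization=False  (original-data objective)
#   exact_to_1st_order_model:        approx=True, feature_avg=True,  regularization=False  (1st order approximation)
#   exact_to_2nd_order_no_1st_model: approx=True, feature_avg=False, regularization=True   (2nd order term only)
#   exact_to_2nd_order_model:        approx=True, feature_avg=True,  regularization=True   (full 2nd order approximation)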
def exact_to_2nd_order_model_layer_avg(model, layer_to_avg=3):
"""Convert LeNet model training on exact augmented objective to model
training on 2nd order approximation, but approximation is done at different
layers.
"""
model_2nd = copy.deepcopy(model)
model_2nd.approx = True
model_2nd.feature_avg = True
model_2nd.regularization = True
model_2nd.layer_to_avg = layer_to_avg
# Can't use the regularization function specialized to linear model unless
# averaging at layer 4.
if layer_to_avg != 4:
        model_2nd.regularization_2nd_order = model_2nd.regularization_2nd_order_general
return model_2nd
def agreement_kl_difference(augmentations):
"""Measure the agreement and KL divergence between the predictions produced
by model trained on exact augmentation objectives vs models trained on
approximate objectives.
"""
model_variants = {'kernel': lambda model: [model, exact_to_og_model(model), exact_to_1st_order_model(model),
exact_to_2nd_order_no_1st_model(model), exact_to_2nd_order_model(model)],
'lenet': lambda model: [model, exact_to_og_model(model), exact_to_1st_order_model(model),
exact_to_2nd_order_no_1st_model(model)] +
[exact_to_2nd_order_model_layer_avg(model, layer_to_avg) for layer_to_avg in [4, 3, 2, 1, 0]]}
for model_name in ['kernel', 'lenet']:
for augmentation in augmentations:
for seed in range(n_trials):
print(f'Seed: {seed}')
torch.manual_seed(n_trials + seed)
loader = loader_from_dataset(augmentation.dataset)
model = model_factories[model_name]()
models = model_variants[model_name](model)
for model in models:
model.to(device)
optimizers = [sgd_opt_from_model(model) for model in models]
for model in models:
model.train()
train_agreement, valid_agreement, valid_acc, valid_kl = [], [], [], []
for epoch in range(sgd_n_epochs):
print(f'Train Epoch: {epoch}')
train_agreement_epoch = train_models_compute_agreement(loader, models, optimizers)
train_agreement.append(np.array(train_agreement_epoch).mean(axis=0))
# Agreement and KL on validation set
valid_agreement_epoch, valid_kl_epoch, valid_acc_epoch = agreement_kl_accuracy(valid_loader, models)
valid_agreement.append(np.array(valid_agreement_epoch).mean(axis=0))
valid_acc.append(np.array(valid_acc_epoch).mean(axis=0))
valid_kl.append(np.array(valid_kl_epoch).mean(axis=0))
train_agreement = np.array(train_agreement).T
valid_agreement = np.array(valid_agreement).T
valid_acc = np.array(valid_acc).T
valid_kl = np.array(valid_kl).T
np.savez(f'saved/train_valid_agreement_kl_{model_name}_{augmentation.name}_{seed}.npz',
train_agreement=train_agreement, valid_agreement=valid_agreement, valid_acc=valid_acc, valid_kl=valid_kl)
def find_gamma_by_alignment(train_loader, gamma_vals=(0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001)):
"""Example use of kernel target alignment: to pick the hyperparameter gamma
of the RBF kernel exp(-gamma ||x-y||^2) by computing the kernel target
alignment of the random features wrt different values of gamma.
The value of gamma giving the highest alignment is likely the best gamma.
"""
for gamma in gamma_vals:
model = RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False).to(device)
print(kernel_target_alignment(train_loader, model))
# Best gamma is 0.003
def alignment_comparison(augmentations):
"""Compute the kernel target alignment of different augmentations.
"""
alignment = []
model_name = 'kernel'
for augmentation in augmentations:
print(augmentation.name)
loader = loader_from_dataset(augmentation.dataset)
model = model_factories[model_name]().to(device)
alignment.append(kernel_target_alignment_augmented(loader, model, n_passes_through_data=10))
alignment = np.array(alignment)
alignment_no_transform = alignment[:, 1].mean()
np.save('saved/kernel_alignment.npy', np.array([alignment_no_transform] + list(alignment[:, 0])))
def alignment_lenet(augmentations):
"""Compute the kernel target alignment on LeNet. Since the feature map is
initialized to be random and then trained, unlike kernels where feature map
is fixed, kernel target alignment doesn't predict the accuracy at all.
"""
for augmentation in augmentations:
print(augmentation.name)
model_base = LeNet().to(device)
optimizer = sgd_opt_from_model(model_base)
# Train LeNet for 1 epoch first
_ = train_all_epochs(train_loader, valid_loader, model_base, optimizer, 1)
model = LeNetAug().to(device)
model.load_state_dict(model_base.state_dict())
loader = loader_from_dataset(augmentation.dataset)
print(kernel_target_alignment_augmented(loader, model))
def measure_computation_fraction_lenet(train_loader):
"""Measure percentage of computation time spent in each layer of LeNet.
"""
model = LeNet().to(device)
loader = train_loader
it = iter(loader)
data, target = next(it)
data, target = data.to(device), target.to(device)
    # We use IPython's %timeit. Uncomment and copy these into IPython.
# %timeit feat1 = model.layer_1(data)
# feat1 = model.layer_1(data)
# %timeit feat2 = model.layer_2(feat1)
# feat2 = model.layer_2(feat1)
# %timeit feat3 = model.layer_3(feat2)
# feat3 = model.layer_3(feat2)
# %timeit feat4 = model.layer_4(feat3)
# feat4 = model.layer_4(feat3)
# %timeit output = model.output_from_features(feat4)
# %timeit output = model(data)
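# Hedged alternative to the %timeit cells above for plain (non-IPython) runs:
# a small helper, not part of the original file, that times one forward stage.
# CUDA is synchronized so that asynchronous kernels are included in the timing.
def _time_stage(fn, *args, n_repeats=100):
    import time
    if device.type == 'cuda':
        torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(n_repeats):
        out = fn(*args)
    if device.type == 'cuda':
        torch.cuda.synchronize()
    return (time.perf_counter() - start) / n_repeats, out
# Example: per-stage times can then be compared as
#   t1, feat1 = _time_stage(model.layer_1, data)
#   t2, feat2 = _time_stage(model.layer_2, feat1)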
def memory_profile():
# Print out the resident Tensors
import gc
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
print(type(obj), obj.size(), obj.type())
        except Exception:
pass
def main():
pathlib.Path('saved').mkdir(parents=True, exist_ok=True)
# train_basic_models(train_loader, loader_from_dataset(augmentations[0].dataset))
objective_difference(augmentations[:4])
accuracy_on_true_objective(augmentations[4:])
agreement_kl_difference(augmentations[:4])
# find_gamma_by_alignment(train_loader)
alignment_comparison(augmentations)
# alignment_lenet(augmentations)
if __name__ == '__main__':
main()
|
augmentation_code-master
|
mnist_experiments.py
|
import copy
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from models import combine_transformed_dimension, split_transformed_dimension
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_train_valid_datasets(dataset,
valid_size=0.1,
random_seed=None,
shuffle=True):
"""
Utility function for loading and returning train and validation
datasets.
Parameters:
------
- dataset: the dataset, need to have train_data and train_labels attributes.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- random_seed: fix seed for reproducibility.
- shuffle: whether to shuffle the train/validation indices.
Returns:
-------
- train_dataset: training set.
- valid_dataset: validation set.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
num_train = len(dataset)
indices = list(range(num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_dataset, valid_dataset = copy.copy(dataset), copy.copy(dataset)
train_dataset.train_data = train_dataset.train_data[train_idx]
train_dataset.train_labels = train_dataset.train_labels[train_idx]
valid_dataset.train_data = valid_dataset.train_data[valid_idx]
valid_dataset.train_labels = valid_dataset.train_labels[valid_idx]
return train_dataset, valid_dataset
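# Hedged usage sketch (dataset names as in the experiment scripts):
#   mnist = datasets.MNIST('../data', train=True, download=True)
#   train_ds, valid_ds = get_train_valid_datasets(mnist, valid_size=0.1, random_seed=0)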
def train(data_loader, model, optimizer):
model.train()
train_loss, train_acc = [], []
for data, target in data_loader:
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
pred = model.predict(output)
loss = model.loss(output, target)
loss.backward()
optimizer.step()
acc = (pred == target).sum().item() / target.size(0)
train_loss.append(loss.item())
train_acc.append(acc)
return train_loss, train_acc
def train_models_compute_agreement(data_loader, models, optimizers):
train_agreement = []
for model in models:
model.train()
for data, target in data_loader:
data, target = data.to(device), target.to(device)
pred, loss = [], []
for model, optimizer in zip(models, optimizers):
optimizer.zero_grad()
output = model(data)
pred.append(model.predict(output))
loss_minibatch = model.loss(output, target)
loss_minibatch.backward()
optimizer.step()
loss.append(loss_minibatch.item())
# To avoid out-of-memory error, as these attributes prevent the memory from being freed
if hasattr(model, '_avg_features'):
del model._avg_features
if hasattr(model, '_centered_features'):
del model._centered_features
loss = np.array(loss)
pred = np.array([p.cpu().numpy() for p in pred])
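        # pred[0] comes from models[0], which (by construction in the experiment
        # scripts) is the model trained on the exact augmented objective, so each
        # row records the fraction of predictions agreeing with the exact model.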
train_agreement.append((pred == pred[0]).mean(axis=1))
return train_agreement
def train_all_epochs(train_loader,
valid_loader,
model,
optimizer,
n_epochs,
verbose=True):
model.train()
train_loss, train_acc, valid_acc = [], [], []
for epoch in range(n_epochs):
if verbose:
print(f'Train Epoch: {epoch}')
loss, acc = train(train_loader, model, optimizer)
train_loss += loss
train_acc += acc
correct, total = accuracy(valid_loader, model)
valid_acc.append(correct / total)
if verbose:
print(
f'Validation set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)'
)
return train_loss, train_acc, valid_acc
def accuracy(data_loader, model):
"""Accuracy over all mini-batches.
"""
training = model.training
model.eval()
correct, total = 0, 0
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
output = model(data)
pred = model.predict(output)
correct += (pred == target).sum().item()
total += target.size(0)
model.train(training)
return correct, total
def roc_auc(data_loader, model):
"""Accuracy over all mini-batches.
"""
training = model.training
model.eval()
y_true, y_score = [], []
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
output = model(data)
y_true.append(target)
y_score.append(torch.nn.Softmax(dim=-1)(output)[:, 1])
model.train(training)
y_true = torch.cat(y_true).cpu().numpy()
y_score = torch.cat(y_score).cpu().numpy()
return roc_auc_score(y_true, y_score)
def all_losses(data_loader, model):
"""All losses over all mini-batches.
"""
training = model.training
model.eval()
losses = []
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
losses.append([l.item() for l in model.all_losses(data, target)])
model.train(training)
return np.array(losses)
def agreement_kl_accuracy(data_loader, models):
training = [model.training for model in models]
for model in models:
model.eval()
valid_agreement, valid_acc, valid_kl = [], [], []
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
pred, out = [], []
for model in models:
output = model(data).detach()
out.append(output)
pred.append(model.predict(output))
pred = torch.stack(pred)
out = torch.stack(out)
log_prob = F.log_softmax(out, dim=-1)
prob = F.softmax(out[0], dim=-1)
valid_kl.append([F.kl_div(lp, prob, size_average=False).item() / prob.size(0) for lp in log_prob])
valid_acc.append((pred == target).float().mean(dim=1).cpu().numpy())
valid_agreement.append((pred == pred[0]).float().mean(dim=1).cpu().numpy())
for model, training_ in zip(models, training):
model.train(training_)
return valid_agreement, valid_kl, valid_acc
def kernel_target_alignment(data_loader, model, n_passes_through_data=10):
"""Compute kernel target alignment approximately by summing over
mini-batches. The number of mini-batches is controlled by @n_passes_through_data.
    A larger number of passes yields a more accurate result, but takes longer.
"""
inclass_kernel, kernel_fro_norm, inclass_num = [], [], []
with torch.no_grad():
for _ in range(n_passes_through_data):
for data, target in data_loader:
data, target = data.to(device), target.to(device)
features = model.features(data)
target = target[:, None]
same_labels = target == target.t()
K = features @ features.t()
inclass_kernel.append(K[same_labels].sum().item())
kernel_fro_norm.append((K * K).sum().item())
inclass_num.append(same_labels.long().sum().item())
inclass_kernel = np.array(inclass_kernel)
kernel_fro_norm = np.array(kernel_fro_norm)
inclass_num = np.array(inclass_num)
return inclass_kernel.mean(axis=0) / np.sqrt(kernel_fro_norm.mean(axis=0) * inclass_num.mean())
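# The estimator above (and the augmented variants below) computes the standard
# kernel-target alignment A(K, Y) = <K, YY^T>_F / (||K||_F * ||YY^T||_F): with
# one-hot labels, <K, YY^T>_F is the sum of K over same-label pairs and
# ||YY^T||_F^2 is the number of same-label pairs, matching the accumulated
# inclass_kernel, kernel_fro_norm and inclass_num quantities.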
def kernel_target_alignment_augmented(data_loader, model, n_passes_through_data=10):
"""Compute kernel target alignment on augmented dataset, of the original
features and averaged features. Alignment is approximately by summing over
minibatches. The number of minibatches is controlled by
@n_passes_through_data. Larger number of passes yields more accurate
result.
"""
inclass_kernel, kernel_fro_norm, inclass_num = [], [], []
with torch.no_grad():
for _ in range(n_passes_through_data):
for data, target in data_loader:
data, target = data.to(device), target.to(device)
n_transforms = data.size(1)
data = combine_transformed_dimension(data)
features = model.features(data)
features = split_transformed_dimension(features, n_transforms)
features_avg = features.mean(dim=1)
features_og = features[:, 0]
target = target[:, None]
same_labels = target == target.t()
K_avg = features_avg @ features_avg.t()
K_og = features_og @ features_og.t()
inclass_kernel.append([K_avg[same_labels].sum().item(), K_og[same_labels].sum().item()])
kernel_fro_norm.append([(K_avg * K_avg).sum().item(), (K_og * K_og).sum().item()])
inclass_num.append(same_labels.long().sum().item())
inclass_kernel = np.array(inclass_kernel)
kernel_fro_norm = np.array(kernel_fro_norm)
inclass_num = np.array(inclass_num)
return tuple(inclass_kernel.mean(axis=0) / np.sqrt(kernel_fro_norm.mean(axis=0) * inclass_num.mean()))
def kernel_target_alignment_augmented_no_avg(data_loader, model, n_passes_through_data=10):
"""Compute kernel target alignment approximately by summing over
    mini-batches. This is for the augmented dataset with no feature averaging,
    i.e. the kernel target alignment on the augmented dataset itself.
    The number of mini-batches is controlled by @n_passes_through_data.
    A larger number of passes yields a more accurate result, but takes longer.
"""
inclass_kernel, kernel_fro_norm, inclass_num = [], [], []
with torch.no_grad():
for _ in range(n_passes_through_data):
for data, target in data_loader:
data, target = data.to(device), target.to(device)
# Repeat target for augmented data points
target = target[:, None].repeat(1, data.shape[1]).view(-1)
data = combine_transformed_dimension(data)
features = model.features(data)
target = target[:, None]
same_labels = target == target.t()
K = features @ features.t()
inclass_kernel.append(K[same_labels].sum().item())
kernel_fro_norm.append((K * K).sum().item())
inclass_num.append(same_labels.long().sum().item())
inclass_kernel = np.array(inclass_kernel)
kernel_fro_norm = np.array(kernel_fro_norm)
inclass_num = np.array(inclass_num)
return inclass_kernel.mean(axis=0) / np.sqrt(kernel_fro_norm.mean(axis=0) * inclass_num.mean())
|
augmentation_code-master
|
utils.py
|
import copy
import numpy as np
import pathlib
import torch
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import LinearLogisticRegression, RBFLogisticRegression, RBFLogisticRegressionRotated, LinearLogisticRegressionAug, RBFLogisticRegressionAug, LeNet, LeNetAug, combine_transformed_dimension, split_transformed_dimension
from augmentation import copy_with_new_transform, augment_transforms, rotation, resized_crop, blur, rotation_crop_blur, hflip, hflip_vflip, brightness, contrast
from utils import get_train_valid_datasets, train, train_all_epochs, accuracy, all_losses, train_models_compute_agreement, agreement_kl_accuracy, kernel_target_alignment, kernel_target_alignment_augmented, kernel_target_alignment_augmented_no_avg
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
save_dir = 'saved_cifar10'
# save_dir = 'saved_cifar10_rerun_50_epochs'
# save_dir = 'saved_cifar10_rerun_100_epochs'
# save_dir = 'saved_cifar10_basic_models_3_channels'
batch_size = 256
if device.type == 'cuda':
loader_args = {'num_workers': 32, 'pin_memory': True}
# loader_args = {'num_workers': 16, 'pin_memory': True}
else:
loader_args = {'num_workers': 4, 'pin_memory': False}
def loader_from_dataset(dataset):
return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, **loader_args)
# Construct a loader from the CIFAR10 dataset, then construct loaders
# corresponding to the augmented datasets (w.r.t. different transformations).
cifar10_normalize = transforms.Compose([
# transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize((0.5, ), (0.5, ))
])
cifar10_train = datasets.CIFAR10(
'../data', train=True, download=True, transform=cifar10_normalize)
cifar10_test = datasets.CIFAR10(
'../data', train=False, download=True, transform=cifar10_normalize)
# For some reason the train labels are lists instead of torch.LongTensor
cifar10_train.train_labels = torch.LongTensor(cifar10_train.train_labels)
cifar10_test.test_labels = torch.LongTensor(cifar10_test.test_labels)
cifar10_train, cifar10_valid = get_train_valid_datasets(cifar10_train)
train_loader = loader_from_dataset(cifar10_train)
valid_loader = loader_from_dataset(cifar10_valid)
test_loader = loader_from_dataset(cifar10_test)
cifar10_normalize_rotate = transforms.Compose([
# transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation((-5, 5)),
transforms.ToTensor(),
transforms.Normalize((0.5, ), (0.5, ))
])
cifar10_test_rotated = datasets.CIFAR10(
'../data', train=False, download=True, transform=cifar10_normalize_rotate)
test_loader_rotated = loader_from_dataset(cifar10_test_rotated)
augmentations = [rotation(cifar10_train, cifar10_normalize, angles=range(-5, 6, 1)),
resized_crop(cifar10_train, cifar10_normalize, size=32),
blur(cifar10_train, cifar10_normalize),
rotation_crop_blur(cifar10_train, cifar10_normalize, size=32),
hflip(cifar10_train, cifar10_normalize),
hflip_vflip(cifar10_train, cifar10_normalize),
brightness(cifar10_train, cifar10_normalize),
contrast(cifar10_train, cifar10_normalize)]
n_channels = 3
size = 32
n_features = n_channels * size * size
n_classes = 10
gamma = 0.003 # gamma hyperparam for RBF kernel exp(-gamma ||x - y||^2). Best gamma is around 0.001--0.003
n_components = 10000
sgd_n_epochs = 15
n_trials = 10
model_factories = {'linear': lambda: LinearLogisticRegressionAug(n_features, n_classes),
'kernel': lambda: RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False),
'lenet': lambda: LeNetAug(n_channels=n_channels, size=size, approx=False)}
def sgd_opt_from_model(model, learning_rate=0.1, momentum=0.9, weight_decay=0.000):
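    # Note: despite its name, this variant currently returns an Adam optimizer
    # (the SGD version is kept commented out below); learning_rate and momentum
    # are therefore unused here.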
# return optim.SGD((p for p in model.parameters() if p.requires_grad),
# lr=learning_rate, momentum=momentum,
# weight_decay=weight_decay)
return optim.Adam((p for p in model.parameters() if p.requires_grad),
weight_decay=weight_decay)
def train_basic_models(train_loader, augmented_loader):
"""Train a few simple models with data augmentation / approximation, as a
sanity check.
"""
test_acc = []
test_acc_rotated = []
for seed in range(n_trials):
print(f'Seed: {seed}')
torch.manual_seed(seed)
models = [
LinearLogisticRegressionAug(n_features, n_classes), # No augmentation, accuracy around 92.5%
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components), # Accuracy around 97.5%
LeNetAug(n_channels=n_channels, size=size), # Accuracy around 98.7%
LinearLogisticRegressionAug(n_features, n_classes, approx=False), # Augmented data, exact objective
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False),
LeNetAug(n_channels=n_channels, size=size, approx=False),
LinearLogisticRegressionAug(n_features, n_classes, regularization=False), # Augmented data, 1st order approx
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, regularization=False),
LeNetAug(n_channels=n_channels, size=size),
LinearLogisticRegressionAug(n_features, n_classes, feature_avg=False, regularization=True), # Augmented data, 2nd order no 1st approx
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, feature_avg=False, regularization=True),
LeNetAug(n_channels=n_channels, size=size, feature_avg=False, regularization=True),
LinearLogisticRegressionAug(n_features, n_classes, regularization=True), # Augmented data, 2nd order approx
RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, regularization=True),
LeNetAug(n_channels=n_channels, size=size, regularization=True),
RBFLogisticRegressionRotated(n_features, n_classes, gamma=gamma, n_components=n_components, size=size, n_channels=n_channels)
]
loaders = [train_loader, train_loader, train_loader,
augmented_loader, augmented_loader, augmented_loader,
augmented_loader, augmented_loader, augmented_loader,
augmented_loader, augmented_loader, augmented_loader,
augmented_loader, augmented_loader, augmented_loader,
train_loader]
test_acc_per_seed = []
test_acc_rotated_per_seed = []
import time
for model, loader in zip(models, loaders):
start = time.time()
            # Debug leftovers (now commented out): the second line used to
            # overwrite every model from the list above with a fresh RBF model,
            # so only RBF models were ever trained in this loop.
            # model = RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components)
            # model = RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False)
# model = RBFLogisticRegressionRotated(n_features, n_classes, gamma=gamma, n_components=n_components, n_channels=n_channels, size=size)
# end = time.time()
# print(end - start)
model.to(device)
optimizer = sgd_opt_from_model(model)
# start = time.time()
train_loss, train_acc, valid_acc = train_all_epochs(loader, valid_loader, model,
optimizer, sgd_n_epochs, verbose=True)
end = time.time()
print(end - start)
correct, total = accuracy(test_loader, model)
print(f'Test set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)')
correct_rotated, total_rotated = accuracy(test_loader_rotated, model)
print(f'Test set rotated: Accuracy: {correct_rotated}/{total_rotated} ({correct_rotated/total_rotated*100:.4f}%)\n')
test_acc_per_seed.append(correct / total)
test_acc_rotated_per_seed.append(correct_rotated / total_rotated)
np.save(f'{save_dir}/basic_models_cifar10_test_accuracy_last_{seed}.np', np.array(test_acc_per_seed))
np.save(f'{save_dir}/basic_models_cifar10_test_accuracy_rotated_last_{seed}.np', np.array(test_acc_rotated_per_seed))
test_acc.append(np.array(test_acc_per_seed))
test_acc_rotated.append(np.array(test_acc_rotated_per_seed))
test_acc = np.array(test_acc)
test_acc_rotated = np.array(test_acc_rotated)
np.save(f'{save_dir}/basic_models_cifar10_test_accuracy_last', test_acc)
np.save(f'{save_dir}/basic_models_cifar10_test_accuracy_rotated_last', test_acc_rotated)
def objective_difference(augmentations):
"""Measure the difference in approximate and true objectives as we train on
the true objective.
"""
# for model_name in ['kernel', 'lenet']:
for model_name in ['lenet']:
for augmentation in augmentations:
for seed in range(n_trials):
print(f'Seed: {seed}')
torch.manual_seed(seed)
model = model_factories[model_name]().to(device)
optimizer = sgd_opt_from_model(model)
loader = loader_from_dataset(augmentation.dataset)
model.train()
losses = []
losses.append(all_losses(loader, model).mean(axis=0))
train_loss, train_acc, valid_acc = [], [], []
for epoch in range(sgd_n_epochs):
train_loss_epoch, train_acc_epoch = train(loader, model, optimizer)
train_loss += train_loss_epoch
train_acc += train_acc_epoch
print(f'Train Epoch: {epoch}')
correct, total = accuracy(valid_loader, model)
valid_acc.append(correct / total)
print(
f'Validation set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)'
)
losses.append(np.array(all_losses(loader, model)).mean(axis=0))
train_loss, train_acc, valid_acc = np.array(train_loss), np.array(train_acc), np.array(valid_acc)
np.savez(f'{save_dir}/train_valid_acc_{model_name}_{augmentation.name}_{seed}.npz',
train_loss=train_loss, train_acc=train_acc, valid_acc=valid_acc)
losses = np.array(losses).T
np.save(f'{save_dir}/all_losses_{model_name}_{augmentation.name}_{seed}.npy', losses)
def accuracy_on_true_objective(augmentations):
"""Measure the accuracy when trained on true augmented objective.
"""
for model_name in ['kernel', 'lenet']:
for augmentation in augmentations:
for seed in range(n_trials):
print(f'Seed: {seed}')
torch.manual_seed(seed)
model = model_factories[model_name]().to(device)
optimizer = sgd_opt_from_model(model)
loader = loader_from_dataset(augmentation.dataset)
train_loss, train_acc, valid_acc = train_all_epochs(loader, valid_loader, model, optimizer, sgd_n_epochs)
train_loss, train_acc, valid_acc = np.array(train_loss), np.array(train_acc), np.array(valid_acc)
np.savez(f'{save_dir}/train_valid_acc_{model_name}_{augmentation.name}_{seed}.npz',
train_loss=train_loss, train_acc=train_acc, valid_acc=valid_acc)
def exact_to_og_model(model):
"""Convert model training on exact augmented objective to model training on
original data.
"""
model_og = copy.deepcopy(model)
model_og.approx = True
model_og.feature_avg = False
model_og.regularization = False
return model_og
def exact_to_1st_order_model(model):
"""Convert model training on exact augmented objective to model training on
1st order approximation.
"""
model_1st = copy.deepcopy(model)
model_1st.approx = True
model_1st.feature_avg = True
model_1st.regularization = False
return model_1st
def exact_to_2nd_order_no_1st_model(model):
"""Convert model training on exact augmented objective to model training on
2nd order approximation without feature averaging (1st order approx).
"""
model_2nd_no_1st = copy.deepcopy(model)
model_2nd_no_1st.approx = True
model_2nd_no_1st.feature_avg = False
model_2nd_no_1st.regularization = True
return model_2nd_no_1st
def exact_to_2nd_order_model(model):
"""Convert model training on exact augmented objective to model training on
2nd order approximation.
"""
model_2nd = copy.deepcopy(model)
model_2nd.approx = True
model_2nd.feature_avg = True
model_2nd.regularization = True
return model_2nd
def exact_to_2nd_order_model_layer_avg(model, layer_to_avg=3):
"""Convert LeNet model training on exact augmented objective to model
training on 2nd order approximation, but approximation is done at different
layers.
"""
model_2nd = copy.deepcopy(model)
model_2nd.approx = True
model_2nd.feature_avg = True
model_2nd.regularization = True
model_2nd.layer_to_avg = layer_to_avg
# Can't use the regularization function specialized to linear model unless
# averaging at layer 4.
if layer_to_avg != 4:
        model_2nd.regularization_2nd_order = model_2nd.regularization_2nd_order_general
return model_2nd
def agreement_kl_difference(augmentations):
"""Measure the agreement and KL divergence between the predictions produced
by model trained on exact augmentation objectives vs models trained on
approximate objectives.
"""
model_variants = {'kernel': lambda model: [model, exact_to_og_model(model), exact_to_1st_order_model(model),
exact_to_2nd_order_no_1st_model(model), exact_to_2nd_order_model(model)],
'lenet': lambda model: [model, exact_to_og_model(model), exact_to_1st_order_model(model),
exact_to_2nd_order_no_1st_model(model)] +
[exact_to_2nd_order_model_layer_avg(model, layer_to_avg) for layer_to_avg in [4, 3, 2, 1, 0]]}
for model_name in ['kernel', 'lenet']:
# for model_name in ['lenet']:
for augmentation in augmentations:
for seed in range(5, 5 + n_trials):
print(f'Seed: {seed}')
torch.manual_seed(n_trials + seed)
loader = loader_from_dataset(augmentation.dataset)
model = model_factories[model_name]()
models = model_variants[model_name](model)
for model in models:
model.to(device)
optimizers = [sgd_opt_from_model(model) for model in models]
for model in models:
model.train()
train_agreement, valid_agreement, valid_acc, valid_kl = [], [], [], []
for epoch in range(sgd_n_epochs):
print(f'Train Epoch: {epoch}')
train_agreement_epoch = train_models_compute_agreement(loader, models, optimizers)
train_agreement.append(np.array(train_agreement_epoch).mean(axis=0))
# Agreement and KL on validation set
valid_agreement_epoch, valid_kl_epoch, valid_acc_epoch = agreement_kl_accuracy(valid_loader, models)
valid_agreement.append(np.array(valid_agreement_epoch).mean(axis=0))
valid_acc.append(np.array(valid_acc_epoch).mean(axis=0))
valid_kl.append(np.array(valid_kl_epoch).mean(axis=0))
train_agreement = np.array(train_agreement).T
valid_agreement = np.array(valid_agreement).T
valid_acc = np.array(valid_acc).T
valid_kl = np.array(valid_kl).T
np.savez(f'{save_dir}/train_valid_agreement_kl_{model_name}_{augmentation.name}_{seed}.npz',
train_agreement=train_agreement, valid_agreement=valid_agreement, valid_acc=valid_acc, valid_kl=valid_kl)
def find_gamma_by_alignment(train_loader, gamma_vals=(0.03, 0.01, 0.003, 0.001, 0.0003, 0.0001)):
"""Example use of kernel target alignment: to pick the hyperparameter gamma
of the RBF kernel exp(-gamma ||x-y||^2) by computing the kernel target
alignment of the random features wrt different values of gamma.
The value of gamma giving the highest alignment is likely the best gamma.
"""
for gamma in gamma_vals:
model = RBFLogisticRegressionAug(n_features, n_classes, gamma=gamma, n_components=n_components, approx=False).to(device)
print(kernel_target_alignment(train_loader, model))
# Best gamma is 0.003
def alignment_comparison(augmentations):
"""Compute the kernel target alignment of different augmentations.
"""
alignment = []
model_name = 'kernel'
for augmentation in augmentations[:1]:
print(augmentation.name)
loader = loader_from_dataset(augmentation.dataset)
model = model_factories[model_name]().to(device)
alignment.append(kernel_target_alignment_augmented(loader, model, n_passes_through_data=50))
print(alignment)
alignment = np.array(alignment)
alignment_no_transform = alignment[:, 1].mean()
np.save(f'{save_dir}/kernel_alignment.npy', np.array([alignment_no_transform] + list(alignment[:, 0])))
alignment_no_avg = []
model_name = 'kernel'
for augmentation in augmentations:
print(augmentation.name)
loader = loader_from_dataset(augmentation.dataset)
model = model_factories[model_name]().to(device)
alignment_no_avg.append(kernel_target_alignment_augmented_no_avg(loader, model, n_passes_through_data=10))
alignment_no_avg = np.array(alignment_no_avg)
np.save(f'{save_dir}/kernel_alignment_no_avg.npy', alignment_no_avg)
def measure_computation_fraction_lenet(train_loader):
"""Measure percentage of computation time spent in each layer of LeNet.
"""
model = LeNet(n_channels=n_channels, size=32).to(device)
loader = train_loader
it = iter(loader)
data, target = next(it)
data, target = data.to(device), target.to(device)
    # We use IPython's %timeit. Uncomment and copy these into IPython.
# %timeit feat1 = model.layer_1(data)
# feat1 = model.layer_1(data)
# %timeit feat2 = model.layer_2(feat1)
# feat2 = model.layer_2(feat1)
# %timeit feat3 = model.layer_3(feat2)
# feat3 = model.layer_3(feat2)
# %timeit feat4 = model.layer_4(feat3)
# feat4 = model.layer_4(feat3)
# %timeit output = model.output_from_features(feat4)
# %timeit output = model(data)
def main():
pathlib.Path(f'{save_dir}').mkdir(parents=True, exist_ok=True)
# train_basic_models(train_loader, loader_from_dataset(augmentations[0].dataset))
# objective_difference(augmentations[:4])
accuracy_on_true_objective(augmentations[:1])
# agreement_kl_difference(augmentations[:1])
# find_gamma_by_alignment(train_loader)
# alignment_comparison(augmentations)
# alignment_lenet(augmentations)
if __name__ == '__main__':
main()
|
augmentation_code-master
|
cifar10_experiments.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
from setuptools import setup, find_packages
def find_version() -> str:
with open('bisk/__init__.py', 'r') as f:
version_file = f.read()
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
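# find_version() expects bisk/__init__.py to contain a line of the form
# __version__ = '1.0.0' (the version value here is illustrative).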
setup(
name='bipedal-skills',
version=find_version(),
author='Meta AI Research',
author_email='jgehring@meta.com',
url='https://facebookresearch.github.io/hsd3',
license='MIT License',
description='Bipedal Skills RL Benchmark',
python_requires='>=3.7',
install_requires=[
'dm-control>=0.0.32',
'gym>=0.26',
'numpy>=1.9.0',
],
packages=find_packages(),
package_data={'bisk': ['assets/*.xml', 'assets/*.png']},
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
)
|
bipedal-skills-main
|
setup.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskGoalWall-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
@pytest.fixture
def env2d():
env = gym.make('BiskGoalWall-v1', robot='testcube2d')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_eoe_if_touched_wall(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
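    # 'upf' approximates the hover action for the test cube: torso weight
    # (mass * 9.81) divided by the z-actuator gear, so actions of the form
    # [dx, dy, upf] move horizontally while roughly holding height.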
terminated = False
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
step += 1
assert reward == 0
assert step < 250
def test_reward_goal1(env):
env.p.named.data.qvel['ball'][0:3] = [10, -3, 2.5]
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 1
def test_reward_goal2(env):
env.p.named.data.qvel['ball'][0:3] = [10, 3, 4]
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 2
def test_reward_nogoal(env):
env.p.named.data.qvel['ball'][0:3] = [10, 0, 2]
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 0
def test_no_reward_if_beyond_line(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Move beyond line without touching the ball
for _ in range(2):
obs, reward, terminated, truncated, info = env.step([0, 1, upf])
assert terminated == False
for _ in range(18):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert terminated == False
env.p.named.data.qvel['ball'][0:3] = [10, 0, 2]
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 0
env.p.named.data.qvel['ball'][0:3] = [10, -3, 2.5]
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 0
def test_reward_goal2d(env2d):
env = env2d
env.p.named.data.qvel['ball-x'] = 10
env.p.named.data.qvel['ball-z'] = 4
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 1
def test_reward_nogoal2d_1(env2d):
env = env2d
env.p.named.data.qvel['ball-x'] = 10
env.p.named.data.qvel['ball-z'] = 0
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 0
def test_reward_nogoal2d_2(env2d):
env = env2d
env.p.named.data.qvel['ball-x'] = 10
env.p.named.data.qvel['ball-z'] = 10
terminated = False
ret = 0
step = 0
while not terminated:
obs, reward, terminated, truncated, info = env.step([0, 0])
ret += reward
step += 1
assert step < 250
assert ret == 0
|
bipedal-skills-main
|
tests/test_goalwall.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import numpy as np
import bisk
@pytest.fixture
def env():
env = gym.make('BiskGoToSphere-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_scripted_policy(env):
for i in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
retrn = 0
while not (terminated or truncated):
target = obs['targets'][:2]
dir = target / np.linalg.norm(target)
dx, dy = 0, 0
if np.abs(target[0]) > np.abs(target[1]):
dx = np.sign(target[0])
else:
dy = np.sign(target[1])
obs, reward, terminated, truncated, info = env.step([dx, dy, 0])
retrn += reward
assert terminated
assert not truncated
assert retrn == 1
|
bipedal-skills-main
|
tests/test_gotosphere.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskHurdles-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
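    # Crossing is detected when the observed 'next_hurdle' x-offset jumps to
    # the following hurdle; per the assertions below, the environment grants
    # +1 exactly once for each newly cleared hurdle (hedged reading).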
# Cross hurdle
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_hurdle'][0] > obs['next_hurdle'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_hurdle'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Cross hurdle again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_hurdle'][0] > obs['next_hurdle'][0]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Cross next hurdle, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_hurdle'][0] > obs['next_hurdle'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_reward_stuck(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go forward -- should be stuck at first hurdle
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
|
bipedal-skills-main
|
tests/test_hurdles.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
# Simple test whether we can instantiate the env with a robot and do a single
# step.
def _create_helper(env_name: str, robot: str):
    env = gym.make(env_name, robot=robot)
env.reset(seed=0)
env.step(env.action_space.sample())
env.close()
env = gym.make(env_name.replace('-v', f'{robot}-v'))
env.reset(seed=0)
env.step(env.action_space.sample())
env.close()
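# The second gym.make above relies on the registration convention that embeds
# the robot name before the version suffix, e.g.
# 'BiskHurdles-v1' -> 'BiskHurdlesWalker-v1'.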
def _envs_helper(robot: str):
_create_helper('BiskHurdles-v1', robot)
_create_helper('BiskLimbo-v1', robot)
_create_helper('BiskHurdlesLimbo-v1', robot)
_create_helper('BiskGaps-v1', robot)
_create_helper('BiskStairs-v1', robot)
_create_helper('BiskGoalWall-v1', robot)
_create_helper('BiskGoToTargets-v1', robot)
def test_halfcheetah_create():
_envs_helper('HalfCheetah')
def test_walker_create():
_envs_helper('Walker')
def test_humanoid_create():
_envs_helper('Humanoid')
def test_humanoidpc_create():
_envs_helper('HumanoidPC')
def test_humanoidamasspc_create():
_envs_helper('HumanoidAMASSPC')
|
bipedal-skills-main
|
tests/test_robots.py
|
# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskPoleBalance-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_rewards(env):
terminated = False
while not terminated:
obs, reward, terminated, truncated, info = env.step([1, 0, 0])
if terminated:
assert reward == 0
else:
assert reward == 1
|
bipedal-skills-main
|
tests/test_polebalance.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import numpy as np
import bisk
@pytest.fixture
def env():
env = gym.make('BiskButterflies-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_fixed_policy(env):
    # fmt: off
policy = []
# Drop to floor
for i in range(5):
policy.append([0, 0, 0])
# Go in a circle
for d in (-1,1):
for i in range(15):
policy.append([d,0,0])
for i in range(15):
policy.append([0,d,0])
# And the other way
for d in (1,-1):
for i in range(15):
policy.append([d,0,0])
for i in range(15):
policy.append([0,d,0])
retrn = 0
for action in policy:
obs, reward, terminated, truncated, info = env.step(action)
assert (not terminated and not truncated)
retrn += reward
assert retrn == 4
|
bipedal-skills-main
|
tests/test_butterflies.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import numpy as np
import bisk
@pytest.fixture
def env():
env = gym.make('BiskGoToTargets-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_scripted_policy(env):
for i in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
retrn = 0
while not (terminated or truncated):
target = obs['targets'][:2]
dir = target / np.linalg.norm(target)
dx, dy = 0, 0
if np.abs(target[0]) > np.abs(target[1]):
dx = np.sign(target[0])
else:
dy = np.sign(target[1])
obs, reward, terminated, truncated, info = env.step([dx, dy, 0])
retrn += reward
assert not terminated
assert truncated
assert retrn == 20
|
bipedal-skills-main
|
tests/test_gototargets.py
|
# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
from bisk.features.joints import JointsFeaturizer
from bisk.single_robot import BiskSingleRobotEnv
def test_names_walker():
env = BiskSingleRobotEnv('walker')
ftzr = JointsFeaturizer(env.p, 'walker', 'robot')
assert ftzr.observation_space.shape == (60,)
assert ftzr().shape == ftzr.observation_space.shape
    # fmt: off
assert ftzr.feature_names() == [
'rootz:p',
'rootx:p',
'rooty:p',
'right_hip:p',
'right_knee:p',
'right_ankle:p',
'left_hip:p',
'left_knee:p',
'left_ankle:p',
'rootz:v',
'rootx:v',
'rooty:v',
'right_hip:v',
'right_knee:v',
'right_ankle:v',
'left_hip:v',
'left_knee:v',
'left_ankle:v',
'torso:crx',
'torso:cry',
'torso:crz',
'torso:ctx',
'torso:cty',
'torso:ctz',
'right_thigh:crx',
'right_thigh:cry',
'right_thigh:crz',
'right_thigh:ctx',
'right_thigh:cty',
'right_thigh:ctz',
'right_leg:crx',
'right_leg:cry',
'right_leg:crz',
'right_leg:ctx',
'right_leg:cty',
'right_leg:ctz',
'right_foot:crx',
'right_foot:cry',
'right_foot:crz',
'right_foot:ctx',
'right_foot:cty',
'right_foot:ctz',
'left_thigh:crx',
'left_thigh:cry',
'left_thigh:crz',
'left_thigh:ctx',
'left_thigh:cty',
'left_thigh:ctz',
'left_leg:crx',
'left_leg:cry',
'left_leg:crz',
'left_leg:ctx',
'left_leg:cty',
'left_leg:ctz',
'left_foot:crx',
'left_foot:cry',
'left_foot:crz',
'left_foot:ctx',
'left_foot:cty',
'left_foot:ctz',
]
    # fmt: on
env.close()
def test_exclude_walker():
env = BiskSingleRobotEnv('walker')
ftzr = JointsFeaturizer(
env.p, 'walker', 'robot', exclude='.*/(left|right)_foot'
)
assert ftzr.observation_space.shape == (48,)
assert ftzr().shape == ftzr.observation_space.shape
    # fmt: off
assert ftzr.feature_names() == [
'rootz:p',
'rootx:p',
'rooty:p',
'right_hip:p',
'right_knee:p',
'right_ankle:p',
'left_hip:p',
'left_knee:p',
'left_ankle:p',
'rootz:v',
'rootx:v',
'rooty:v',
'right_hip:v',
'right_knee:v',
'right_ankle:v',
'left_hip:v',
'left_knee:v',
'left_ankle:v',
'torso:crx',
'torso:cry',
'torso:crz',
'torso:ctx',
'torso:cty',
'torso:ctz',
'right_thigh:crx',
'right_thigh:cry',
'right_thigh:crz',
'right_thigh:ctx',
'right_thigh:cty',
'right_thigh:ctz',
'right_leg:crx',
'right_leg:cry',
'right_leg:crz',
'right_leg:ctx',
'right_leg:cty',
'right_leg:ctz',
'left_thigh:crx',
'left_thigh:cry',
'left_thigh:crz',
'left_thigh:ctx',
'left_thigh:cty',
'left_thigh:ctz',
'left_leg:crx',
'left_leg:cry',
'left_leg:crz',
'left_leg:ctx',
'left_leg:cty',
'left_leg:ctz',
]
    # fmt: on
env.close()
|
bipedal-skills-main
|
tests/test_features_joints.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskHurdles-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_render(env):
img = env.render(width=480, height=480)
assert img.shape == (480, 480, 3)
|
bipedal-skills-main
|
tests/test_render.py
|
# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskLimbo-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, _ = env.reset(seed=0)
# Cross bar
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, 0])
if next_obs['next_bar'][0] > obs['next_bar'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_bar'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Cross bar again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_bar'][0] > obs['next_bar'][0]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Cross next bar, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_bar'][0] > obs['next_bar'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_reward_stuck(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go up so that we'll be stuck at the first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Low threshold, remaining momentum will bring us to the right height
if obs['observation'][2] - obs['next_bar'][1] >= -0.6:
break
# Go forward -- should be stuck at first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
|
bipedal-skills-main
|
tests/test_limbo.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskStairs-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Cross first step
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_steps'][0] > obs['next_steps'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_steps'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Cross step again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, 1])
if next_obs['next_steps'][0] > obs['next_steps'][0]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Cross next step, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, 1])
if next_obs['next_steps'][0] > obs['next_steps'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_reward_full(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
terminated, truncated = False, False
ret = 0
while not (terminated or truncated):
obs, reward, terminated, truncated, info = env.step([1, 0, 1])
ret += reward
assert ret == 20
def test_reward_stuck(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go forward -- should be stuck at first step
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
|
bipedal-skills-main
|
tests/test_stairs.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import numpy as np
import pytest
import bisk
def test_walker_fallover():
env = gym.make('BiskGoalWall-v1', robot='walker')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert terminated
assert not truncated
env.close()
def test_walker_continuous():
env = gym.make('BiskGoalWallC-v1', robot='walker')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert truncated
assert not terminated
env.close()
def test_humanoidcmupc_fallover():
env = gym.make('BiskGoalWall-v1', robot='humanoidcmupc')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert terminated
assert not truncated
env.close()
def test_humanoidcmupc_continuous():
env = gym.make('BiskGoalWallC-v1', robot='humanoidcmupc')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert truncated
assert not terminated
env.close()
|
bipedal-skills-main
|
tests/test_fallover.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskHurdlesLimbo-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
assert obs['next_obstacle'][0] == 0
# Cross hurdle
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 1
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_obstacle'][3] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
assert obs['next_obstacle'][0] == 0
# Cross hurdle again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 1
# Go up so that we'll be stuck at the first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Low threshold, remaining momentum will bring us to the right height
if obs['observation'][2] - obs['next_obstacle'][1] >= -0.6:
break
# Cross next bar, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 0
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_obstacle'][3] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
assert obs['next_obstacle'][0] == 1
# Cross bar again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 0
def test_reward_stuck1(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go forward -- should be stuck at first hurdle
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
def test_reward_stuck2(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Cross hurdle
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go up so that we'll be stuck at the first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Low threshold, remaining momentum will bring us to the right height
if obs['observation'][2] - obs['next_obstacle'][2] >= -0.6:
break
# Go forward -- should be stuck at first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
|
bipedal-skills-main
|
tests/test_hurdleslimbo.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskGaps-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_cross(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Go to platform
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_gap_platform'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Go to platform again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Go to next platform, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_touch_gap(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Go to gap
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][0] > obs['next_gap_platform'][0]:
break
obs = next_obs
# Go down into gap
for _ in range(8):
obs, reward, terminated, truncated, info = env.step([-1, 0, -0.6])
if terminated:
break
assert reward == -1
    assert terminated
def test_touch_platform(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Go to platform
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
break
obs = next_obs
# Go down on platform
for _ in range(8):
obs, reward, terminated, truncated, info = env.step([0, 0, -1])
assert reward == 0
    assert not terminated
|
bipedal-skills-main
|
tests/test_gaps.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = "2.0"
from gym.envs.registration import register
from bisk.base import BiskEnv
from bisk.single_robot import BiskSingleRobotEnv
def register_all(robot, shaped, fallover):
fallover_text = 'C' if fallover else ''
shaped_text = 'Shaped' if shaped else ''
register(
id=f'BiskGoToTargets{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.gototarget:BiskGoToTargetEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'tolerance': 0.5,
'goal_area': 8.0,
'num_targets': 2,
'goal_switch_steps': 1,
},
max_episode_steps=1000,
)
register(
id=f'BiskGoToSphere{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.gototarget:BiskGoToTargetEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'tolerance': 0.5,
'goal_area': 4.0,
'num_targets': 1,
'single_target': True,
'on_circle': True,
},
max_episode_steps=1000,
)
register(
id=f'BiskHurdles{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.hurdles:BiskHurdlesEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'max_height': 0.3,
'fixed_height': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskLimbo{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.limbo:BiskLimboEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'notouch': False,
'min_height': 'auto',
'fixed_height': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskHurdlesLimbo{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.hurdleslimbo:BiskHurdlesLimboEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'notouch': False,
'min_bar_height': 'auto',
'max_hurdle_height': 0.3,
'fixed_height': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskGaps{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.gaps:BiskGapsEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'max_size': 2.5,
'min_gap': 0.2,
'max_gap': 0.7,
'fixed_size': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskStairs{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.stairs:BiskStairsEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'step_height': 0.2,
'step_length_min': 0.5,
'step_length_max': 1.0,
'num_flights': 2,
},
max_episode_steps=1000,
)
register(
id=f'BiskStairsCont{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.stairs:BiskStairsEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'step_height': 0.2,
'step_length_min': 0.5,
'step_length_max': 1.0,
'num_flights': 10,
},
max_episode_steps=1000,
)
register(
id=f'BiskGoalWall{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.goalwall:BiskGoalWallEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'init_distance': 2.5,
'touch_ball_reward': 0,
},
max_episode_steps=250,
)
register(
id=f'BiskVelocityControl{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.velocitycontrol:BiskVelocityControlEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
},
max_episode_steps=1000,
)
if shaped:
register(
id=f'BiskRunDir{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.rundir:BiskRunDirEnv',
kwargs={
'robot': robot,
'features': 'joints',
                'allow_fallover': fallover,
'heading_deg': 0,
},
max_episode_steps=1000,
)
if not shaped:
register(
id=f'BiskPoleBalance{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.polebalance:BiskPoleBalanceEnv',
kwargs={
'robot': robot,
'features': 'joints',
                'allow_fallover': fallover,
'pole_mass': 0.5,
'pole_length': 0.5,
'n_poles': 1,
},
max_episode_steps=1000,
)
register(
id=f'BiskButterflies{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.butterflies:BiskButterfliesEnv',
kwargs={
'robot': robot,
'features': 'joints',
                'allow_fallover': fallover,
'shaped': shaped,
'goal_area': 4,
'n_butterflies': 10,
'zoff': 1.6,
},
max_episode_steps=1000,
)
for robot in (
'',
'HalfCheetah',
'Walker',
'Humanoid',
'HumanoidPC',
'HumanoidCMUPC',
'HumanoidAMASSPC',
):
for shaped in (False, True):
for fallover in (False, True):
register_all(robot, shaped, fallover)
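# Usage sketch: the registration loop above produces IDs such as
#   gym.make('BiskHurdlesWalker-v1')            # sparse reward
#   gym.make('BiskHurdlesShapedCWalker-v1')     # shaped reward, fallover allowed
#   gym.make('BiskHurdles-v1', robot='walker')  # robot selected at make() time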
|
bipedal-skills-main
|
bisk/__init__.py
|
# gym.utils.seeding from gym 0.18.3
# Released under an MIT license
# (https://github.com/openai/gym/blob/0.18.3/LICENSE.md)
#
# This is provided for consistency as seeding changed with gym 0.26.
import hashlib
import numpy as np
import os
import random as _random
import struct
import sys
from gym import error
def np_random(seed=None):
if seed is not None and not (isinstance(seed, int) and 0 <= seed):
raise error.Error('Seed must be a non-negative integer or omitted, not {}'.format(seed))
seed = create_seed(seed)
rng = np.random.RandomState()
rng.seed(_int_list_from_bigint(hash_seed(seed)))
return rng, seed
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
linear correlations between seeds of multiple PRNG's can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode('utf8')).digest()
return _bigint_from_bytes(hash[:max_bytes])
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode('utf8')
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2**(8 * max_bytes)
else:
raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = sizeof_int - len(bytes) % sizeof_int
bytes += b'\0' * padding
int_count = int(len(bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
raise error.Error('Seed must be non-negative, not {}'.format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints
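# Usage sketch for this vendored module:
#   rng, seed = np_random(42)  # rng is a np.random.RandomState
#   x = rng.uniform(size=3)
# hash_seed() decorrelates nearby integer seeds, so e.g. seeds 1 and 2
# produce unrelated PRNG streams.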
|
bipedal-skills-main
|
bisk/legacy_seeding.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Iterable, List
import gym
import numpy as np
from bisk.base import BiskEnv
from bisk.features import make_featurizer
from bisk.helpers import (add_ball, add_box, add_capsule, add_fwd_corridor,
add_robot, root_with_floor)
log = logging.getLogger(__name__)
class BiskSingleRobotEnv(BiskEnv):
def __init__(
self, robot: str, features: str = 'joints', allow_fallover: bool = False
):
super().__init__()
self.allow_fallover = allow_fallover
self.robot = robot.lower()
self.world_scale = 1.0
if self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
# The CMU/AMASS robots are about 1.3 the size of the default
# Humanoid
self.world_scale = 1.3
root = root_with_floor()
_, robot_pos = add_robot(
root, robot, 'robot', init=getattr(self, 'init_robot', None)
)
frameskip = 5
fs = root.find('numeric', 'robot/frameskip')
if fs is not None:
frameskip = int(fs.data[0])
self.init_sim(root, frameskip)
if self.robot.startswith('halfcheetah'):
# qpos is x_pos, z_pos, y_rot
self.init_qpos[0] = robot_pos[0]
self.init_qpos[1] = robot_pos[2]
elif self.robot.startswith('walker'):
# qpos is z_pos, x_pos, y_rot
self.init_qpos[0] = robot_pos[2]
self.init_qpos[1] = robot_pos[0]
elif self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
# initialize to upright position
self.init_qpos[0:3] = [0.0, 0.0, 1.2]
self.init_qpos[3:7] = [0.859, 1.0, 1.0, 0.859]
else:
# TODO Verify that this actually corresponds to the torso position?
self.init_qpos[0:3] = robot_pos
self.featurizer = self.make_featurizer(features)
self.observation_space = self.featurizer.observation_space
@property
def is_2d(self):
# TODO sth more proper? But it needs to be callable from init_sim, i.e.
# before the simulator instance is constructed.
return (
self.robot.startswith('halfcheetah')
or self.robot.startswith('walker')
or self.robot == 'testcube2d'
)
@property
def robot_pos(self) -> np.ndarray:
if self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
return self.p.named.data.xpos['robot/lowerneck']
else:
return self.p.named.data.xpos['robot/torso']
@property
def robot_speed(self) -> np.ndarray:
return self.p.named.data.subtree_linvel['robot/torso']
def make_featurizer(self, features: str):
return make_featurizer(features, self.p, self.robot, 'robot')
def reset_state(self):
noise = 0.1
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.standard_normal(self.p.model.nv)
self.p.data.qpos[:] = qpos
self.p.data.qvel[:] = qvel
self.featurizer.reset()
def get_observation(self):
return self.featurizer()
def fell_over(self) -> bool:
if self.robot.startswith('humanoid'):
zpos = self.robot_pos[2]
return bool(zpos < 0.9)
elif self.robot.startswith('halfcheetah'):
# Orientation pointing upwards and body almost on the ground
up = self.p.named.data.xmat['robot/torso', 'zz']
zpos = self.p.named.data.qpos['robot/rootz']
if up < -0.8 and zpos < 0.12:
return True
elif self.robot.startswith('walker'):
zpos = self.p.named.data.qpos['robot/rootz']
r = self.p.named.data.qpos['robot/rooty']
if zpos < 0.9 or r < -1.4 or r > 1.4:
return True
return False
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
if not self.allow_fallover and self.fell_over():
terminated = True
info['fell_over'] = True
return obs, reward, terminated, truncated, info
def add_box(
self,
root: 'dm_control.mjcf.RootElement',
name: str,
size: Iterable[float],
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
size = [s * self.world_scale for s in size]
if 'pos' in kwargs:
kwargs['pos'] = [p * self.world_scale for p in kwargs['pos']]
return add_box(root, name, size, rgba, with_body, **kwargs)
def add_capsule(
self,
root: 'dm_control.mjcf.RootElement',
name: str,
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
if 'fromto' in kwargs:
kwargs['fromto'] = [p * self.world_scale for p in kwargs['fromto']]
if 'size' in kwargs:
kwargs['size'] = [p * self.world_scale for p in kwargs['size']]
if 'pos' in kwargs:
kwargs['pos'] = [p * self.world_scale for p in kwargs['pos']]
return add_capsule(root, name, rgba, with_body, **kwargs)
def add_fwd_corridor(self, root: 'dm_control.mjcf.RootElement', W=4):
return add_fwd_corridor(root, W * self.world_scale)
class BiskSingleRobotWithBallEnv(BiskSingleRobotEnv):
def __init__(
self, robot: str, features: str = 'joints', allow_fallover: bool = False
):
super().__init__(robot, features, allow_fallover)
self.ball_qpos_idx: List[int] = []
self.ball_qvel_idx: List[int] = []
if self.is_2d:
for j in ['ball-x', 'ball-z', 'ball-ry']:
qppos = self.p.named.model.jnt_qposadr[j]
self.ball_qpos_idx.append(qppos)
qvpos = self.p.named.model.jnt_dofadr[j]
self.ball_qvel_idx.append(qvpos)
else:
qppos = self.p.named.model.jnt_qposadr['ball']
for i in range(3):
self.ball_qpos_idx.append(qppos + i)
qvpos = self.p.named.model.jnt_dofadr['ball']
for i in range(6):
self.ball_qvel_idx.append(qvpos + i)
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.ball_qpos_idx) + len(self.ball_qvel_idx),),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[('ball', obs_env), ('observation', obs_base)]
)
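    # Sketch of the 'ball' observation assembled below: in 2D it is
    # [x_rel, z, y_rot, vx, vz, wy]; in 3D it is the ball position (x/y
    # relative to the torso) followed by its 6-dim free-joint velocity.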
def init_sim(self, root: 'mjcf.RootElement', frameskip: int = 5):
ball_size = 0.15 * self.world_scale
add_ball(
root,
'ball',
size=ball_size,
mass=0.1 * self.world_scale,
twod=self.is_2d,
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
# Small noise for ball
noise = 0.01
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.standard_normal(self.p.model.nv)
self.p.data.qpos[self.ball_qpos_idx] = qpos[self.ball_qpos_idx]
self.p.data.qvel[self.ball_qvel_idx] = qvel[self.ball_qvel_idx]
def get_observation(self):
ball_qpos = self.p.data.qpos[self.ball_qpos_idx].ravel().copy()
ball_qvel = self.p.data.qvel[self.ball_qvel_idx]
# Ball X/Y position is relative to robot's torso
ball_qpos[0] -= self.robot_pos[0]
if not self.is_2d:
ball_qpos[1] -= self.robot_pos[1]
else:
# Normalize Y rotation to [-pi,pi], as MuJoCo produces large values
# occasionally.
ball_qpos[2] = np.arctan2(
np.sin(ball_qpos[2]), np.cos(ball_qpos[2])
)
return {
'observation': super().get_observation(),
'ball': np.concatenate([ball_qpos, ball_qvel]).astype(np.float32),
}
|
bipedal-skills-main
|
bisk/single_robot.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from typing import Tuple, Iterable
import logging
import numpy as np
from gym.utils import seeding
log = logging.getLogger(__name__)
FANCY_SKYBOX = False
def root_with_floor() -> 'dm_control.mjcf.RootElement':
'''
Constructs a root element with the commonly used checkered floor.
'''
from dm_control import mjcf
root = mjcf.RootElement()
if FANCY_SKYBOX:
root.asset.add(
'texture',
type='skybox',
file=f'{asset_path()}/rainbow.png',
gridsize=[3, 4],
gridlayout='.U..LFRB.D..',
)
else:
root.asset.add(
'texture',
type='skybox',
builtin='gradient',
width=800,
height=800,
rgb1=[0.3, 0.5, 0.7],
rgb2=[0, 0, 0],
)
root.asset.add(
'texture',
name='tex_plane',
builtin='checker',
width=100,
height=100,
rgb1=[0.2, 0.3, 0.4],
rgb2=[0.1, 0.15, 0.2],
type='2d',
)
root.asset.add(
'material',
name='mat_plane',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 1],
texuniform=True,
texture='tex_plane',
)
root.worldbody.add(
'geom',
name='floor',
type='plane',
size=[100, 100, 100],
rgba=[0.8, 0.9, 0.8, 1.0],
conaffinity=1,
condim=3,
material='mat_plane',
pos=[0, 0, 0],
)
root.worldbody.add(
'light',
cutoff=100,
diffuse=[1, 1, 1],
dir=[0, 0, -1.3],
directional=True,
exponent=1,
pos=[0, 0, 1.3],
specular=[0.1, 0.1, 0.1],
)
return root
def asset_path() -> str:
return os.path.join(os.path.dirname(__file__), 'assets')
def add_robot(
root: 'dm_control.mjcf.RootElement', kind: str, name: str, xyoff=None,
init=None,
) -> Tuple['dm_control.mjcf.Element', np.ndarray]:
'''
Add a robot to the root element.
    Returns the attachment frame and the original position of the robot's torso.
If the robot requires a fresh freejoint, it returns its original position
(so that qpos can be initialized accordingly); otherwise, (0, 0, 0) is
returned.
'''
from dm_control import mjcf
rm = mjcf.from_path(f'{asset_path()}/{kind.lower()}.xml')
if init is not None:
init(rm, name)
rm.model = name
torso = rm.find('body', 'torso')
if torso is None:
torso = rm.find('body', 'root')
pos = torso.pos
# Use a [0,0,0] torso pos when attaching the frame and rather set
# the default qpos manually later. dm_control's attachment frame
# logic (apparently?) resets the frame of reference of the freejoint.
torso.pos = [0, 0, 0]
if pos is None:
pos = torso.pos
if xyoff is not None:
pos[0] += xyoff[0]
pos[1] += xyoff[1]
root_joint = torso.find('joint', 'root')
if root_joint and (
root_joint.tag == 'freejoint' or root_joint.type == 'free'
):
root_joint.remove()
needs_freejoint = True
else:
needs_freejoint = False
af = root.attach(rm)
if needs_freejoint:
af.add('freejoint')
return af, pos
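# Usage sketch: spawn a robot into a fresh scene and keep its spawn position
# for qpos initialization, as BiskSingleRobotEnv.__init__ does:
#   root = root_with_floor()
#   frame, pos = add_robot(root, 'walker', 'robot')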
def add_box(
root: 'dm_control.mjcf.RootElement',
name: str,
size: Iterable[float],
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
if rgba is None:
rgba = np.array([0.8, 0.9, 0.8, 1])
body = root.worldbody
if with_body:
body = root.worldbody.add('body', name=name)
box = body.add(
'geom',
type='box',
name=name,
condim=3,
size=size,
rgba=rgba,
**kwargs,
)
return body if with_body else box
def add_capsule(
root: 'dm_control.mjcf.RootElement',
name: str,
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
if rgba is None:
rgba = np.array([0.8, 0.9, 0.8, 1])
body = root.worldbody
if with_body:
body = root.worldbody.add('body', name=name)
box = body.add(
'geom',
type='capsule',
name=name,
condim=3,
rgba=rgba,
**kwargs,
)
return body if with_body else box
def add_fwd_corridor(root: 'dm_control.mjcf.RootElement', W=4):
WH = 2
wall_alpha = 0.0 # for debugging
# Change rendering of floor to fit the intended path
floor = root.find('geom', 'floor')
floor.size = [100, W, 1]
floor.pos = [100 - W, 0, 0]
# Add border walls
root.worldbody.add(
'geom',
type='plane',
name='wall_left',
xyaxes=[1, 0, 0, 0, 0, 1],
size=[100, WH, 1],
pos=[100 - W, W, WH],
rgba=[0, 0, 0, wall_alpha],
)
root.worldbody.add(
'geom',
type='plane',
name='wall_right',
xyaxes=[-1, 0, 0, 0, 0, 1],
size=[100, WH, 1],
pos=[100 - W, -W, WH],
rgba=[0, 0, 0, wall_alpha],
)
root.worldbody.add(
'geom',
type='plane',
name='wall_back',
xyaxes=[0, 1, 0, 0, 0, 1],
size=[W, WH, 1],
        pos=[-W, 0, WH],
rgba=[0, 0, 0, wall_alpha],
)
# The ball element follows the element definitions in quadruped.xml from
# dm_control:
# https://github.com/deepmind/dm_control/blob/33cea51/dm_control/suite/quadruped.xml
def add_ball(root: 'dm_control.mjcf.RootElement',
name: str,
size: float,
mass: float,
twod: bool = False,
**kwargs) -> 'dm_control.mjcf.Element':
root.asset.add('texture',
name='ball',
builtin='checker',
mark='cross',
width=151,
height=151,
rgb1=[0.1, 0.1, 0.1],
rgb2=[0.9, 0.9, 0.9],
markrgb=[1, 1, 1])
root.asset.add('material', name='ball', texture='ball')
ball = root.worldbody.add('body', name=name, pos=[0, 0, 0])
if twod:
ball.add('joint',
name='ball-x',
type='slide',
damping=0,
axis=[1, 0, 0],
pos=[0, 0, 0],
range=[-1000, 1000])
ball.add('joint',
name='ball-z',
type='slide',
damping=0,
axis=[0, 0, 1],
pos=[0, 0, 0],
range=[-1000, 1000])
ball.add('joint',
name='ball-ry',
type='hinge',
damping=0,
axis=[0, 1, 0],
pos=[0, 0, 0],
range=[-np.pi, np.pi])
else:
ball.add('freejoint', name=name)
ball.add('geom',
type='sphere',
name=name,
size=[size],
mass=mass,
condim=6,
friction=[0.7, 0.005, 0.005],
solref=[-10000, -30],
material='ball',
priority=1)
return ball
|
bipedal-skills-main
|
bisk/helpers.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Optional
import gym
import numpy as np
from bisk import legacy_seeding as seeding
log = logging.getLogger(__name__)
class BiskEnv(gym.Env):
metadata = {'render_modes': ['rgb_array']}
def __init__(self):
# This is a stub; run init_sim() with a root element to set up the
# environment.
self.metadata = dict(**BiskEnv.metadata)
self.p = None
        self.np_random, _ = seeding.np_random(None)
def init_sim(self, root: 'mjcf.RootElement', frameskip: int = 5):
from dm_control import mjcf
if self.p is not None:
raise RuntimeError('Simulation already initialized')
self.p = mjcf.Physics.from_mjcf_model(root)
self.model = root
self.frameskip = frameskip
self.post_init()
def post_init(self):
self.init_qpos = self.p.data.qpos.ravel().copy()
self.init_qvel = self.p.data.qvel.ravel().copy()
# Expose all actuators
self.action_space = gym.spaces.Box(
self.p.model.actuator_ctrlrange[:, 0].astype(np.float32),
self.p.model.actuator_ctrlrange[:, 1].astype(np.float32),
dtype=np.float32,
)
# Leave observation space undefined in the base environment
self.metadata['render_fps'] = 1 / (self.p.model.opt.timestep * self.frameskip)
@property
def dt(self):
return self.p.model.opt.timestep * self.frameskip
def reset_state(self):
pass
def get_observation(self):
raise NotImplementedError()
def reset(self, seed: Optional[int] = None, options: Optional[dict] = None):
if seed is not None:
self.np_random, seed = seeding.np_random(seed)
if self.action_space is not None:
self.action_space.seed(seed)
if self.observation_space is not None:
self.observation_space.seed(seed)
# Disable contacts during reset to prevent potentially large contact
# forces that can be applied during initial positioning of bodies in
# reset_state().
with self.p.model.disable('contact'):
self.p.reset()
self.reset_state()
self.step_simulation()
return self.get_observation(), {}
def render(self, mode='rgb_array', **kwargs):
width = kwargs.get('width', 480)
height = kwargs.get('height', 480)
camera = kwargs.get('camera', 0)
flags = kwargs.get('flags', {})
return self.p.render(
width=width,
height=height,
camera_id=camera,
render_flag_overrides=flags,
)
def apply_action(self, action):
self.p.set_control(action)
def on_step_single_frame(self):
pass
def step_simulation(self):
from dm_control.mujoco.wrapper.mjbindings import mjlib
for _ in range(self.frameskip):
self.p.step()
self.on_step_single_frame()
# Call mj_rnePostConstraint to populate cfrc_ext (not done automatically
# in MuJoCo 2.0 unless the model defines the proper sensors)
mjlib.mj_rnePostConstraint(self.p.model.ptr, self.p.data.ptr)
# Same for subtree_linvel
mjlib.mj_subtreeVel(self.p.model.ptr, self.p.data.ptr)
def step(self, action):
from dm_control.rl.control import PhysicsError
self.apply_action(action)
try:
self.step_simulation()
except PhysicsError as e:
log.exception(e)
            return self.get_observation(), -1, True, False, {'physics_error': True}
return self.get_observation(), 0, False, False, {}
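# Minimal subclass sketch (hypothetical; assumes bisk.helpers.root_with_floor):
#   class EmptyWorldEnv(BiskEnv):
#       def __init__(self):
#           super().__init__()
#           self.init_sim(root_with_floor())
#       def get_observation(self):
#           return self.p.data.qpos.ravel().copy()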
|
bipedal-skills-main
|
bisk/base.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import gym
import numpy as np
from dm_control.utils import rewards
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskVelocityControlEnv(BiskSingleRobotEnv):
'''
Track a randomly changing velocity. From MoCapAct:
https://github.com/microsoft/MoCapAct/blob/e11713c/mocapact/tasks/velocity_control.py
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
max_speed: float = 4.5,
reward_margin: float = 0.75,
direction_exponent: float = 1.0,
steps_before_changing_velocity: int = 166,
**kwargs
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.max_speed = max_speed
self.reward_margin = reward_margin
self.direction_exponent = direction_exponent
self.steps_before_changing_velocity = steps_before_changing_velocity
obs_base = self.featurizer.observation_space
obs_task = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('target', obs_task), ('observation', obs_base)]
)
def sample_move_speed(self):
self.move_speed = self.np_random.uniform(high=self.max_speed)
if self.is_2d:
# Go forward or backward
self.move_angle = self.np_random.choice([0, np.pi])
else:
self.move_angle = self.np_random.uniform(high=2 * np.pi)
self.move_speed_counter = 0
def reset_state(self):
super().reset_state()
self.sample_move_speed()
def get_observation(self):
sin, cos = np.sin(self.move_angle), np.cos(self.move_angle)
phase = self.move_speed_counter / self.steps_before_changing_velocity
return {
'observation': super().get_observation(),
            'target': np.array(
                [self.move_speed, sin, cos, phase], dtype=np.float32
            ),
}
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
xvel, yvel = self.robot_speed[:2]
speed = np.linalg.norm([xvel, yvel])
speed_error = self.move_speed - speed
speed_reward = np.exp(-((speed_error / self.reward_margin) ** 2))
if np.isclose(xvel, 0.0) and np.isclose(yvel, 0.0):
dot = 0.0
angle_reward = 1.0
else:
direction = np.array([xvel, yvel])
direction /= np.linalg.norm(direction)
direction_tgt = np.array(
[np.cos(self.move_angle), np.sin(self.move_angle)]
)
dot = direction_tgt.dot(direction)
angle_reward = ((dot + 1) / 2) ** self.direction_exponent
speed_match = np.abs(speed_error) < 0.1
angle_match = dot > np.cos(np.deg2rad(15))
score = 1.0 if speed_match and angle_match else 0.0
if self.shaped:
reward = speed_reward * angle_reward
else:
reward = score
info['score'] = score
self.move_speed_counter += 1
if self.move_speed_counter >= self.steps_before_changing_velocity:
self.sample_move_speed()
obs = self.get_observation()
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
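    # Worked example of the shaped reward above (reward_margin = 0.75,
    # direction_exponent = 1): a speed error of 0.75 m/s gives
    # speed_reward = exp(-1) ~ 0.37; heading 15 degrees off-target gives
    # dot ~ 0.966 and angle_reward = (dot + 1) / 2 ~ 0.98, so reward ~ 0.36.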
|
bipedal-skills-main
|
bisk/tasks/velocitycontrol.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import Dict, List, Union
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskHurdlesLimboEnv(BiskSingleRobotEnv):
'''
Alternating hurdles and limbo bars.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
notouch: bool,
min_bar_height: Union[float, str],
max_hurdle_height: float,
fixed_height: bool,
**kwargs,
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.notouch = notouch
self.fixed_height = fixed_height
self.max_obstacles_cleared = 0
if min_bar_height == 'auto':
if self.robot.startswith('humanoid'):
self.min_bar_height = 1.0
elif self.robot.startswith('walker'):
self.min_bar_height = 1.2
else:
self.min_bar_height = 1.0
else:
self.min_bar_height = float(min_bar_height)
self.max_hurdle_height = max_hurdle_height
self.robot_geoms: List[int] = []
for g in self.p.named.model.body_geomadr.axes.row.names:
if g.startswith('robot/'):
self.robot_geoms.append(self.p.named.model.body_geomadr[g])
self.bar_geoms: Dict[int, int] = {}
for i, g in enumerate(self.p.named.model.geom_bodyid.axes.row.names):
if g.startswith('bar-'):
self.bar_geoms[i] = int(g.split('-')[1])
self.bar_geom_ids = set(self.bar_geoms.keys())
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_obstacle', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
# 200 obstacles should be enough for everybody
self.n_obstacles = 200
for i in range(self.n_obstacles):
if i % 2 == 0:
b = self.add_box(
root, f'hurdle-{i}', size=[0.05, W, 0.1], pos=[2, 0, 0.2]
)
else:
b = self.add_capsule(
root,
f'bar-{i}',
fromto=[2.025, -W, 0.1, 2.025, W, 0.1],
size=[0.1],
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_obstacles_cleared = 0
xpos = 1
intervals = self.np_random.uniform(3, 6, size=(self.n_obstacles,)) * self.world_scale
assert self.n_obstacles % 2 == 0
if self.fixed_height:
bar_heights = (
np.zeros(self.n_obstacles // 2)
+ self.min_bar_height * self.world_scale
)
hurdle_heights = (
np.zeros(self.n_obstacles // 2)
+ self.max_hurdle_height * self.world_scale
)
else:
bar_heights = (
self.np_random.uniform(
self.min_bar_height,
self.min_bar_height + 0.3,
size=(self.n_obstacles // 2,),
)
* self.world_scale
)
hurdle_heights = (
self.np_random.uniform(
0.1, self.max_hurdle_height, size=(self.n_obstacles // 2,)
)
* self.world_scale
)
self.obstacle_pos = []
self.obstacle_type = []
nm = self.p.named.model
for i in range(self.n_obstacles):
xpos += intervals[i]
self.obstacle_pos.append(xpos)
self.obstacle_type.append(i % 2)
if i % 2 == 0:
nm.geom_size[f'hurdle-{i}'][2] = hurdle_heights[i // 2]
nm.geom_pos[f'hurdle-{i}'][0] = xpos
nm.geom_pos[f'hurdle-{i}'][2] = (
hurdle_heights[i // 2] / 2 + 0.01
)
else:
nm.geom_pos[f'bar-{i}'][0] = xpos
nm.geom_pos[f'bar-{i}'][2] = (
bar_heights[i // 2] + nm.geom_size[f'bar-{i}'][0]
)
nm.geom_rgba[f'bar-{i}'] = [0.8, 0.9, 0.8, 1]
self.bar_hit = [False] * self.n_obstacles
self.new_bars_hit = set()
def get_observation(self):
no = self.next_obstacle_index()
if no < len(self.obstacle_pos):
next_obstacle_type = self.obstacle_type[no]
xpos = self.robot_pos[0]
nm = self.p.named.model
if next_obstacle_type == 0:
next_obstacle_d = nm.geom_pos[f'hurdle-{no}'][0] - xpos
next_obstacle_h = nm.geom_pos[f'hurdle-{no}'][2] * 2
else:
next_obstacle_d = nm.geom_pos[f'bar-{no}'][0] - xpos
next_obstacle_h = (
nm.geom_pos[f'bar-{no}'][2] + nm.geom_size[f'bar-{no}'][0]
)
else:
next_obstacle_d = 10.0
next_obstacle_h = 0.1
next_obstacle_cleared = no < self.max_obstacles_cleared
return {
'observation': super().get_observation(),
'next_obstacle': np.array(
[
next_obstacle_type,
next_obstacle_d,
next_obstacle_h,
not next_obstacle_cleared,
],
dtype=np.float32
),
}
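    # The 'next_obstacle' vector above is, in order: obstacle type
    # (0 = hurdle, 1 = bar), x-distance from the robot, obstacle height,
    # and a flag that is 1 until the obstacle has been cleared once.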
def next_obstacle_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.obstacle_pos, xpos)
def on_step_single_frame(self):
contact = self.p.data.contact
for i, c in enumerate(contact.geom1):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
for i, c in enumerate(contact.geom2):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
def step_simulation(self):
super().step_simulation()
self.max_obstacles_cleared = max(
self.max_obstacles_cleared, self.next_obstacle_index()
)
def step(self, action):
self.new_bars_hit = set()
mobefore = self.max_obstacles_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_obstacles_cleared > mobefore else 0
touched = False
for hit in self.new_bars_hit:
if not self.bar_hit[hit] and self.notouch:
touched = True
if self.notouch:
marked = [0.8, 0.0, 0.0, 1.0]
self.p.named.model.geom_rgba[f'bar-{hit}'] = marked
score -= 1
self.bar_hit[hit] = True
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/hurdleslimbo.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from dm_control.mujoco.wrapper.mjbindings import mjlib
from bisk.features import make_featurizer
from bisk.helpers import add_capsule, asset_path
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskButterfliesEnv(BiskSingleRobotEnv):
'''
Chasing butterflies. This is similar to more classic food gathering tasks,
but in three dimensions. The (humanoid) robot is equipped with a dip net and
has to collect as many "butterflies" (spheres floating in the air) as
    possible within an episode. The butterflies are projected onto a sphere
    around the robot's head, and a fixed-size long/lat grid contains the
    distance to the closest butterfly in each direction.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
goal_area: int,
n_butterflies: int,
zoff: int,
shaped: bool = False,
**kwargs,
):
self.n_butterflies = n_butterflies
super().__init__(robot, features, allow_fallover, **kwargs)
self.goal_area = goal_area
self.shaped = shaped
self.zoff = zoff
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(10 * 10,), # 10x10 long/lat grid
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[('butterflies', obs_env), ('observation', obs_base)]
)
def make_featurizer(self, features: str):
return make_featurizer(
features, self.p, self.robot, 'robot', exclude=r'robot/net'
)
def init_robot(self, robot: 'mjcf.RootElement', name: str):
size = 0.01 * self.world_scale
if self.robot in {'humanoid', 'humanoidpc'}:
raise NotImplementedError()
elif self.robot in {'humanoidcmupc', 'humanoidamasspc'}:
hand = robot.find('body', 'lhand')
handg = hand.find('geom', 'lhand')
zpos = handg.size[0]
net = hand.add(
'body',
name='net',
pos=handg.pos + [0, 0, -zpos / 2],
xyaxes=[0, -1, 0, 0, 0, 1],
)
dclass = handg.dclass.dclass
elif self.robot == 'testcube':
torso = robot.find('body', 'torso')
torsog = torso.find('geom', 'torso')
zpos = torsog.size[2]
net = torso.add('body', name='net', pos=[0, 0, zpos])
if torsog.dclass:
dclass = torsog.dclass.dclass
else:
dclass = None
else:
raise NotImplementedError(
'Humanoid robot required for BiskButterfliesEnv'
)
robot.asset.add(
'texture',
name='tex_net',
type='2d',
file=f'{asset_path()}/net.png',
)
robot.asset.add(
'material',
name='mat_net',
reflectance=0.5,
shininess=0.2,
specular=1,
texrepeat=[10, 10],
texuniform=False,
texture='tex_net',
)
net_length = 0.5 * self.world_scale
net_mass = 0.01
if self.robot == 'testcube':
net_radius = 0.5 * self.world_scale
else:
net_radius = 0.15 * self.world_scale
net.add(
'geom',
name='net_handle_geom',
type='capsule',
fromto=[0, 0, 0, 0, 0, net_length],
size=[size],
mass=net_mass,
dclass=dclass,
)
net.add(
'geom',
name='net_geom',
type='ellipsoid',
pos=[0, 0, net_length + net_radius],
size=(net_radius, net_radius, 1e-3),
xyaxes=[1, 0, 0, 0, 0, 1],
mass=net_mass,
dclass=dclass,
contype=3, # collide with body and butterflies
material='mat_net',
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
try:
from matplotlib import pyplot as plt
cmap = plt.get_cmap('rainbow')
        except ImportError:
cmap = lambda x: [1, 0, 0, 1]
root.asset.add(
'material',
name='butterfly',
reflectance=0,
shininess=0,
specular=0,
emission=0.5,
)
for i in range(self.n_butterflies):
root.worldbody.add(
'geom',
name=f'butterfly_{i}',
type='sphere',
pos=(i, 0.0, 1.0),
size=(0.1,),
rgba=cmap(i / self.n_butterflies),
material='butterfly',
gap=1.0, # high gap so that there won't be actual contact forces
conaffinity=2, # only collide with the net
)
super().init_sim(root, frameskip)
def get_observation(self):
nd = self.p.named.data
bf_geoms = [
idx
for idx, name in enumerate(nd.geom_xpos.axes.row.names)
if name.startswith('butterfly')
and not self.butterflies_caught[int(name.split('_')[1])]
]
grid = np.zeros((10, 10), dtype=np.float32)
if len(bf_geoms) == 0:
return {
'observation': super().get_observation(),
'butterflies': grid.flatten(),
}
try:
bf_rpos = np.dot(
nd.geom_xpos[bf_geoms] - nd.xpos['robot/head'],
nd.xmat['robot/head'].reshape(3, 3),
)
except KeyError:
bf_rpos = np.dot(
nd.geom_xpos[bf_geoms] - nd.xpos['robot/torso'],
nd.xmat['robot/torso'].reshape(3, 3),
)
bf_dist = np.linalg.norm(bf_rpos, axis=1)
bf_npos = np.divide(bf_rpos, bf_dist.reshape(bf_rpos.shape[0], 1)).T
lat = np.rad2deg(np.arccos(bf_npos[1]))
lon = np.rad2deg(np.arctan2(bf_npos[0], bf_npos[2]))
lat10 = np.floor(lat / 18).astype(np.int32)
lon10 = np.floor((lon + 180) / 36).astype(np.int32)
expdist = np.exp(-bf_dist)
for i, (x, y) in enumerate(zip(lat10, lon10)):
grid[x][y] = max(grid[x][y], expdist[i])
return {
'observation': super().get_observation(),
'butterflies': grid.flatten(),
}
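    # Worked example of the binning above: a butterfly at unit distance along
    # the head's z-axis has bf_npos = (0, 0, 1), so lat = arccos(0) = 90 deg
    # (row 5), lon = arctan2(0, 1) = 0 deg (column 5), and
    # grid[5][5] = exp(-1) ~ 0.37.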
def reset_state(self):
super().reset_state()
poss = self.np_random.uniform(-1.0, 1.0, size=(self.n_butterflies, 3))
scale = (
np.asarray([self.goal_area, self.goal_area, 0.5]) * self.world_scale
)
off = np.asarray([0, 0, self.zoff * self.world_scale])
if self.robot == 'testcube':
off[2] = 1
for i in range(self.n_butterflies):
self.p.named.model.geom_pos[f'butterfly_{i}'] = (
poss[i] * scale + off
)
self.p.named.model.geom_rgba[f'butterfly_{i}'][3] = 1
self.butterflies_caught = np.zeros(self.n_butterflies, dtype=np.int32)
def on_step_single_frame(self):
contact = self.p.data.contact
gnames = self.p.named.model.geom_type.axes.row.names
for c1, c2 in zip(contact.geom1, contact.geom2):
if not (
gnames[c1].startswith('butterfly_')
and gnames[c2] == 'robot/net_geom'
):
continue
id = int(gnames[c1].split('_')[1])
if self.butterflies_caught[id] == 0:
log.debug(f'contact: {gnames[c1]} - {gnames[c2]}')
self.butterflies_caught[id] = 1
self.p.named.model.geom_rgba[gnames[c1]][3] = 0.1
def step(self, action):
bfs_caught_before = self.butterflies_caught.sum()
obs, reward, terminated, truncated, info = super().step(action)
bfs_caught_after = self.butterflies_caught.sum()
score = bfs_caught_after - bfs_caught_before
info['score'] = score
# TODO: what's a good shaped reward here?
# Based on distance to the closest butterfly?
info['shaped_reward'] = score
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
terminated = True
reward = -1
        return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/butterflies.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from dm_control.mujoco.wrapper.mjbindings import mjlib
from bisk.features import make_featurizer
from bisk.helpers import add_capsule
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskPoleBalanceEnv(BiskSingleRobotEnv):
'''
Classic pole balancing, but with robots. The pole is attached to a suitable
point (top of the robot's torso or head) with 3 degrees of freedom for
rotation. If its angle surpasses a threshold, the episode ends.
If n_poles > 1, multiple poles will be stacked on top of each other, and
    each connection point will again have 3 degrees of freedom.
For 2D robots (HalfCheetah, Walker), the pole has just one degree of
freedom (rotation around Y).
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
pole_mass: float,
pole_length: float,
n_poles: int,
**kwargs,
):
self.pole_mass = pole_mass
self.pole_length = pole_length
self.n_poles = n_poles
super().__init__(robot, features, allow_fallover, **kwargs)
self.pole_qpos_idx: List[int] = []
self.pole_qvel_idx: List[int] = []
if self.robot in {'halfcheetah', 'walker'}:
for i in range(self.n_poles):
qppos = self.p.named.model.jnt_qposadr[f'robot/pole-{i}_rot']
self.pole_qpos_idx.append(qppos)
qvpos = self.p.named.model.jnt_dofadr[f'robot/pole-{i}_rot']
self.pole_qvel_idx.append(qvpos)
else:
for i in range(self.n_poles):
for j in range(4):
qppos = (
j
+ self.p.named.model.jnt_qposadr[f'robot/pole-{i}_rot']
)
self.pole_qpos_idx.append(qppos)
for j in range(3):
qvpos = (
j + self.p.named.model.jnt_dofadr[f'robot/pole-{i}_rot']
)
self.pole_qvel_idx.append(qvpos)
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.pole_qpos_idx) + len(self.pole_qvel_idx),),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[
('pole', obs_env),
('observation', obs_base),
]
)
def make_featurizer(self, features: str):
return make_featurizer(
features, self.p, self.robot, 'robot', exclude=r'robot/pole'
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
try:
from matplotlib import pyplot as plt
cmap = plt.get_cmap('rainbow')
        except ImportError:
cmap = lambda x: [1, 0, 0, 1]
size = 0.05 * self.world_scale
if self.robot in {'humanoid', 'humanoidpc'}:
size = 0.02 * self.world_scale
head = root.find('body', 'robot/head')
headg = head.find('geom', 'head')
zpos = headg.size[0]
pole = head.add('body', name='pole-0', pos=[0, 0, zpos])
elif self.robot in {'humanoidcmupc', 'humanoidamasspc'}:
size = 0.02 * self.world_scale
head = root.find('body', 'robot/head')
headg = head.find('geom', 'head')
zpos = headg.size[0]
pole = head.add(
'body',
name='pole-0',
pos=headg.pos + [0, zpos, 0],
xyaxes=[1, 0, 0, 0, 0, -1],
)
elif self.robot in {'halfcheetah'}:
torso = root.find('body', 'robot/torso')
pole = torso.add('body', name='pole-0', pos=[0, 0, 0])
elif self.robot in {'walker'}:
torso = root.find('body', 'robot/torso')
torsog = torso.find('geom', 'torso')
pole = torso.add('body', name='pole-0', pos=[0, 0, torsog.size[1]])
else:
try:
torso = root.find('body', 'robot/torso')
zpos = torso.find('geom', 'torso').size[2]
pole = torso.add('body', name='pole-0', pos=[0, 0, zpos])
            except Exception:
raise NotImplementedError(
f'Don\'t know how to place poles on a {self.robot} robot'
)
if self.robot in {'halfcheetah', 'walker'}:
# HalfCheetah model is defined in radians
limit = np.pi if self.robot == 'halfcheetah' else 180
pole.add(
'joint',
name='pole-0_rot',
type='hinge',
damping=0.1,
stiffness=0,
axis='0 1 0',
pos=[0, 0, 0],
range=[-limit, limit],
)
else:
pole.add(
'joint',
name='pole-0_rot',
damping=0.1,
type='ball',
pos=[0, 0, 0],
range=[0, 90],
)
pole.add(
'geom',
name='pole-0_geom',
type='capsule',
fromto=[0, 0, 0, 0, 0, self.pole_length * self.world_scale],
size=[size],
mass=self.pole_mass * self.world_scale,
rgba=cmap(0),
)
for i in range(1, self.n_poles):
pole = pole.add(
'body', name=f'pole-{i}', pos=[0, 0, self.pole_length * self.world_scale]
)
if self.robot in {'halfcheetah', 'walker'}:
limit = np.pi if self.robot == 'halfcheetah' else 180
pole.add(
'joint',
name=f'pole-{i}_rot',
type='hinge',
damping=0.1,
stiffness=0,
axis='0 1 0',
pos=[0, 0, 0],
range=[-limit, limit],
)
else:
pole.add(
'joint',
name=f'pole-{i}_rot',
type='ball',
damping=0.1,
pos=[0, 0, 0],
range=[0, 90],
)
pole.add(
'geom',
name=f'pole-{i}_geom',
type='capsule',
fromto=[0, 0, 0, 0, 0, self.pole_length * self.world_scale],
size=[size],
mass=self.pole_mass * self.world_scale,
rgba=cmap((i + 1) / self.n_poles),
)
super().init_sim(root, frameskip)
def get_observation(self):
pole_qpos = self.p.data.qpos[self.pole_qpos_idx]
        pole_qvel = self.p.data.qvel[self.pole_qvel_idx]
return {
'observation': super().get_observation(),
'pole': np.concatenate([pole_qpos, pole_qvel]).astype(np.float32),
}
def reset_state(self):
super().reset_state()
# Small noise for pole
noise = 0.01
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.standard_normal(self.p.model.nv)
self.p.data.qpos[self.pole_qpos_idx] = qpos[self.pole_qpos_idx]
self.p.data.qvel[self.pole_qvel_idx] = qvel[self.pole_qvel_idx]
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
reward = 1.0
# Failure is defined as the z range of bottom and top of pole tower
# falls below 20% of total length.
xpos = self.p.named.data.xpos
xquat = self.p.named.data.xquat
t = np.zeros(3)
mjlib.mju_rotVecQuat(
t,
np.array([0.0, 0.0, -self.pole_length / 2]) * self.world_scale,
xquat['robot/pole-0'],
)
bottom_z = xpos['robot/pole-0'][2] + t[2]
mjlib.mju_rotVecQuat(
t,
np.array([0.0, 0.0, self.pole_length / 2]) * self.world_scale,
xquat[f'robot/pole-{self.n_poles-1}'],
)
top_z = xpos[f'robot/pole-{self.n_poles-1}'][2] + t[2]
zthresh = 0.8 * self.n_poles * self.pole_length * self.world_scale
if top_z - bottom_z < zthresh:
terminated = True
score = 1 if not terminated else 0
info['score'] = score
reward = score
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
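    # Worked example of the failure test above: with n_poles = 1 and
    # pole_length = 0.5, zthresh = 0.4, and a single pole's vertical extent
    # is pole_length * cos(tilt); the episode thus ends once the pole tilts
    # past ~37 degrees from vertical (cos(37 deg) ~ 0.8).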
|
bipedal-skills-main
|
bisk/tasks/polebalance.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import Dict, List, Union
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskLimboEnv(BiskSingleRobotEnv):
'''
A limbo "dance" environment. There are bunch of geoms along the way which
the robot has to crouch under. Proper limbo posture is not enforced.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
notouch: bool,
min_height: Union[float, str],
fixed_height: bool,
**kwargs,
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.notouch = notouch
self.fixed_height = fixed_height
self.max_bars_cleared = 0
if min_height == 'auto':
if self.robot.startswith('humanoid'):
self.min_height = 1.0
elif self.robot.startswith('walker'):
self.min_height = 1.2
else:
self.min_height = 1.0
else:
self.min_height = float(min_height)
self.robot_geoms: List[int] = []
for g in self.p.named.model.body_geomadr.axes.row.names:
if g.startswith('robot/'):
self.robot_geoms.append(self.p.named.model.body_geomadr[g])
self.bar_geoms: Dict[int, int] = {}
for i, g in enumerate(self.p.named.model.geom_bodyid.axes.row.names):
if g.startswith('bar-'):
self.bar_geoms[i] = int(g.split('-')[1])
self.bar_geom_ids = set(self.bar_geoms.keys())
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_bar', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
# 200 bars should be enough for everybody
self.n_bars = 200
for i in range(self.n_bars):
b = self.add_capsule(
root,
f'bar-{i}',
fromto=[2.025, -W, 0.1, 2.025, W, 0.1],
size=[0.1],
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_bars_cleared = 0
xpos = 1
intervals = (
self.np_random.uniform(3, 6, size=(self.n_bars,)) * self.world_scale
)
if self.fixed_height:
heights = np.zeros(self.n_bars) + self.min_height * self.world_scale
else:
heights = (
self.np_random.uniform(
self.min_height, self.min_height + 0.3, size=(self.n_bars,)
)
* self.world_scale
)
self.bar_pos = []
nm = self.p.named.model
for i in range(self.n_bars):
xpos += intervals[i]
self.bar_pos.append(xpos)
nm.geom_pos[f'bar-{i}'][0] = xpos
nm.geom_pos[f'bar-{i}'][2] = (
heights[i] + nm.geom_size[f'bar-{i}'][0]
)
nm.geom_rgba[f'bar-{i}'] = [0.8, 0.9, 0.8, 1]
self.bar_hit = [False] * self.n_bars
self.new_bars_hit = set()
def get_observation(self):
nb = self.next_bar_index()
if nb < len(self.bar_pos):
xpos = self.robot_pos[0]
nm = self.p.named.model
next_bar_d = nm.geom_pos[f'bar-{nb}'][0] - xpos
next_bar_h = (
nm.geom_pos[f'bar-{nb}'][2] + nm.geom_size[f'bar-{nb}'][0]
)
else:
next_bar_d = 1.0
next_bar_h = 2.0
next_bar_cleared = nb < self.max_bars_cleared
return {
'observation': super().get_observation(),
'next_bar': np.array(
[next_bar_d, next_bar_h, not next_bar_cleared],
dtype=np.float32
),
}
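    # The 'next_bar' vector above is: x-distance to the next bar, the height
    # of the bar's top edge, and a flag that is 1 until that bar has been
    # cleared once.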
def next_bar_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.bar_pos, xpos)
def on_step_single_frame(self):
contact = self.p.data.contact
for i, c in enumerate(contact.geom1):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
for i, c in enumerate(contact.geom2):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
def step_simulation(self):
super().step_simulation()
self.max_bars_cleared = max(
self.max_bars_cleared, self.next_bar_index()
)
def step(self, action):
self.new_bars_hit = set()
mbbefore = self.max_bars_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_bars_cleared > mbbefore else 0
touched = False
for hit in self.new_bars_hit:
if not self.bar_hit[hit] and self.notouch:
touched = True
if self.notouch:
marked = [0.8, 0.0, 0.0, 1.0]
self.p.named.model.geom_rgba[f'bar-{hit}'] = marked
score -= 1
self.bar_hit[hit] = True
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/limbo.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from dm_control.utils import rewards
from bisk.single_robot import BiskSingleRobotWithBallEnv
log = logging.getLogger(__name__)
class BiskGoalWallEnv(BiskSingleRobotWithBallEnv):
'''
Goal wall shooting. In the dense-reward setting we allow for falling over
since the reward is the negative distance to the closest goal.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
init_distance: float,
touch_ball_reward: float,
**kwargs,
):
self.init_distance = init_distance
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.touch_ball_reward = touch_ball_reward
if self.touch_ball_reward > 0:
self.observation_space = gym.spaces.Dict(
[
('ball', self.observation_space.spaces['ball']),
(
'touched_ball',
gym.spaces.Box(
low=0, high=1, shape=(1,), dtype=np.float32
),
),
(
'observation',
self.observation_space.spaces['observation'],
),
]
)
self.ball_geom = self.p.named.model.body_geomadr['ball']
self.wall_geom = self.p.named.model.geom_type.axes.row.names.index(
'wall'
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
# Add wall
W = 3 * self.world_scale
WH = 1 * self.world_scale
WD = (4 + self.init_distance) * self.world_scale
root.asset.add(
'material',
name='mat_wall',
reflectance=0.5,
shininess=1,
emission=0.5,
specular=1,
)
root.worldbody.add(
'geom',
type='plane',
name='wall',
material='mat_wall',
xyaxes=[0, -1, 0, 0, 0, 1],
size=[W, WH, 1],
pos=[WD, 0, WH],
rgba=[0, 0.5, 0.1, 1],
)
# Add a visual marker
root.asset.add(
'texture',
name='tex_dnc',
builtin='checker',
width=50,
height=50,
rgb1=[0, 0, 0],
rgb2=[1, 0.8, 0],
type='2d',
)
root.asset.add(
'material',
name='mat_dnc',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 10],
texuniform=False,
texture='tex_dnc',
)
root.worldbody.add(
'site',
type='box',
name='line',
size=[0.1, W, 0.01],
pos=[1.5 + self.init_distance, 0, 0.02],
material='mat_dnc',
)
# rgba=[1, 0, 0, 0.3])
# Add goals on wall
if self.is_2d:
GP = WH
root.worldbody.add(
'site',
type='ellipsoid',
name='goal',
material='mat_wall',
size=[
0.01 * self.world_scale,
0.4 * self.world_scale,
0.4 * self.world_scale,
],
pos=[WD, 0, GP],
rgba=[1, 1, 1, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goalb',
material='mat_wall',
size=[
0.005 * self.world_scale,
0.45 * self.world_scale,
0.45 * self.world_scale,
],
pos=[WD, 0, GP],
rgba=[1, 0, 0, 1],
)
else:
root.worldbody.add(
'site',
type='ellipsoid',
name='goal1',
material='mat_wall',
size=[
0.01 * self.world_scale,
0.4 * self.world_scale,
0.4 * self.world_scale,
],
pos=[WD, -1, WH - 0.35],
rgba=[1, 1, 1, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goal1b',
material='mat_wall',
size=[
0.005 * self.world_scale,
0.45 * self.world_scale,
0.45 * self.world_scale,
],
pos=[WD, -1, WH - 0.35],
rgba=[1, 0, 0, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goal2',
material='mat_wall',
size=[
0.01 * self.world_scale,
0.4 * self.world_scale,
0.4 * self.world_scale,
],
pos=[WD, 1, WH + 0.35],
rgba=[1, 1, 1, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goal2b',
material='mat_wall',
size=[
0.005 * self.world_scale,
0.45 * self.world_scale,
0.45 * self.world_scale,
],
pos=[WD, 1, WH + 0.35],
rgba=[1, 0, 0, 1],
)
# This is the camera we'll use by default
euler = [80, -5, 0]
if root.compiler.angle == 'radian':
euler = [np.deg2rad(e) for e in euler]
root.worldbody.add(
'camera',
name='sideline',
mode='fixed',
pos=[WD / 3, -9 * self.world_scale, 2 * self.world_scale],
euler=euler,
)
super().init_sim(root, frameskip)
def get_observation(self):
obs = super().get_observation()
if self.touch_ball_reward > 0:
obs['touched_ball'] = np.array([float(self.ball_touched)],
dtype=np.float32)
return obs
def reset_state(self) -> None:
super().reset_state()
# Place ball
ball_size = self.p.named.model.geom_size['ball'][0]
if self.is_2d:
self.p.named.data.qpos['ball-x'] += self.init_distance
self.p.named.data.qpos['ball-z'] += ball_size + 0.1
else:
self.p.named.data.qpos['ball'][0] += self.init_distance
self.p.named.data.qpos['ball'][2] += ball_size + 0.1
self.ball_yz = None
self.ball_touched = False
def on_step_single_frame(self):
contact = self.p.data.contact
ball_wall = np.in1d(contact.geom1, self.wall_geom) & np.in1d(
contact.geom2, self.ball_geom
)
touching = contact.dist <= 0
if np.any(ball_wall & touching):
if self.is_2d:
self.ball_yz = [0, self.p.named.data.qpos['ball-z'][0]]
else:
self.ball_yz = self.p.named.data.qpos['ball'][1:3].copy()
if not self.ball_touched:
for c in contact:
names = self.p.named.model.name_geomadr.axes.row.names
if (
names[c.geom1].startswith('ball')
and names[c.geom2].startswith('robot')
and c.dist < 0
):
self.ball_touched = True
def step(self, action):
self.ball_yz = None
btbefore = self.ball_touched
obs, reward, terminated, truncated, info = super().step(action)
goal_hit = None
goal_dists = []
goal_sizes = []
if self.ball_yz is not None:
if self.is_2d:
goals = ('goal',)
else:
goals = ('goal1', 'goal2')
for g in goals:
d = np.linalg.norm(
self.ball_yz - self.p.named.data.site_xpos[g][1:3]
)
goal_dists.append(d)
goal_sizes.append(self.p.named.model.site_size[g][2])
if d <= self.p.named.model.site_size[g][2]:
goal_hit = g
break
score = 0
if goal_hit == 'goal' or goal_hit == 'goal1':
score = 1
elif goal_hit == 'goal2':
score = 2
info['score'] = score
        shaped_reward = 0
        for i, (d, s) in enumerate(zip(goal_dists, goal_sizes)):
            # Weight mirrors the score: goal i is worth i + 1 points
            shaped_reward += (i + 1) * rewards.tolerance(d, (0, s), margin=s)
info['shaped_reward'] = shaped_reward
reward = info['shaped_reward'] if self.shaped else score
if self.touch_ball_reward > 0 and self.ball_touched != btbefore:
reward += self.touch_ball_reward
# Zero reward if we're beyond the line
lpos = self.p.named.data.site_xpos['line', 'x']
if self.robot_pos[0] > lpos:
reward = 0
# Once we've hit the wall we're done
if self.ball_yz is not None:
terminated = True
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/goalwall.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskHurdlesEnv(BiskSingleRobotEnv):
'''
Jump over hurdles to progress in X-direction.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
max_height: float,
fixed_height: bool,
**kwargs,
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.max_height = max_height
self.fixed_height = fixed_height
self.max_hurdles_cleared = 0
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_hurdle', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
# 200 hurdles should be enough for everybody
self.n_hurdles = 200
for i in range(self.n_hurdles):
b = self.add_box(
root, f'hurdle-{i}', size=[0.05, W, 0.1], pos=[2, 0, 0.2]
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_hurdles_cleared = 0
xpos = 1
intervals = (
self.np_random.uniform(3, 6, size=(self.n_hurdles,))
* self.world_scale
)
if self.fixed_height:
heights = (
np.zeros(self.n_hurdles) + self.max_height * self.world_scale
)
else:
heights = (
self.np_random.uniform(
0.1, self.max_height, size=(self.n_hurdles,)
)
* self.world_scale
)
self.hurdle_pos = []
for i in range(self.n_hurdles):
xpos += intervals[i]
self.hurdle_pos.append(xpos)
self.p.named.model.geom_size[f'hurdle-{i}'][2] = heights[i]
self.p.named.model.geom_pos[f'hurdle-{i}'][0] = xpos
self.p.named.model.geom_pos[f'hurdle-{i}'][2] = (
heights[i] / 2 + 0.01
)
def get_observation(self):
nh = self.next_hurdle_index()
if nh < len(self.hurdle_pos):
xpos = self.robot_pos[0]
nm = self.p.named.model
next_hurdle_d = nm.geom_pos[f'hurdle-{nh}'][0] - xpos
next_hurdle_h = nm.geom_size[f'hurdle-{nh}'][2] * 2
else:
next_hurdle_d = 10.0
next_hurdle_h = 0.1
next_hurdle_cleared = nh < self.max_hurdles_cleared
return {
'observation': super().get_observation(),
'next_hurdle': np.array(
[next_hurdle_d, next_hurdle_h, not next_hurdle_cleared],
dtype=np.float32
),
}
def next_hurdle_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.hurdle_pos, xpos)
def step_simulation(self):
super().step_simulation()
self.max_hurdles_cleared = max(
self.max_hurdles_cleared, self.next_hurdle_index()
)
def step(self, action):
mhbefore = self.max_hurdles_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_hurdles_cleared > mhbefore else 0
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/hurdles.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskStairsEnv(BiskSingleRobotEnv):
'''
    Go up and down flights of fixed-height, variable-length stairs.
'''
def __init__(
self,
robot: str,
features: str,
shaped: bool,
step_height: float,
step_length_min: float,
step_length_max: float,
num_flights: int,
**kwargs,
):
self.step_height = step_height
self.step_length_min = step_length_min
self.step_length_max = step_length_max
self.num_flights = num_flights
super().__init__(robot, f'{features}-relz', **kwargs)
self.shaped = shaped
self.max_steps_cleared = 0
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[
('next_steps', obs_env),
('observation', obs_base),
]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
self.flight_steps = 10
self.n_steps = self.flight_steps * self.num_flights
self.start_pos = 3.0
self.top_width = 3.0
color1 = [0.8, 0.9, 0.8, 1.0]
color2 = [0.6, 0.6, 0.6, 1.0]
length = 0.5
xpos = self.start_pos + length / 2
step = 0
for flight in range(self.num_flights):
if flight % 2 == 0:
h2 = self.step_height / 2
for i in range(self.flight_steps):
self.add_box(
root,
f'step-{step}',
size=[length / 2, W, h2],
pos=[xpos, 0, h2],
rgba=color1 if i % 2 == 0 else color2,
)
step += 1
h2 += self.step_height / 2
xpos += length
h2 = self.flight_steps * self.step_height / 2
self.add_box(
root,
f'top-{step}',
size=[self.top_width, W, h2],
pos=[xpos + self.top_width - length / 2, 0, h2],
rgba=color1,
)
xpos += self.top_width * 2
else:
for i in range(self.flight_steps):
self.add_box(
root,
f'step-{step}',
size=[length / 2, W, h2],
pos=[xpos, 0, h2],
rgba=color1 if i % 2 == 1 else color2,
)
step += 1
h2 -= self.step_height / 2
xpos += length
xpos += self.top_width * 4
euler = [80, 0, 0]
if root.compiler.angle == 'radian':
euler = [np.deg2rad(e) for e in euler]
root.worldbody.add(
'camera',
name='stairs_side',
mode='trackcom',
pos=[0, -6, 1],
euler=euler,
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_steps_cleared = 0
self.step_pos: List[float] = []
lengths = (
self.np_random.uniform(
self.step_length_min,
self.step_length_max,
size=(self.n_steps + 1,),
)
* self.world_scale
)
nm = self.p.named.model
xpos = self.start_pos + lengths[0] / 2
step = 0
for flight in range(self.num_flights):
if flight % 2 == 0:
for _ in range(self.flight_steps):
nm.geom_size[f'step-{step}'][0] = lengths[step] / 2
nm.geom_pos[f'step-{step}'][0] = xpos
self.step_pos.append(xpos)
xpos += lengths[step] / 2 + lengths[step + 1] / 2
step += 1
nm.geom_pos[f'top-{step}'][0] = (
xpos + self.top_width - lengths[step] / 2
)
xpos += self.top_width * 2
else:
for _ in range(self.flight_steps):
nm.geom_size[f'step-{step}'][0] = lengths[step] / 2
nm.geom_pos[f'step-{step}'][0] = xpos
self.step_pos.append(xpos)
xpos += lengths[step] / 2 + lengths[step + 1] / 2
step += 1
xpos += self.top_width * 4
# Custom fall-over detection because we want to use the featurizer's
# relative Z position.
def fell_over(self) -> bool:
if self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
            # XXX We'd like the relative position of the lower neck, not the torso
# (which is at the center of the robot for this one).
abs_zpos = self.p.named.data.xpos['robot/torso', 'z']
zpos = self.featurizer.relz()
zdiff = abs_zpos - zpos
return bool(self.robot_pos[2] - zdiff < 0.9)
elif self.robot.startswith('humanoid'):
zpos = self.featurizer.relz()
return bool(zpos < 0.9)
elif self.robot.startswith('halfcheetah'):
# Orientation pointing upwards and body almost on the ground
up = self.p.named.data.xmat['robot/torso', 'zz']
zpos = self.featurizer.relz()
if up < -0.8 and zpos < 0.12:
return True
elif self.robot.startswith('walker'):
zpos = self.featurizer.relz()
r = self.p.named.data.qpos['robot/rooty']
if zpos < 0.9 or r < -1.4 or r > 1.4:
return True
return False
def get_observation(self):
ns = self.next_step_index()
xpos = self.robot_pos[0]
nm = self.p.named.model
if ns < len(self.step_pos):
next_step_d1 = nm.geom_pos[f'step-{ns}'][0] - xpos
if ns + 1 < len(self.step_pos):
next_step_d2 = nm.geom_pos[f'step-{(ns+1)}'][0] - xpos
else:
next_step_d2 = 10.0
else:
next_step_d1 = 10.0
next_step_d2 = 20.0
next_step_cleared = ns < self.max_steps_cleared
return {
'next_steps': np.array(
[next_step_d1, next_step_d2, not next_step_cleared],
dtype=np.float32,
),
'observation': super().get_observation(),
}
def next_step_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.step_pos, xpos)
def step_simulation(self):
super().step_simulation()
self.max_steps_cleared = max(
self.max_steps_cleared, self.next_step_index()
)
def step(self, action):
msbefore = self.max_steps_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_steps_cleared > msbefore else 0
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/stairs.py
|
bipedal-skills-main
|
bisk/tasks/__init__.py
|
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import gym
import numpy as np
from dm_control import mjcf
from dm_control.utils import rewards
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskGoToTargetEnv(BiskSingleRobotEnv):
'''
Simple 1D/2D navigation, a port of dm_control's GoToTarget task.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
tolerance: float,
goal_area: float,
num_targets: int = 1,
goal_switch_steps: int = 10,
single_target: bool = False,
on_circle: bool = False,
**kwargs,
):
self.shaped = shaped
self.goal_area = goal_area
self.tolerance = tolerance
self.goals = np.zeros((num_targets, 2))
self.goal_switch_steps = goal_switch_steps
self.on_circle = on_circle
self.steps_to_switch = 0
self.single_target = single_target
super().__init__(robot, features, allow_fallover, **kwargs)
obs_base = self.featurizer.observation_space
if self.is_2d:
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(1 * num_targets,),
dtype=np.float32,
)
else:
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(3 * num_targets,),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[('targets', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
for i in range(self.goals.shape[0]):
root.worldbody.add(
'site',
name=f'target_{i}',
type='sphere',
pos=(0.0, 0.0, 1.0),
size=(0.1,),
rgba=(0.9, 0.6, 0.6, 1.0 if i == 0 else 0.2),
)
root.worldbody.add(
'site',
name=f'target_tolerance_{i}',
type='ellipsoid',
pos=(0.0, 0.0, 1.0),
size=(self.tolerance, self.tolerance, 1e-3),
rgba=(0.9, 0.6, 0.6, 0.2 if i == 0 else 0.05),
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.sample_goal(all=True)
def get_observation(self):
if self.is_2d:
targets = self.goals[:, 0:1] - self.robot_pos[0:1]
else:
targets = np.zeros((self.goals.shape[0], 3))
targets[:, :2] = self.goals - self.robot_pos[:2]
targets = np.dot(
targets, self.p.named.data.xmat['robot/torso'].reshape(3, 3)
)
return {
'observation': super().get_observation(),
'targets': targets.flatten().astype(np.float32),
}
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
dist = np.linalg.norm(self.goals[0] - self.robot_pos[:2])
if dist < self.tolerance:
score = 1
self.steps_to_switch -= 1
else:
score = 0
info['score'] = score
info['distance'] = dist
info['shaped_reward'] = -0.1 * (
1
- rewards.tolerance(
dist,
(0, 0),
margin=self.goal_area / 2,
)
)
reward = info['shaped_reward'] if self.shaped else score
if self.steps_to_switch <= 0:
self.sample_goal()
obs = self.get_observation()
if info.get('fell_over', False):
terminated = True
reward = -1
if score == 1 and self.single_target:
terminated = True
return obs, reward, terminated, truncated, info
def sample_goal(self, all: bool = False):
if all:
if self.on_circle:
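                # Normalizing an isotropic Gaussian sample gives a direction
                # uniform on the circle; scaling by goal_area then places
                # every goal on that circle.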
self.goals = self.np_random.standard_normal(self.goals.shape)
self.goals /= np.maximum(
np.linalg.norm(self.goals, axis=1).reshape(-1, 1), 1e-5
)
self.goals *= self.goal_area
else:
self.goals = self.np_random.uniform(
-self.goal_area, self.goal_area, size=self.goals.shape
)
else:
self.goals = np.roll(self.goals, -1, axis=0)
if self.on_circle:
self.goals[-1] = self.np_random.standard_normal(2)
self.goals[-1] /= np.maximum(
np.linalg.norm(self.goals[-1]), 1e-5
)
self.goals[-1] *= self.goal_area
else:
self.goals[-1] = self.np_random.uniform(
-self.goal_area, self.goal_area, size=(2,)
)
if self.is_2d:
self.goals[:, 1] = 0
for i in range(self.goals.shape[0]):
self.p.named.model.site_pos[f'target_{i}'][0:2] = self.goals[i]
self.p.named.model.site_pos[f'target_{i}'][2] = 0
self.p.named.model.site_pos[f'target_tolerance_{i}'][
0:2
] = self.goals[i]
self.p.named.model.site_pos[f'target_tolerance_{i}'][2] = 0
self.steps_to_switch = self.goal_switch_steps
|
bipedal-skills-main
|
bisk/tasks/gototarget.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from bisk.single_robot import BiskSingleRobotEnv
class BiskRunDirEnv(BiskSingleRobotEnv):
'''
Dense-reward task: move at a specific angle.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
heading_deg: float,
**kwargs):
super().__init__(robot, features, allow_fallover, **kwargs)
heading = np.deg2rad(heading_deg)
# torso orientation: X/Y are switched
self.dir = np.asarray([np.sin(heading), np.cos(heading), 0])
def step(self, action):
pos_before = self.robot_pos.copy()
obs, reward, terminated, truncated, info = super().step(action)
pos_after = self.robot_pos
displacement = pos_after - pos_before
rdir = np.dot(self.dir, self.p.named.data.xmat['robot/torso'].reshape(3,3))
reward = np.dot(rdir, displacement)
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/rundir.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from bisk.helpers import asset_path
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskGapsEnv(BiskSingleRobotEnv):
'''
Jump over gaps to progress in X-direction.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
max_size: float,
min_gap: float,
max_gap: float,
fixed_size: bool,
**kwargs,
):
        super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.max_size = max(0.5, max_size)
self.fixed_size = fixed_size
self.min_gap = min_gap
self.max_gap = max_gap
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_gap_platform', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
root.find('geom', 'floor').remove()
# Base platform
H = 0.1
root.asset.add(
'material',
name='mat_base',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 1],
texuniform=True,
texture='tex_plane',
)
root.asset.add(
'texture',
name='tex_lava',
type='2d',
file=f'{asset_path()}/lava.png',
)
root.asset.add(
'material',
name='mat_gaps',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 1],
texuniform=True,
texture='tex_lava',
)
self.add_box(
root,
            'base',
size=[(W + 4) / 2, W, H],
pos=[(-W + 4) / 2, 0, -H],
conaffinity=1,
material='mat_base',
)
# 200 platforms should be enough for everybody
self.n_platforms = 200
root.asset.add(
'material',
name='mat_platform',
reflectance=0.5,
shininess=1,
specular=1,
)
for i in range(self.n_platforms):
o = (i % 2) * 0.1
self.add_box(
root,
f'platform-{i}',
size=[1, W, H],
pos=[2, 0, -H],
material='mat_platform',
rgba=[0.2 + o, 0.3 + o, 0.4 + o, 1.0],
)
# Gaps are placed 5cm below
g = self.add_box(
root,
f'gap-{i}',
size=[1, W, H],
pos=[2, 0, -H - 0.05],
material='mat_gaps',
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_platforms_reached = 0
xpos = 4 * self.world_scale
if self.fixed_size:
gaps = np.zeros(self.n_platforms) + self.min_gap * self.world_scale
sizes = (
np.zeros(self.n_platforms) + self.max_size * self.world_scale
)
else:
if self.robot.startswith('quadruped'):
gaps = (
self.np_random.uniform(0.8, 1.55, size=(self.n_platforms,))
* self.world_scale
)
ms = max(self.max_size * 2, 2.0)
sizes = (
self.np_random.uniform(2.0, ms, size=(self.n_platforms,))
* self.world_scale
)
elif self.robot.startswith('humanoid'):
gaps = (
self.np_random.uniform(
self.min_gap, self.max_gap, size=(self.n_platforms,)
)
* self.world_scale
)
sizes = (
self.np_random.uniform(
1.0, self.max_size, size=(self.n_platforms,)
)
* self.world_scale
)
else:
gaps = (
self.np_random.uniform(
self.min_gap, self.max_gap, size=(self.n_platforms,)
)
* self.world_scale
)
sizes = (
self.np_random.uniform(
0.5, self.max_size, size=(self.n_platforms,)
)
* self.world_scale
)
self.gap_starts = []
self.platform_starts = []
for i in range(self.n_platforms):
self.gap_starts.append(xpos)
self.p.named.model.geom_size[f'gap-{i}'][0] = gaps[i] / 2
self.p.named.model.geom_pos[f'gap-{i}'][0] = xpos + gaps[i] / 2
xpos += gaps[i]
self.platform_starts.append(xpos)
self.p.named.model.geom_size[f'platform-{i}'][0] = sizes[i] / 2
self.p.named.model.geom_pos[f'platform-{i}'][0] = (
xpos + sizes[i] / 2
)
xpos += sizes[i]
def next_gap_platform_index(self):
xpos = self.robot_pos[0]
nxp = bisect_left(self.platform_starts, xpos)
nxg = bisect_left(self.gap_starts, xpos)
return nxg, nxp
def get_observation(self):
nxg, nxp = self.next_gap_platform_index()
xpos = self.robot_pos[0]
if nxg < len(self.gap_starts):
next_gap_d = self.gap_starts[nxg] - xpos
else:
next_gap_d = 1.0
if nxp < len(self.platform_starts):
next_platform_d = self.platform_starts[nxp] - xpos
else:
next_platform_d = 1.0
next_platform_reached = nxp < self.max_platforms_reached
return {
'observation': super().get_observation(),
'next_gap_platform': np.array(
[next_gap_d, next_platform_d, not next_platform_reached],
dtype=np.float32
),
}
def on_step_single_frame(self):
for c in self.p.data.contact:
names = self.p.named.model.name_geomadr.axes.row.names
nams = sorted([names[c.geom1], names[c.geom2]])
if nams[0].startswith('gap') and nams[1].startswith('robot/'):
self.touched_gap = True
break
def step_simulation(self):
super().step_simulation()
self.max_platforms_reached = max(
self.max_platforms_reached, self.next_gap_platform_index()[1]
)
def step(self, action):
mpbefore = self.max_platforms_reached
self.touched_gap = False
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_platforms_reached > mpbefore else 0
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
terminated = True
reward = -1
if self.touched_gap:
terminated = True
reward = -1
info['score'] -= 1
return obs, reward, terminated, truncated, info
|
bipedal-skills-main
|
bisk/tasks/gaps.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from bisk.features.base import Featurizer
_registry = {}
def register_featurizer(name, cls):
global _registry
_registry[name] = cls
def make_featurizer(
features: str,
p: 'dm_control.mujoco.Physics',
robot: str,
prefix: str = 'robot',
*args,
**kwargs,
) -> Featurizer:
global _registry
if features == 'joints':
from bisk.features.joints import JointsFeaturizer
return JointsFeaturizer(p, robot, prefix, *args, **kwargs)
elif features == 'joints-relz':
from bisk.features.joints import JointsRelZFeaturizer
return JointsRelZFeaturizer(p, robot, prefix, *args, **kwargs)
elif features in _registry:
return _registry[features](p, robot, prefix, *args, **kwargs)
else:
raise ValueError(f'Unknown feature set {features}')
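# Usage sketch (hypothetical): 'MyFeaturizer' below is illustrative, not part
# of the package. Custom feature sets can be plugged in via the registry:
#
#   class MyFeaturizer(Featurizer):
#       def __call__(self) -> np.ndarray:
#           ...
#
#   register_featurizer('mine', MyFeaturizer)
#   featurizer = make_featurizer('mine', physics, robot='walker')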
|
bipedal-skills-main
|
bisk/features/__init__.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import re
from typing import List, Set
import gym
import numpy as np
from bisk.features.base import Featurizer
class JointsFeaturizer(Featurizer):
'''
Featurizes joint observations (qpos, qvel) as well
as contact forces (clipped to [-1,1]).
'''
    def __init__(
        self,
        p: 'dm_control.mujoco.Physics',
        robot: str,
        prefix: str,
        exclude: str = None,
    ):
super().__init__(p, robot, prefix, exclude)
self.qpos_idx: List[int] = []
self.qvel_idx: List[int] = []
for jn in self.p.named.model.jnt_type.axes.row.names:
if not jn.startswith(f'{self.prefix}/'):
continue
if exclude is not None and re.match(exclude, jn) is not None:
continue
typ = self.p.named.model.jnt_type[jn]
qpos_adr = self.p.named.model.jnt_qposadr[jn]
for i in range(self.n_qpos[typ]):
self.qpos_idx.append(qpos_adr + i)
qvel_adr = self.p.named.model.jnt_dofadr[jn]
for i in range(self.n_qvel[typ]):
self.qvel_idx.append(qvel_adr + i)
self.cfrc_idx = [
r
for r, k in enumerate(self.p.named.data.cfrc_ext.axes.row.names)
if k.startswith(f'{self.prefix}/')
and k != f'{self.prefix}/'
and (exclude is None or re.match(exclude, k) is None)
]
n_obs = len(self.qpos_idx) + len(self.qvel_idx) + len(self.cfrc_idx) * 6
self.observation_space = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(n_obs,), dtype=np.float32
)
def __call__(self) -> np.ndarray:
qpos = self.p.data.qpos[self.qpos_idx]
qvel = self.p.data.qvel[self.qvel_idx]
cfrc_ext = self.p.data.cfrc_ext[self.cfrc_idx]
return np.concatenate(
[qpos, qvel, np.clip(cfrc_ext.flat, -1, 1)]
).astype(np.float32)
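    # The flat observation is laid out as [qpos | qvel | clipped cfrc_ext],
    # matching the order produced by feature_names() below.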
def feature_names(self) -> List[str]:
names: List[str] = []
qp = self.qpos_names()
names += [qp[i] for i in self.qpos_idx]
qv = self.qvel_names()
names += [qv[i] for i in self.qvel_idx]
cn = self.cfrc_ext_names()
for i in self.cfrc_idx:
names += cn[i]
for i in range(len(names)):
names[i] = names[i].replace(f'{self.prefix}/', '')
return names
class JointsRelZFeaturizer(JointsFeaturizer):
'''
JointFeaturizer that reports the robots's Z position as relative to the
closest surface underneath it.
'''
def __init__(
self,
        p: 'dm_control.mujoco.Physics',
robot: str,
prefix: str = 'robot',
exclude: str = None,
):
super().__init__(p, robot, prefix, exclude)
self.robot_geoms: Set[int] = set()
        for i, name in enumerate(self.p.named.model.geom_bodyid.axes.row.names):
            if name.startswith(f'{self.prefix}/'):
                self.robot_geoms.add(i)
# XXX Hacky lookup of z feature
names = [':pz', 'slidez:p', 'rootz:p', 'root:pz']
for name in names:
try:
self.zpos_idx = self.feature_names().index(name)
break
            except ValueError:
continue
def relz(self):
from dm_control.mujoco.wrapper.mjbindings import mjlib
# Find closest non-robot geom from torso downwards
pos = self.p.named.data.xpos[f'{self.prefix}/torso'].copy()
dir = np.array([0.0, 0.0, -1.0])
excl = self.p.named.model.geom_bodyid[f'{self.prefix}/torso']
id = np.array([0], dtype=np.int32)
while True:
d = mjlib.mj_ray(
self.p.model.ptr, self.p.data.ptr, pos, dir, None, 1, excl, id
)
if d < 0.0: # No geom hit
break
pos += dir * d
if id[0] not in self.robot_geoms:
break
excl = self.p.model.geom_bodyid[id[0]]
return self.p.named.data.xpos[f'{self.prefix}/torso', 'z'] - pos[2]
def __call__(self) -> np.ndarray:
obs = super().__call__()
rz = self.relz()
obs[self.zpos_idx] = rz
return obs
|
bipedal-skills-main
|
bisk/features/joints.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Dict, List
import gym
import numpy as np
log = logging.getLogger(__name__)
class Featurizer:
n_qpos: Dict[int, int] = { # qpos entries per joint type
0: 7, # mjenums.mjtJoint.mjJNT_FREE
1: 4, # mjenums.mjtJoint.mjJNT_BALL
2: 1, # mjenums.mjtJoint.mjJNT_SLIDE
3: 1, # mjenums.mjtJoint.mjJNT_HINGE
}
n_qvel: Dict[int, int] = { # qvel entries per joint type
0: 6, # mjenums.mjtJoint.mjJNT_FREE
1: 3, # mjenums.mjtJoint.mjJNT_BALL
2: 1, # mjenums.mjtJoint.mjJNT_SLIDE
3: 1, # mjenums.mjtJoint.mjJNT_HINGE
}
def __init__(
self,
p: 'dm_control.mujoco.Physics',
robot: str,
prefix: str = 'robot',
exclude: str = None,
):
self.p = p
self.prefix = prefix
self.observation_space: gym.spaces.Box = None
def reset(self):
pass
def __call__(self) -> np.ndarray:
raise NotImplementedError()
def set_frame_of_reference(self):
raise NotImplementedError()
def feature_names(self) -> List[str]:
raise NotImplementedError()
def qpos_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.qpos))]
for jn in self.p.named.model.jnt_type.axes.row.names:
typ = self.p.named.model.jnt_type[jn]
adr = self.p.named.model.jnt_qposadr[jn]
if typ == 0:
names[adr + 0] = f'{jn}:px'
names[adr + 1] = f'{jn}:py'
names[adr + 2] = f'{jn}:pz'
names[adr + 3] = f'{jn}:ow'
names[adr + 4] = f'{jn}:ox'
names[adr + 5] = f'{jn}:oy'
names[adr + 6] = f'{jn}:oz'
elif typ == 1:
names[adr + 0] = f'{jn}:ow'
names[adr + 1] = f'{jn}:ox'
names[adr + 2] = f'{jn}:oy'
names[adr + 3] = f'{jn}:oz'
elif typ == 2 or typ == 3:
names[adr] = f'{jn}:p'
else:
raise ValueError(f'Unknown joint type {typ}')
return names
def qvel_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.qvel))]
for jn in self.p.named.model.jnt_type.axes.row.names:
typ = self.p.named.model.jnt_type[jn]
adr = self.p.named.model.jnt_dofadr[jn]
if typ == 0:
names[adr + 0] = f'{jn}:lvx'
names[adr + 1] = f'{jn}:lvy'
names[adr + 2] = f'{jn}:lvz'
names[adr + 3] = f'{jn}:avx'
names[adr + 4] = f'{jn}:avy'
names[adr + 5] = f'{jn}:avz'
elif typ == 1:
names[adr + 0] = f'{jn}:avx'
names[adr + 1] = f'{jn}:avy'
names[adr + 2] = f'{jn}:avz'
elif typ == 2 or typ == 3:
names[adr] = f'{jn}:v'
else:
raise ValueError(f'Unknown joint type {typ}')
return names
def cfrc_ext_names(self) -> List[List[str]]:
names: List[List[str]] = []
for cn in self.p.named.data.cfrc_ext.axes.row.names:
names.append(
[f'{cn}:c{n}' for n in ['rx', 'ry', 'rz', 'tx', 'ty', 'tz']]
)
return names
def sensor_names(self) -> List[str]:
from dm_control.mujoco.wrapper.mjbindings import enums as mjenums
names = ['' for i in range(len(self.p.data.sensordata))]
for sn in self.p.named.model.sensor_adr.axes.row.names:
typ = self.p.named.model.sensor_type[sn]
adr = self.p.named.model.sensor_adr[sn]
if typ == mjenums.mjtSensor.mjSENS_GYRO:
feats = ['avx', 'avy', 'avz']
elif (
typ == mjenums.mjtSensor.mjSENS_VELOCIMETER
or typ == mjenums.mjtSensor.mjSENS_SUBTREELINVEL
):
feats = ['lvx', 'lvy', 'lvz']
elif typ == mjenums.mjtSensor.mjSENS_ACCELEROMETER:
feats = ['lax', 'lay', 'laz']
elif (
typ == mjenums.mjtSensor.mjSENS_FRAMEPOS
or typ == mjenums.mjtSensor.mjSENS_SUBTREECOM
):
feats = ['px', 'py', 'pz']
elif typ == mjenums.mjtSensor.mjSENS_JOINTPOS:
feats = ['']
elif typ == mjenums.mjtSensor.mjSENS_JOINTVEL:
feats = ['']
elif typ == mjenums.mjtSensor.mjSENS_FORCE:
feats = ['fx', 'fy', 'fz']
elif typ == mjenums.mjtSensor.mjSENS_TORQUE:
feats = ['tx', 'ty', 'tz']
elif typ == mjenums.mjtSensor.mjSENS_RANGEFINDER:
feats = ['d']
elif typ == mjenums.mjtSensor.mjSENS_TOUCH:
feats = ['f']
else:
raise ValueError(f'Unsupported sensor type: {typ}')
for i, f in enumerate(feats):
names[adr + i] = f'{sn}:{f}'
return names
|
bipedal-skills-main
|
bisk/features/base.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
import gym
from dm_control import _render
from dm_control.viewer import gui, renderer, viewer, views
import bisk
parser = argparse.ArgumentParser()
parser.add_argument('task')
parser.add_argument('robot')
args = parser.parse_args()
env_name = {
'hurdles': 'BiskHurdles-v1',
'limbo': 'BiskLimbo-v1',
'hurdleslimbo': 'BiskHurdlesLimbo-v1',
'gaps': 'BiskGaps-v1',
'stairs': 'BiskStairs-v1',
'goalwall': 'BiskGoalWall-v1',
'polebalance': 'BiskPoleBalance-v1',
'gototarget': 'BiskGoToTarget-v1',
'butterflies': 'BiskButterflies-v1',
}[args.task.lower()]
env = gym.make(env_name, robot=args.robot)
print(
f'timestep {env.p.model.opt.timestep}s x frameskip {env.frameskip} = dt {env.dt}s'
)
width = 480
height = 480
title = f'{args.task} - {args.robot}'
_MAX_FRONTBUFFER_SIZE = 2048
render_surface = _render.Renderer(
max_width=_MAX_FRONTBUFFER_SIZE, max_height=_MAX_FRONTBUFFER_SIZE
)
ren = renderer.OffScreenRenderer(env.p.model, render_surface)
viewer_layout = views.ViewportLayout()
viewport = renderer.Viewport(width, height)
window = gui.RenderWindow(width, height, title)
vw = viewer.Viewer(viewport, window.mouse, window.keyboard)
ren.components += viewer_layout
vw.initialize(env.p, ren, touchpad=False)
env.seed(0)
step = 0
def tick():
global step
global obs
if step == 0:
        obs, _ = env.reset()
#env.p.named.data.qvel['ball'][0:3] = [10, 3, 4]
a = env.action_space.sample()
a *= 0
'''
if step < 1:
a[2] = 1
elif step < 100:
a[0] = 1
else:
a[2] = -1
'''
    obs, r, terminated, truncated, i = env.step(a)
    d = terminated or truncated
step += 1
if step > 200 or d:
print(r)
print(f'reset after {step} steps')
step = 0
time.sleep(0.05)
vw.render()
def _tick():
viewport.set_size(*window.shape)
tick()
return ren.pixels
window.event_loop(tick_func=_tick)
window.close()
|
bipedal-skills-main
|
exp/testgui.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import yaml
def load_configs(file_path, ws_dir):
with open(file_path, 'r') as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
    config_dict['log_dir'] = os.path.join(ws_dir, config_dict['log_dir'])
    for key in ('raw_dataset_dir', 'dataset_dir', 'bin_path', 'smpl_path',
                'uv_info', 'resample_idxs_path', 'train_bin_path',
                'interp_bin_path', 'extrap_bin_path'):
        config_dict['data'][key] = os.path.join(ws_dir, config_dict['data'][key])
if 'type' not in config_dict['data']:
config_dict['data']['type'] = 'CAPE'
if 'separate_detail' not in config_dict['data']:
config_dict['data']['separate_detail'] = True
return config_dict
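# Usage sketch (hypothetical paths): resolve every dataset path in a config
# against a workspace directory.
#
#   args = load_configs('configs/cape.yaml', '/path/to/workspace')
#   print(args['log_dir'], args['data']['type'])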
|
AutoAvatar-main
|
utils/configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
import pickle
from pytorch3d.io import load_ply
# Classes -------------------------------------------------------------------------------------------------------
class DFaustJson():
"""
DFaust .bin Structure:
'subject'
'seqs'
for seq in seqs:
'id'
'seq_name'
'frames'
for frame in frames:
'ply_path'
'poses'
"""
def __init__(self, bin_path=None):
self.data = None
if bin_path is not None:
self.load_bin_file(bin_path)
def load_bin_file(self, bin_path):
with open(bin_path, 'rb') as f:
self.data = pickle.load(f)
def dump_bin_file(self, bin_path):
with open(bin_path, 'wb') as f:
pickle.dump(self.data, f)
def append_frames(self, frames, ply_path, poses):
frames.append({
'ply_path': ply_path,
'poses': poses
})
return frames
def append_seqs(self, seqs, seq_name, frames):
seqs.append({
'id': len(seqs),
'seq_name': seq_name,
'frames': frames
})
return seqs
def set_data(self, subject, seqs):
self.data = {
'subject': subject,
'seqs': seqs
}
def num_of_seqs(self):
return len(self.data['seqs'])
def num_of_frames(self):
count = 0
for seq in self.data['seqs']:
count += len(seq['frames'])
return count
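# Usage sketch (hypothetical subject/paths): build and save a .bin file.
#
#   db = DFaustJson()
#   frames = db.append_frames([], 'seq0/frame0.ply', poses=[0.0] * 72)
#   seqs = db.append_seqs([], 'chicken_wings', frames)
#   db.set_data('50002', seqs)
#   db.dump_bin_file('dfaust_50002.bin')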
|
AutoAvatar-main
|
utils/DFaust.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.renderer import (
PerspectiveCameras,
AmbientLights,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
HardPhongShader,
TexturesVertex,
rasterize_meshes
)
from pytorch3d.structures import Meshes
from pytorch3d.io import load_obj
from numba import jit
import copy
import open3d as o3d
# Functions -----------------------------------------------------------------------------------------------------
def render_mesh(verts, faces, R, t, f, image_size=(512, 512), colors=None, simplify_mesh=False):
"""
:param verts: (N, 3)
:param faces: (F, 3)
"""
device = verts.device
f_th = torch.tensor(f, dtype=torch.float32, device=device)[None]
image_size_th = torch.tensor(image_size, dtype=torch.int32, device=device)[None]
cameras = PerspectiveCameras(focal_length=f_th, R=R[None], T=t[None], device=device, image_size=image_size_th)
raster_settings = RasterizationSettings(
image_size=image_size,
blur_radius=0.0,
faces_per_pixel=1,
)
lights = AmbientLights(device=device)
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings
),
shader=HardPhongShader(
device=device,
cameras=cameras,
lights=lights
)
)
if not simplify_mesh:
if colors is not None:
mesh = Meshes(verts=verts[None], faces=faces[None], textures=TexturesVertex(colors[None]))
else:
mesh = Meshes(verts=verts[None], faces=faces[None])
normals = (mesh.verts_normals_padded() + 1) / 2
mesh = Meshes(verts=verts[None], faces=faces[None], textures=TexturesVertex(normals))
else:
if colors is None:
mesh = Meshes(verts=verts[None], faces=faces[None])
normals = (mesh.verts_normals_padded() + 1) / 2
colors = normals[0]
mesh_o3d = o3d.geometry.TriangleMesh()
mesh_o3d.vertices = o3d.utility.Vector3dVector(verts.cpu().numpy())
mesh_o3d.triangles = o3d.utility.Vector3iVector(faces.cpu().numpy())
mesh_o3d.vertex_colors = o3d.utility.Vector3dVector(colors.cpu().numpy())
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(faces.shape[0] * 0.1))
verts, faces, colors = torch.from_numpy(np.asarray(mesh_o3d.vertices)), torch.from_numpy(np.asarray(mesh_o3d.triangles)), torch.from_numpy(np.asarray(mesh_o3d.vertex_colors))
mesh = Meshes(verts=verts[None].float(), faces=faces[None], textures=TexturesVertex(colors[None].float())).to(device)
images = renderer(mesh)[0, ..., :3].clip(min=0, max=1)
return images
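# Usage sketch (assumed conventions: R is a (3, 3) rotation, t a (3,)
# translation, f the focal length in pixels; verts/faces live on the GPU):
#
#   images = render_mesh(verts, faces, R, t, f=(500.0, 500.0))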
def parse_uv_info(obj_path):
verts, faces_tuple, aux_tuple = load_obj(obj_path)
faces = faces_tuple.verts_idx.numpy()
faces_uv = faces_tuple.textures_idx.numpy()
verts_uv = aux_tuple.verts_uvs.numpy()
verts_uv = verts_uv * 2 - 1 #(1 - verts_uv) * 2 - 1
N = verts.shape[0]
F = faces.shape[0]
M = verts_uv.shape[0]
assert faces_uv.shape == (F, 3)
print(N, F, M)
v2uv = np.zeros((N, 10), dtype=np.int32) - 1
v2uv_count = np.zeros((N,), dtype=np.int32)
@jit(nopython=True)
def func(faces, faces_uv, v2uv, v2uv_count):
for i in range(F):
for k in range(3):
v = faces[i, k]
uv = faces_uv[i, k]
included = False
for j in range(10):
if v2uv[v, j] == uv:
included = True
break
if not included:
v2uv[v, v2uv_count[v]] = uv
v2uv_count[v] += 1
for i in range(N):
for k in range(10):
if v2uv[i, k] == -1:
v2uv[i, k] = v2uv[i, 0]
return v2uv, v2uv_count
v2uv, v2uv_count = func(faces, faces_uv, v2uv, v2uv_count)
print(np.amin(v2uv_count), np.amax(v2uv_count))
v2uv = v2uv[:, :np.amax(v2uv_count)]
return verts_uv, faces_uv, v2uv, faces
def compute_per_pixel_verts_idx_bary_weights(verts_uv, faces_uv, v2uv, uv_size):
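    """
    Rasterize the UV chart once to precompute, for every texel, the indices
    of the three mesh vertices covering it (pix_to_v, -1 outside any face)
    and the corresponding barycentric weights.
    """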
# Compute uv2v
N, K = v2uv.shape
M = verts_uv.shape[0]
uv2v = torch.zeros((M,), dtype=torch.long) - 1
for i in range(K):
uv2v[v2uv[:, i]] = torch.arange(N)
# Rasterization
verts_uv = -verts_uv
verts_uv_ = torch.cat([verts_uv, torch.ones((M, 1), dtype=torch.float)], dim=-1)
meshes = Meshes(verts=verts_uv_[None].cuda(), faces=faces_uv[None].cuda())
pix_to_face, _, barycentric, _ = rasterize_meshes(meshes, uv_size, faces_per_pixel=1) #, blur_radius=0.0001, clip_barycentric_coords=True)
assert pix_to_face.shape == (1, uv_size, uv_size, 1) and barycentric.shape == (1, uv_size, uv_size, 1, 3)
faces_uv_ = torch.cat([-torch.ones((1, 3), dtype=torch.long), faces_uv], dim=0) # (1 + F, 3)
pix_to_uv = faces_uv_[pix_to_face[0, ..., 0] + 1]
assert pix_to_uv.shape == (uv_size, uv_size, 3)
uv2v_ = torch.cat([-torch.ones((1,), dtype=torch.long), uv2v], dim=0) # (1 + M,)
pix_to_v = uv2v_[pix_to_uv + 1]
assert pix_to_v.shape == (uv_size, uv_size, 3)
return pix_to_v, barycentric[0, ..., 0, :]
# Classes -------------------------------------------------------------------------------------------------------
class UVRender(nn.Module):
def __init__(self, args, verts_uv, faces_uv, v2uv):
super().__init__()
self.args = copy.deepcopy(args)
self.register_buffer('verts_uv', verts_uv)
self.register_buffer('faces_uv', faces_uv)
self.register_buffer('v2uv', v2uv)
pix_to_v, bary_w = compute_per_pixel_verts_idx_bary_weights(verts_uv, faces_uv, v2uv, args['model']['uv_size'])
self.register_buffer('pix_to_v', pix_to_v)
self.register_buffer('bary_w', bary_w)
def to_uv(self, verts):
"""
:param verts: (B, N, C)
"""
B, N, C = verts.shape
verts_ = torch.cat([torch.zeros((B, 1, C), dtype=torch.float, device=verts.device), verts], dim=1) # (B, 1 + N, C)
pix_verts = verts_[:, self.pix_to_v + 1, :] # (B, H, W, 3, C)
verts_uv = (pix_verts * self.bary_w[None, ..., None]).sum(dim=-2) # (B, H, W, C)
assert verts_uv.shape == (B, self.args['model']['uv_size'], self.args['model']['uv_size'], C)
return verts_uv.permute(0, 3, 1, 2).contiguous()
def from_uv(self, verts_uv):
"""
:param verts_uv: (B, C, H, W)
"""
B, C, H, W = verts_uv.shape
N, K = self.v2uv.shape
grid = self.verts_uv[self.v2uv][None].expand(B, N, K, 2).contiguous()
verts = F.grid_sample(verts_uv, grid, mode='bilinear', align_corners=False) # (B, C, N, K)
assert verts.shape == (B, C, N, K)
verts = verts.mean(dim=-1).permute(0, 2, 1).contiguous()
return verts
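# Usage sketch (hypothetical args/config): round-trip per-vertex attributes
# through UV space.
#
#   uv_render = UVRender(args, verts_uv, faces_uv, v2uv)
#   verts_map = uv_render.to_uv(verts)         # (B, C, H, W)
#   verts_back = uv_render.from_uv(verts_map)  # (B, N, C)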
|
AutoAvatar-main
|
utils/render.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
def load_components(model, ckpt_dir, ckpt_itr, name):
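    """
    Partially restore `model`: only entries of the checkpoint state dict
    whose key contains `name` are copied over the current parameters.
    """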
state_dict = model.state_dict()
ckpt_state_dict = torch.load(os.path.join(ckpt_dir, 'ckpt', 'dyn_net_%06d.pth' % ckpt_itr), map_location='cpu')
ckpt_state_dict = {key: value for key, value in ckpt_state_dict.items() if name in key}
state_dict.update(ckpt_state_dict)
model.load_state_dict(state_dict)
|
AutoAvatar-main
|
utils/io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# import openvdb as vdb
import numpy as np
import os
import torch
import torch.nn.functional as F
import math
from skimage import measure
def build_smooth_conv3D(in_channels=1, out_channels=1, kernel_size=3, padding=1):
smooth_conv = torch.nn.Conv3d(
in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, padding=padding
)
smooth_conv.weight.data = torch.ones(
(kernel_size, kernel_size, kernel_size),
dtype=torch.float32
).reshape(in_channels, out_channels, kernel_size, kernel_size, kernel_size) / (kernel_size**3)
smooth_conv.bias.data = torch.zeros(out_channels)
return smooth_conv
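# Note: the resulting kernel is a box (mean) filter, all weights equal to
# 1 / kernel_size**3 with zero bias; the reshape assumes the default
# in_channels == out_channels == 1.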
def reconstruction(net, cuda, calib_tensor,
resolution, b_min, b_max,
                   use_octree=False, num_samples=10000, transform=None, thresh=0.5, texture_net=None, poses=None, shapes=None):
'''
Reconstruct meshes from sdf predicted by the network.
    :param net: a BasePixImpNet object; call its image filter beforehand.
:param cuda: cuda device
:param calib_tensor: calibration tensor
:param resolution: resolution of the grid cell
:param b_min: bounding box corner [x_min, y_min, z_min]
:param b_max: bounding box corner [x_max, y_max, z_max]
:param use_octree: whether to use octree acceleration
:param num_samples: how many points to query each gpu iteration
:return: marching cubes results.
'''
# Then we define the lambda function for cell evaluation
    color_flag = texture_net is not None
def eval_func(points):
samples = points.t().unsqueeze(0).to(cuda)
# pred = net.query(samples, calib_tensor)[0][0]
pred = net(samples, poses, shapes)[0]
return pred
def batch_eval(points, num_samples=num_samples):
num_pts = points.shape[1]
sdf = []
num_batches = num_pts // num_samples
for i in range(num_batches):
sdf.append(
eval_func(points[:, i * num_samples:i * num_samples + num_samples])
)
if num_pts % num_samples:
sdf.append(
eval_func(points[:, num_batches * num_samples:])
)
if num_pts == 0:
return None
sdf = torch.cat(sdf)
return sdf
# Then we evaluate the grid
max_level = int(math.log2(resolution))
sdf = eval_progressive(batch_eval, 4, max_level, cuda, b_min, b_max, thresh)
# calculate matrix
mat = np.eye(4)
length = b_max - b_min
mat[0, 0] = length[0] / sdf.shape[0]
mat[1, 1] = length[1] / sdf.shape[1]
mat[2, 2] = length[2] / sdf.shape[2]
mat[0:3, 3] = b_min
# Finally we do marching cubes
try:
verts, faces, normals, values = measure.marching_cubes(sdf, thresh, gradient_direction='ascent')
    except Exception:
        print('error: marching cubes failed')
return -1
# grid = vdb.FloatGrid(1.0)
# grid.copyFromArray(sdf)
# verts, quads = grid.convertToQuads()
# faces = np.zeros((quads.shape[0] * 2, 3), dtype=np.uint32)
# faces[:quads.shape[0], :] = quads[:, [0, 2, 1]]
# faces[quads.shape[0]:, :] = quads[:, [0, 3, 2]]
# verts = np.zeros((10, 3), dtype=np.float32)
# faces = np.zeros((10, 3), dtype=np.int32)
# transform verts into world coordinate system
verts = np.matmul(mat[:3, :3], verts.T) + mat[:3, 3:4]
verts = verts.T
if np.linalg.det(mat) > 0:
faces = faces[:,[0,2,1]]
if color_flag:
torch_verts = torch.Tensor(verts).unsqueeze(0).permute(0,2,1).to(cuda)
with torch.no_grad():
_, last_layer_feature, point_local_feat = net.query(torch_verts, calib_tensor, return_last_layer_feature=True)
vertex_colors = texture_net.query(point_local_feat, last_layer_feature)
vertex_colors = vertex_colors.squeeze(0).permute(1,0).detach().cpu().numpy()
return verts, faces, vertex_colors #, normals, values, vertex_colors
else:
return verts, faces #, normals, values
def eval_progressive(batch_eval, min_level, max_level, cuda, b_min, b_max, thresh=0.5):
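    """
    Coarse-to-fine SDF evaluation: start from a dense grid with
    2**min_level + 1 samples per axis, then at each finer level re-evaluate
    only points near the current surface estimate (the boundary of
    sdf > thresh), plus "danger" voxels whose interpolated occupancy
    disagrees with their evaluated SDF value.
    """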
steps = [i for i in range(min_level, max_level+1)]
b_min = torch.tensor(b_min).to(cuda)
b_max = torch.tensor(b_max).to(cuda)
# init
smooth_conv3x3 = build_smooth_conv3D(in_channels=1, out_channels=1, kernel_size=3, padding=1).to(cuda)
arrange = torch.linspace(0, 2**steps[-1], 2**steps[0]+1).long().to(cuda)
coords = torch.stack(torch.meshgrid([
arrange, arrange, arrange
])) # [3, 2**step+1, 2**step+1, 2**step+1]
coords = coords.view(3, -1).t() # [N, 3]
calculated = torch.zeros(
(2**steps[-1]+1, 2**steps[-1]+1, 2**steps[-1]+1), dtype=torch.bool
).to(cuda)
gird8_offsets = torch.stack(torch.meshgrid([
torch.tensor([-1, 0, 1]), torch.tensor([-1, 0, 1]), torch.tensor([-1, 0, 1])
])).int().to(cuda).view(3, -1).t() #[27, 3]
with torch.no_grad():
for step in steps:
resolution = 2**step + 1
stride = 2**(steps[-1]-step)
if step == steps[0]:
coords2D = coords.float() / (2**steps[-1]+1) * (b_max - b_min) + b_min
sdf_all = batch_eval(
coords2D.t(),
).view(resolution, resolution, resolution)
coords_accum = coords / stride
coords_accum = coords_accum.long()
calculated[coords[:, 0], coords[:, 1], coords[:, 2]] = True
else:
valid = F.interpolate(
(sdf_all>thresh).view(1, 1, *sdf_all.size()).float(),
size=resolution, mode="trilinear", align_corners=True
)[0, 0]
sdf_all = F.interpolate(
sdf_all.view(1, 1, *sdf_all.size()),
size=resolution, mode="trilinear", align_corners=True
)[0, 0]
coords_accum *= 2
is_boundary = (valid > 0.0) & (valid < 1.0)
is_boundary = smooth_conv3x3(is_boundary.float().view(1, 1, *is_boundary.size()))[0, 0] > 0
is_boundary[coords_accum[:, 0], coords_accum[:, 1], coords_accum[:, 2]] = False
# coords = is_boundary.nonzero() * stride
coords = torch.nonzero(is_boundary) * stride
coords2D = coords.float() / (2**steps[-1]+1) * (b_max - b_min) + b_min
# coords2D = coords.float() / (2**steps[-1]+1)
sdf = batch_eval(
coords2D.t(),
) #[N]
                if sdf is None:
                    continue
                sdf_all[is_boundary] = sdf
voxels = coords / stride
voxels = voxels.long()
coords_accum = torch.cat([
voxels,
coords_accum
], dim=0).unique(dim=0)
calculated[coords[:, 0], coords[:, 1], coords[:, 2]] = True
for n_iter in range(14):
sdf_valid = valid[voxels[:, 0], voxels[:, 1], voxels[:, 2]]
idxs_danger = ((sdf_valid==1) & (sdf<thresh)) | ((sdf_valid==0) & (sdf>thresh)) #[N,]
coords_danger = coords[idxs_danger, :] #[N, 3]
if coords_danger.size(0) == 0:
break
coords_arround = coords_danger.int() + gird8_offsets.view(-1, 1, 3) * stride
coords_arround = coords_arround.reshape(-1, 3).long()
coords_arround = coords_arround.unique(dim=0)
coords_arround[:, 0] = coords_arround[:, 0].clamp(0, calculated.size(0)-1)
coords_arround[:, 1] = coords_arround[:, 1].clamp(0, calculated.size(1)-1)
coords_arround[:, 2] = coords_arround[:, 2].clamp(0, calculated.size(2)-1)
coords = coords_arround[
calculated[coords_arround[:, 0], coords_arround[:, 1], coords_arround[:, 2]] == False
]
if coords.size(0) == 0:
break
coords2D = coords.float() / (2**steps[-1]+1) * (b_max - b_min) + b_min
# coords2D = coords.float() / (2**steps[-1]+1)
sdf = batch_eval(
coords2D.t(),
) #[N]
voxels = coords / stride
voxels = voxels.long()
sdf_all[voxels[:, 0], voxels[:, 1], voxels[:, 2]] = sdf
coords_accum = torch.cat([
voxels,
coords_accum
], dim=0).unique(dim=0)
calculated[coords[:, 0], coords[:, 1], coords[:, 2]] = True
return sdf_all.data.cpu().numpy()
|
AutoAvatar-main
|
utils/implicit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pickle
import os
import torch
import torch.nn.functional as F
from pytorch3d.ops import norm_laplacian, sample_points_from_meshes, knn_points, knn_gather
from pytorch3d.structures import Meshes, Pointclouds, utils as struct_utils
from pytorch3d.io import load_ply
from pytorch3d import _C
from pytorch3d.renderer import TexturesVertex
from pytorch3d.transforms import axis_angle_to_quaternion
import smplx
from smplx.utils import SMPLOutput
from smplx.lbs import blend_shapes, vertices2joints, batch_rodrigues, batch_rigid_transform
# Functions -----------------------------------------------------------------------------------------------------
def load_smpl(args):
if args['data']['type'] == 'CAPE':
ply_path = os.path.join(args['data']['raw_dataset_dir'], 'minimal_body_shape', args['data']['subject'], '%s_minimal.ply' % args['data']['subject'])
elif args['data']['type'] == 'DFaust':
ply_path = os.path.join(args['data']['dataset_dir'], 'smpl_poses', args['data']['subject'], 'v_template.ply')
v_template, _ = load_ply(ply_path)
smpl_model = MySMPL(args['data']['smpl_path'], v_template=v_template)
return smpl_model
def taubin_smoothing(
meshes: Meshes, lambd: float = 0.53, mu: float = -0.53, num_iter: int = 10
) -> Meshes:
"""
Taubin smoothing [1] is an iterative smoothing operator for meshes.
At each iteration
verts := (1 - λ) * verts + λ * L * verts
verts := (1 - μ) * verts + μ * L * verts
This function returns a new mesh with smoothed vertices.
Args:
meshes: Meshes input to be smoothed
lambd, mu: float parameters for Taubin smoothing,
lambd > 0, mu < 0
num_iter: number of iterations to execute smoothing
Returns:
mesh: Smoothed input Meshes
[1] Curve and Surface Smoothing without Shrinkage,
Gabriel Taubin, ICCV 1997
"""
verts = meshes.verts_packed() # V x 3
edges = meshes.edges_packed() # E x 3
for _ in range(num_iter):
L = norm_laplacian(verts, edges)
total_weight = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
verts = (1 - lambd) * verts + lambd * torch.mm(L, verts) / (total_weight + 1e-10)
# pyre-ignore
L = norm_laplacian(verts, edges)
total_weight = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
verts = (1 - mu) * verts + mu * torch.mm(L, verts) / (total_weight + 1e-10)
verts_list = struct_utils.packed_to_list(
verts, meshes.num_verts_per_mesh().tolist()
)
mesh = Meshes(verts=list(verts_list), faces=meshes.faces_list())
return mesh
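# Usage sketch (hypothetical path): smooth a scan loaded with pytorch3d.
#
#   verts, faces = load_ply('scan.ply')
#   smoothed = taubin_smoothing(Meshes(verts=[verts], faces=[faces]))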
def compute_adjacent_matrix(parents, n_rings):
"""
:param parents: (J,)
"""
J = parents.shape[0]
W = torch.zeros(J, J - 1)
for i in range(J - 1):
W[i + 1, i] += 1.0
parent = parents[i+1]
for j in range(n_rings):
W[parent, i] += 1.0
if parent == 0:
break
parent = parents[parent]
# W /= W.sum(0, keepdim=True) + 1e-16
return W
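# E.g. for a kinematic chain with parents = [0, 0, 1, 2] and n_rings=1,
# column i marks joint i + 1 and its parent:
# W[1,0] = W[0,0] = 1, W[2,1] = W[1,1] = 1, W[3,2] = W[2,2] = 1.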
def sample_igr_pts(verts, faces, bbmin, bbmax, args):
"""
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param bbmin / bbmax: (B, 3)
"""
B, N, _ = verts.shape
meshes = Meshes(verts=verts, faces=faces)
if not args['model']['use_detail']:
surf_pts, surf_normals = sample_points_from_meshes(meshes, num_samples=args['train']['n_pts_scan'], return_normals=True)
else:
normals = meshes.verts_normals_padded()
meshes = Meshes(verts=verts, faces=faces, textures=TexturesVertex(normals))
surf_pts, surf_normals = sample_points_from_meshes(meshes, num_samples=args['train']['n_pts_scan'], return_textures=True)
surf_normals = F.normalize(surf_normals, p=2, dim=-1)
igr_pts = surf_pts[:, :args['train']['n_pts_scan_igr']] + torch.normal(0, args['train']['pts_igr_sigma'], (B, args['train']['n_pts_scan_igr'], 3), device=verts.device)
igr_pts = torch.minimum(torch.maximum(igr_pts, bbmin[:, None].expand(B, args['train']['n_pts_scan_igr'], 3)), bbmax[:, None].expand(B, args['train']['n_pts_scan_igr'], 3))
bbox_pts = torch.rand((B, args['train']['n_pts_bbox_igr'], 3), device=verts.device) * (bbmax - bbmin)[:, None] + bbmin[:, None]
rand_pts = torch.cat([igr_pts, bbox_pts], dim=1)
return surf_pts, surf_normals, igr_pts, bbox_pts, rand_pts
def _point_to_edge_distance(point: torch.Tensor, s0, s1):
    """
    Computes the euclidean distance of points to segments and the projected
    points. Modified from https://github.com/facebookresearch/pytorch3d/issues/613
    Args:
        point: FloatTensor of shape (P, 3)
        s0, s1: FloatTensors of shape (P, 3), the segment start and end points
    Returns:
        dist: FloatTensor of shape (P,)
        x: FloatTensor of shape (P, 3)
    If a, b are the start and end points of the segments, we
    parametrize a point p as
        x(t) = a + t * (b - a)
    To find t which describes p we minimize (x(t) - p) ^ 2
    Note that p does not need to live in the space spanned by (a, b)
    """
s01 = s1 - s0
norm_s01 = (s01 * s01).sum(dim=-1)
same_edge = norm_s01 < 1e-8
t = torch.where(same_edge, torch.ones_like(norm_s01) * 0.5, (s01 * (point - s0)).sum(dim=-1) / norm_s01)
t = torch.clamp(t, min=0.0, max=1.0)[..., None]
x = s0 + t * s01
dist = ((x - point) * (x - point)).sum(dim=-1).sqrt()
return dist, x
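# Illustrative check (not part of the original file): the point (0.5, 1, 0)
# projects onto the midpoint of the segment (0,0,0)-(1,0,0) at distance 1.
def _demo_point_to_edge_distance():
    point = torch.tensor([[0.5, 1.0, 0.0]])
    s0 = torch.tensor([[0.0, 0.0, 0.0]])
    s1 = torch.tensor([[1.0, 0.0, 0.0]])
    dist, x = _point_to_edge_distance(point, s0, s1)
    print(dist, x)  # tensor([1.]), tensor([[0.5, 0., 0.]])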
def _point_to_bary(point: torch.Tensor, a, b, c) -> torch.Tensor:
"""
Computes the barycentric coordinates of point wrt triangle (tri)
Note that point needs to live in the space spanned by tri = (a, b, c),
i.e. by taking the projection of an arbitrary point on the space spanned by
tri. Modified from https://github.com/facebookresearch/pytorch3d/issues/613
    Args:
        point: FloatTensor of shape (P, 3)
        a, b, c: FloatTensors of shape (P, 3), the triangle vertices
Returns:
bary: FloatTensor of shape (P, 3)
"""
assert point.dim() == 2 and point.shape[1] == 3
P, _ = point.shape
assert a.shape == (P, 3) and b.shape == (P, 3) and c.shape == (P, 3)
v0 = b - a
v1 = c - a
v2 = point - a
d00 = (v0 * v0).sum(dim=-1)
d01 = (v0 * v1).sum(dim=-1)
d11 = (v1 * v1).sum(dim=-1)
d20 = (v2 * v0).sum(dim=-1)
d21 = (v2 * v1).sum(dim=-1)
denom = d00 * d11 - d01 * d01 + 1e-8
s2 = (d11 * d20 - d01 * d21) / denom
s3 = (d00 * d21 - d01 * d20) / denom
s1 = 1.0 - s2 - s3
bary = torch.stack([s1, s2, s3], dim=-1)
return bary
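# Illustrative check (not part of the original file): for a point already in
# the triangle's plane, _point_to_bary recovers the mixing weights exactly.
def _demo_point_to_bary():
    a = torch.tensor([[0.0, 0.0, 0.0]])
    b = torch.tensor([[1.0, 0.0, 0.0]])
    c = torch.tensor([[0.0, 1.0, 0.0]])
    p = 0.2 * a + 0.3 * b + 0.5 * c
    print(_point_to_bary(p, a, b, c))  # ~[[0.2, 0.3, 0.5]]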
def proj_pts_to_mesh(pts, verts, faces, verts_feat=None, scale=1000, return_idxs=False):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param verts_feat: (B, N, C)
"""
B, M, _ = pts.shape
N = verts.shape[1]
F = faces.shape[1]
pts = pts * scale
verts = verts * scale
meshes = Meshes(verts=verts, faces=faces)
pcls = Pointclouds(pts)
assert len(meshes) == B and len(pcls) == B
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
assert torch.allclose(points, pts.view(-1, 3))
# packed representation for faces
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
tris = verts_packed[faces_packed] # (T, 3, 3)
tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_tris = meshes.num_faces_per_mesh().max().item()
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
assert torch.allclose(verts_packed, verts.view(-1, 3)) #and torch.allclose(faces_packed, faces.view(-1, 3))
dists, idxs = _C.point_face_dist_forward(
points, points_first_idx, tris, tris_first_idx, max_points, 1e-3
)
pts_faces_normals = faces_normals_packed[idxs] # (P, 3)
pts_verts_normals = verts_normals_packed[faces_packed][idxs] # (P, 3, 3)
pts_tris = tris[idxs] # (P, 3, 3)
# Project pts to the plane of its closest triangle
v, v0, v1, v2 = points, pts_tris[:, 0], pts_tris[:, 1], pts_tris[:, 2]
sd = -((v0 - v) * pts_faces_normals).sum(dim=-1, keepdim=True)
v_proj = -sd * pts_faces_normals + v
# Check v_proj outside triangle
inside = torch.isclose(sd[:, 0].abs(), dists.sqrt(), atol=1e-5)
outside = torch.logical_not(inside)
# Project pts to triangle edges
if outside.sum().item() > 0:
e01_dist, e01_v_proj = _point_to_edge_distance(v[outside], v0[outside], v1[outside])
e02_dist, e02_v_proj = _point_to_edge_distance(v[outside], v0[outside], v2[outside])
e12_dist, e12_v_proj = _point_to_edge_distance(v[outside], v1[outside], v2[outside])
e_dist = torch.stack([e01_dist, e02_dist, e12_dist], dim=0) # (3, P_)
e_v_proj = torch.stack([e01_v_proj, e02_v_proj, e12_v_proj], dim=0) # (3, P_, 3)
e_min_idxs = torch.argmin(e_dist, dim=0) # (P_,)
v_proj_out = torch.gather(e_v_proj, dim=0, index=e_min_idxs[None, :, None].expand(1, e_dist.shape[1], 3))[0]
v_proj[outside] = v_proj_out
# Compute barycentric coordinates
bary = _point_to_bary(v_proj, v0, v1, v2) # (P, 3)
pts_normals = (pts_verts_normals * bary[..., None]).sum(dim=-2)
sd = torch.norm(v - v_proj + 1e-8, dim=-1, p=2) * ((v - v_proj) * pts_normals).sum(dim=-1).sign()
# Test
if not torch.allclose(sd.abs(), dists.sqrt(), atol=1e-3):
print('sd:', (sd.abs() - dists.sqrt()).abs().max(), ((sd.abs() - dists.sqrt()).abs() > 1e-3).sum())
# v_proj_rec = (pts_tris * bary[..., None]).sum(dim=-2)
# if not torch.allclose(v_proj, v_proj_rec, atol=1e-3):
# print('v_proj:', (v_proj - v_proj_rec).abs().max())
# if sd.isnan().sum().item() > 0:
# print(sd.isnan().sum(), '/', sd.shape)
if verts_feat is not None:
C = verts_feat.shape[-1]
assert verts_feat.shape == (B, N, C)
verts_feat_packed = verts_feat.view(-1, C)
pts_verts_feat = verts_feat_packed[faces_packed][idxs] # (P, 3, C)
pts_feat = (pts_verts_feat * bary[..., None]).sum(dim=-2)
pts_feat = pts_feat.view(B, M, C)
else:
pts_feat = None
if not return_idxs:
return sd.view(B, M) / scale, v_proj.view(B, M, 3) / scale, faces_packed[idxs].reshape(B, M, 3), bary.view(B, M, 3), pts_feat
else:
return sd.view(B, M) / scale, v_proj.view(B, M, 3) / scale, faces_packed[idxs].reshape(B, M, 3), bary.view(B, M, 3), pts_feat, idxs.view(B, M)
def proj_pts_to_mesh_sample(pts, verts, faces, verts_feat=None, n_sample=100000):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param verts_feat: (B, N, C)
"""
B, M, _ = pts.shape
F = faces.shape[1]
K = n_sample
if verts_feat is None:
verts_feat = torch.zeros_like(verts)
C = verts_feat.shape[-1]
meshes = Meshes(verts=verts, faces=faces, textures=TexturesVertex(verts_feat))
pts_v, pts_v_normals, pts_v_feat = sample_points_from_meshes(meshes, num_samples=n_sample, return_normals=True, return_textures=True)
assert pts_v.shape == (B, K, 3) and pts_v_normals.shape == (B, K, 3) and pts_v_feat.shape == (B, K, C)
# KNN
_, idx, nn = knn_points(pts, pts_v, K=1, return_nn=True)
assert torch.allclose(nn, knn_gather(pts_v, idx)) and idx.shape == (B, M, 1)
nn_normals = knn_gather(pts_v_normals, idx)
nn_feat = knn_gather(pts_v_feat, idx)
assert nn.shape == (B, M, 1, 3) and nn_normals.shape == (B, M, 1, 3) and nn_feat.shape == (B, M, 1, C)
nn, nn_normals, nn_feat = nn[:, :, 0], nn_normals[:, :, 0], nn_feat[:, :, 0]
sd = torch.norm(pts - nn + 1e-8, dim=-1, p=2) * ((pts - nn) * nn_normals).sum(dim=-1).sign()
return sd, nn, nn_normals, nn_feat
def compute_signed_dst(pts, verts, faces, scale=1000):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
"""
B, M, _ = pts.shape
F = faces.shape[1]
pts = pts * scale
verts = verts * scale
meshes = Meshes(verts=verts, faces=faces)
pcls = Pointclouds(pts)
assert len(meshes) == B and len(pcls) == B
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
assert torch.allclose(points, pts.view(-1, 3))
# packed representation for faces
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
tris = verts_packed[faces_packed] # (T, 3, 3)
tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_tris = meshes.num_faces_per_mesh().max().item()
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
assert torch.allclose(verts_packed, verts.view(-1, 3)) and torch.allclose(faces_packed, faces.view(-1, 3))
    dists, idxs = _C.point_face_dist_forward(
        points, points_first_idx, tris, tris_first_idx, max_points, 1e-3
    )
pts_faces_normals = faces_normals_packed[idxs] # (P, 3)
pts_verts_normals = verts_normals_packed[faces_packed][idxs] # (P, 3, 3)
pts_tris = tris[idxs] # (P, 3, 3)
verts_normals = meshes.verts_normals_padded()
_, nn, _, nn_normals = proj_pts_to_mesh_sample(pts, verts, faces, verts_feat=verts_normals, n_sample=100000)
sd = dists.sqrt() * ((pts - nn) * nn_normals).sum(dim=-1).sign()
return sd.view(B, M) / scale
def scan_to_pred_errors(verts_scan, faces_scan, verts_pred, faces_pred):
"""
:param verts_scan: (B, N_s, 3)
:param faces_scan: (B, F_s, 3)
:param verts_pred: (B, N_p, 3)
:param faces_pred: (B, F_p, 3)
"""
B, N_s, _ = verts_scan.shape
N_p = verts_pred.shape[1]
assert verts_scan.shape == (B, N_s, 3) and verts_pred.shape == (B, N_p, 3)
meshes = Meshes(verts=verts_scan, faces=faces_scan)
normals_scan = meshes.verts_normals_padded()
meshes = Meshes(verts=verts_pred, faces=faces_pred)
normals_pred = meshes.verts_normals_padded()
assert normals_pred.shape == (B, N_p, 3)
sd_err, _, _, _, normals_proj = proj_pts_to_mesh(verts_scan, verts_pred, faces_pred, normals_pred)
cos_err = F.cosine_similarity(normals_scan, normals_proj, dim=-1)
assert sd_err.shape == (B, N_s) and cos_err.shape == (B, N_s)
return sd_err, cos_err
def proj_pts_to_uv(pts, verts, faces, verts_uv, faces_uv, uv_feat=None):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param verts_uv: (B, N_, 2)
:param faces_uv: (B, F, 3)
:param uv_feat: (B, C, H, W)
"""
B, M, _ = pts.shape
N = verts.shape[1]
F_ = faces.shape[1]
N_ = verts_uv.shape[1]
assert pts.shape == (B, M, 3) and verts.shape == (B, N, 3) and faces.shape == (B, F_, 3) and verts_uv.shape == (B, N_, 2) and faces_uv.shape == (B, F_, 3)
sd, v_proj, _, bary_w, _, pts_faces_idxs = proj_pts_to_mesh(pts, verts, faces, return_idxs=True)
pts_faces_idxs_packed = pts_faces_idxs.view(B * M,) # (P,)
verts_uv_ = torch.cat([verts_uv, torch.zeros_like(verts_uv[:, :, :1])], dim=-1) # (B, N_, 3)
meshes = Meshes(verts=verts_uv_, faces=faces_uv)
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
pts_verts_uv = verts_packed[faces_packed][pts_faces_idxs_packed][:, :, :2] # (P, 3, 2)
pts_uv = (pts_verts_uv * bary_w.view(B * M, 3, 1)).sum(dim=-2)
pts_uv = pts_uv.view(B, M, 1, 2)
_, C, H, W = uv_feat.shape
assert uv_feat.shape == (B, C, H, W)
# pts_feat = F.grid_sample(uv_feat, pts_uv, mode='bilinear', align_corners=False) # (B, C, M, 1)
grid_sample = MyGridSample.apply
pts_feat = grid_sample(pts_uv, uv_feat) # (B, C, M, 1)
assert pts_feat.shape == (B, C, M, 1)
pts_feat = pts_feat.permute(0, 2, 1, 3).squeeze(-1).contiguous()
assert pts_feat.shape == (B, M, C)
return sd, v_proj, pts_feat, pts_uv.view(B, M, 2)
def lbs(
betas,
pose,
v_template,
shapedirs,
posedirs,
J_regressor,
parents,
lbs_weights,
pose2rot: bool = True,
):
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
v_template torch.tensor BxVx3
The template mesh that will be deformed
        shapedirs : torch.tensor Vx3xNB
            The tensor of PCA shape displacements
        posedirs : torch.tensor Px(V * 3)
            The pose blend shape basis, flattened per vertex
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
        Returns
        -------
        verts: torch.tensor BxVx3
            The vertices of the mesh after applying the shape and pose
            displacements.
        joints: torch.tensor BxJx3
            The joints of the model
        A: torch.tensor BxJx4x4
            The relative rigid transforms of each joint returned by
            batch_rigid_transform, used downstream for skinning queries
    '''
batch_size = max(betas.shape[0], pose.shape[0])
device, dtype = betas.device, betas.dtype
# Add shape contribution
v_shaped = v_template + blend_shapes(betas, shapedirs)
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
# 3. Add pose blend shapes
# N x J x 3 x 3
ident = torch.eye(3, dtype=dtype, device=device)
if pose2rot:
rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
[batch_size, -1, 3, 3])
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = torch.matmul(
pose_feature, posedirs).view(batch_size, -1, 3)
else:
pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
rot_mats = pose.view(batch_size, -1, 3, 3)
pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
posedirs).view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
# (N x V x (J + 1)) x (N x (J + 1) x 16)
num_joints = J_regressor.shape[0]
T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
.view(batch_size, -1, 4, 4)
homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
dtype=dtype, device=device)
v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
verts = v_homo[:, :, :3, 0]
return verts, J_transformed, A
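# Illustrative sanity check (not part of the original file): run lbs on a toy
# two-joint model, assuming the smplx helpers used above (blend_shapes,
# vertices2joints, batch_rodrigues, batch_rigid_transform) are imported as in
# this file. With zero betas and an identity pose, skinning must return the
# template unchanged.
def _demo_lbs():
    B, V, J, NB = 1, 4, 2, 1
    betas = torch.zeros(B, NB)
    pose = torch.zeros(B, J * 3)  # identity axis-angle rotations
    v_template = torch.rand(V, 3)
    shapedirs = torch.zeros(V, 3, NB)
    posedirs = torch.zeros((J - 1) * 9, V * 3)
    J_regressor = torch.rand(J, V)
    parents = torch.tensor([-1, 0])
    lbs_weights = torch.softmax(torch.rand(V, J), dim=-1)
    verts, joints, A = lbs(betas, pose, v_template, shapedirs, posedirs,
                           J_regressor, parents, lbs_weights)
    print(torch.allclose(verts[0], v_template, atol=1e-5))  # True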
# Classes -------------------------------------------------------------------------------------------------------
class CAPEJson():
"""
CAPE .bin Structure:
'subject'
'cloth_type'
'seqs'
for seq in seqs:
'id'
'seq_name': longlong_athletics_trial1
'frames'
for frame in frames:
'npz_path'
'smooth_mesh_path': New field!!!
"""
def __init__(self, bin_path=None):
self.data = None
if bin_path is not None:
self.load_bin_file(bin_path)
def load_bin_file(self, bin_path):
with open(bin_path, 'rb') as f:
self.data = pickle.load(f)
def dump_bin_file(self, bin_path):
with open(bin_path, 'wb') as f:
pickle.dump(self.data, f)
def append_frames(self, frames, npz_path):
frames.append({
'npz_path': npz_path
})
return frames
def append_seqs(self, seqs, seq_name, frames):
seqs.append({
'id': len(seqs),
'seq_name': seq_name,
'frames': frames
})
return seqs
def set_data(self, subject, cloth_type, seqs):
self.data = {
'subject': subject,
'cloth_type': cloth_type,
'seqs': seqs
}
def num_of_seqs(self):
return len(self.data['seqs'])
def num_of_frames(self):
count = 0
for seq in self.data['seqs']:
count += len(seq['frames'])
return count
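# Illustrative usage sketch (not part of the original file): build and save a
# minimal one-sequence index; the paths below are hypothetical.
def _demo_cape_json():
    cj = CAPEJson()
    frames = cj.append_frames([], npz_path='seqs/longlong_athletics_trial1/000001.npz')
    seqs = cj.append_seqs([], 'longlong_athletics_trial1', frames)
    cj.set_data(subject='00134', cloth_type='longlong', seqs=seqs)
    cj.dump_bin_file('/tmp/cape_index.bin')
    print(cj.num_of_seqs(), cj.num_of_frames())  # 1 1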
class MySMPL(smplx.SMPLLayer):
def __init__(
self, model_path: str,
kid_template_path: str = '',
data_struct = None,
create_betas: bool = True,
betas = None,
num_betas: int = 10,
create_global_orient: bool = True,
global_orient = None,
create_body_pose: bool = True,
body_pose = None,
create_transl: bool = True,
transl = None,
dtype=torch.float32,
batch_size: int = 1,
joint_mapper=None,
gender: str = 'neutral',
age: str = 'adult',
vertex_ids = None,
v_template = None,
**kwargs
) -> None:
super().__init__(model_path=model_path, kid_template_path=kid_template_path, data_struct=data_struct, betas=betas, num_betas=num_betas,
global_orient=global_orient, body_pose=body_pose, transl=transl, dtype=dtype, batch_size=batch_size, joint_mapper=joint_mapper,
gender=gender, age=age, vertex_ids=vertex_ids, v_template=v_template, **kwargs)
self.register_buffer('pose_cano', torch.zeros((1, self.NUM_BODY_JOINTS * 3), dtype=dtype))
self.faces = self.faces_tensor
def forward(
self,
poses,
betas = None,
body_pose = None,
global_orient = None,
transl = None,
return_verts=True,
return_full_pose: bool = False,
pose2rot: bool = True,
**kwargs
) -> SMPLOutput:
''' Forward pass for the SMPL model
Parameters
----------
            global_orient: torch.tensor, optional, shape Bx3
                If given, ignore the member variable and use it as the global
                rotation of the body. Useful if someone wishes to predict this
                with an external model. (default=None)
            betas: torch.tensor, optional, shape BxN_b
                If given, ignore the member variable `betas` and use it
                instead. For example, it can be used if shape parameters
                `betas` are predicted from some external model.
                (default=None)
            body_pose: torch.tensor, optional, shape Bx(J*3)
                If given, ignore the member variable `body_pose` and use it
                instead. For example, it can be used if the pose of the body
                joints is predicted from some external model. It should be a
                tensor that contains joint rotations in axis-angle format.
                (default=None)
            transl: torch.tensor, optional, shape Bx3
                If given, ignore the member variable `transl` and use it
                instead. For example, it can be used if the translation
                `transl` is predicted from some external model.
                (default=None)
return_verts: bool, optional
Return the vertices. (default=True)
return_full_pose: bool, optional
Returns the full axis-angle pose vector (default=False)
            Returns
            -------
            output: SMPLOutput with the posed vertices, joints and full pose;
                the per-joint rigid transforms are attached as `output.A`.
                Note that this override parses `transl`, `global_orient` and
                `body_pose` from the packed `poses` argument and ignores the
                corresponding keyword arguments.
        '''
transl, global_orient, body_pose = poses[:, :3], poses[:, 3:6], poses[:, 6:]
apply_trans = True
full_pose = torch.cat([global_orient, body_pose], dim=1)
batch_size = poses.shape[0]
betas = torch.zeros([batch_size, self.num_betas], dtype=self.dtype, device=poses.device)
vertices, joints, A = lbs(betas, full_pose, self.v_template,
self.shapedirs, self.posedirs,
self.J_regressor, self.parents,
self.lbs_weights, pose2rot=pose2rot)
joints = self.vertex_joint_selector(vertices, joints)
# Map the joints to the current dataset
if self.joint_mapper is not None:
joints = self.joint_mapper(joints)
if apply_trans:
joints += transl.unsqueeze(dim=1)
vertices += transl.unsqueeze(dim=1)
output = SMPLOutput(vertices=vertices if return_verts else None,
global_orient=global_orient,
body_pose=body_pose,
joints=joints,
betas=betas,
full_pose=full_pose if return_full_pose else None)
output.A = A
return output
@classmethod
def compute_poses_quat(cls, poses):
"""
:param poses: (B, 69)
"""
B, _ = poses.shape
J = cls.NUM_BODY_JOINTS
poses = poses.view(B, J, 3)
poses_quat = axis_angle_to_quaternion(poses)
assert poses_quat.shape == (B, J, 4)
return poses_quat
SMPL_JOINT_NAMES = [
'pelvis',
'left_hip',
'right_hip',
'spine1',
'left_knee',
'right_knee',
'spine2',
'left_ankle',
'right_ankle',
'spine3',
'left_foot',
'right_foot',
'neck',
'left_collar',
'right_collar',
'head',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hand',
'right_hand'
]
def batched_gradient(features):
"""
Compute gradient of a batch of feature maps
:param features: a 3D tensor for a batch of feature maps, dim: (N, C, H, W)
:return: gradient maps of input features, dim: (N, 2*C, H, W), the last row and column are padded with zeros
(N, 0:C, H, W) = dI/dx, (N, C:2C, H, W) = dI/dy
"""
H = features.size(-2)
W = features.size(-1)
C = features.size(1)
N = features.size(0)
grad_x = (features[:, :, :, 2:] - features[:, :, :, :W - 2]) / 2.0
grad_x = F.pad(grad_x, (1, 1, 0, 0), mode='replicate')
grad_y = (features[:, :, 2:, :] - features[:, :, :H - 2, :]) / 2.0
grad_y = F.pad(grad_y, (0, 0, 1, 1), mode='replicate')
grad = torch.cat([grad_x.view(N, C, H, W), grad_y.view(N, C, H, W)], dim=1)
return grad
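# Illustrative check (not part of the original file): on a horizontal ramp
# image the x-gradient is 1 everywhere (replicate padding keeps the borders
# consistent) and the y-gradient is 0.
def _demo_batched_gradient():
    W = 8
    ramp = torch.arange(W, dtype=torch.float).view(1, 1, 1, W).expand(1, 1, W, W)
    grad = batched_gradient(ramp.contiguous())
    print(grad[:, 0].unique(), grad[:, 1].abs().max())  # tensor([1.]) tensor(0.)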
class MyGridSample(torch.autograd.Function):
@staticmethod
def forward(ctx, grid, feat):
vert_feat = F.grid_sample(feat, grid, mode='bilinear', padding_mode='zeros', align_corners=True).detach()
ctx.save_for_backward(feat, grid)
return vert_feat
@staticmethod
def backward(ctx, grad_output):
feat, grid = ctx.saved_tensors
# Gradient for grid
N, C, H, W = feat.shape
_, Hg, Wg, _ = grid.shape
feat_grad = batched_gradient(feat) # dim: (N, 2*C, H, W)
grid_grad = F.grid_sample(feat_grad, grid, mode='bilinear', padding_mode='zeros', align_corners=True) # dim: (N, 2*C, Hg, Wg)
grid_grad = grid_grad.view(N, 2, C, Hg, Wg).permute(0, 3, 4, 2, 1).contiguous() # dim: (N, Hg, Wg, C, 2)
grad_output_perm = grad_output.permute(0, 2, 3, 1).contiguous() # dim: (N, Hg, Wg, C)
grid_grad = torch.bmm(grad_output_perm.view(N * Hg * Wg, 1, C),
grid_grad.view(N * Hg * Wg, C, 2)).view(N, Hg, Wg, 2)
grid_grad[:, :, :, 0] = grid_grad[:, :, :, 0] * (W - 1) / 2
grid_grad[:, :, :, 1] = grid_grad[:, :, :, 1] * (H - 1) / 2
# Gradient for feat
feat_d = feat.detach()
feat_d.requires_grad = True
grid_d = grid.detach()
grid_d.requires_grad = True
with torch.enable_grad():
vert_feat = F.grid_sample(feat_d, grid_d, mode='bilinear', padding_mode='zeros', align_corners=True)
vert_feat.backward(grad_output.detach())
feat_grad = feat_d.grad
return grid_grad, feat_grad
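# Illustrative check (not part of the original file): MyGridSample matches
# F.grid_sample in the forward pass; it only differs in the backward, where
# the grid gradient is computed from central-difference image gradients
# instead of the exact derivative of bilinear interpolation.
def _demo_my_grid_sample():
    feat = torch.rand(1, 4, 16, 16)
    grid = torch.rand(1, 8, 8, 2) * 2 - 1  # normalized coords in [-1, 1]
    out = MyGridSample.apply(grid, feat)
    ref = F.grid_sample(feat, grid, mode='bilinear', padding_mode='zeros',
                        align_corners=True)
    print(torch.allclose(out, ref))  # True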
|
AutoAvatar-main
|
utils/CAPE.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
class Embedder(nn.Module):
def __init__(self, nch, n_freq):
super().__init__()
self.nch = nch
self.n_freq = n_freq
self.out_ch = nch
self.freq_fn = [(1, lambda x: x)]
for i in range(n_freq):
for fn in [torch.sin, torch.cos]:
self.freq_fn.append((2 ** i, fn))
self.out_ch += nch
def forward(self, x):
out = torch.cat([fn(x * freq) for freq, fn in self.freq_fn], dim=-1)
assert out.shape[-1] == self.out_ch
return out
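# Illustrative usage sketch (not part of the original file): NeRF-style
# positional encoding of 3D inputs with 4 frequency bands gives
# 3 + 3 * 2 * 4 = 27 output channels.
def _demo_embedder():
    emb = Embedder(nch=3, n_freq=4)
    print(emb(torch.rand(5, 3)).shape)  # torch.Size([5, 27])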
class MLP(nn.Module):
def __init__(self, nchs, skips, act, w_norm, act_last, w_norm_last, init_zero_last=False):
super().__init__()
self.nchs = copy.deepcopy(nchs)
self.skips = copy.deepcopy(skips)
self.mlp = nn.ModuleList()
for i in range(len(nchs) - 1):
in_ch = nchs[i] if i not in skips else nchs[i] + nchs[0]
out_ch = nchs[i + 1]
if i < len(nchs) - 2:
layer = nn.utils.weight_norm(nn.Linear(in_ch, out_ch)) if w_norm else nn.Linear(in_ch, out_ch)
else:
assert i == len(nchs) - 2
layer = nn.utils.weight_norm(nn.Linear(in_ch, out_ch)) if w_norm_last else nn.Linear(in_ch, out_ch)
if init_zero_last:
torch.nn.init.zeros_(layer.weight)
if hasattr(layer, 'bias') and layer.bias is not None:
torch.nn.init.zeros_(layer.bias)
self.mlp.append(layer)
if act == 'softplus':
self.act = nn.Softplus(beta=100, threshold=20)
elif act == 'linear':
self.act = nn.Identity()
else:
            raise NotImplementedError('Activation type \'%s\' is not implemented!' % act)
if act_last == 'softplus':
self.act_last = nn.Softplus(beta=100, threshold=20)
elif act_last == 'linear':
self.act_last = nn.Identity()
else:
            raise NotImplementedError('Activation type \'%s\' is not implemented!' % act_last)
def forward(self, x):
x_ = x
for i in range(len(self.mlp)):
if i in self.skips:
x_ = torch.cat([x_, x], dim=-1)
x_ = self.mlp[i](x_)
x_ = self.act(x_) if i < len(self.mlp) - 1 else self.act_last(x_)
return x_
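# Illustrative usage sketch (not part of the original file): a weight-normed
# softplus MLP whose second layer re-ingests the input via the skip list, as
# the SDF decoders in this repo do. The layer widths here are arbitrary.
def _demo_mlp():
    mlp = MLP(nchs=[39, 128, 128, 1], skips=[1], act='softplus', w_norm=True,
              act_last='linear', w_norm_last=False)
    print(mlp(torch.rand(10, 39)).shape)  # torch.Size([10, 1])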
class Conv2dBias(nn.Conv2d):
def __init__(self, in_ch, out_ch, kernel_size, size, stride, padding, use_bias=True, *args, **kwargs):
super().__init__(in_ch, out_ch, bias=False, kernel_size=kernel_size, stride=stride, padding=padding, *args, **kwargs)
self.use_bias = use_bias
if self.use_bias:
self.register_parameter('bias', nn.Parameter(torch.zeros(1, out_ch, size, size), requires_grad=True))
def forward(self, x):
out = F.conv2d(x, self.weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
if self.use_bias:
out = out + self.bias
return out
class ConvDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
self.conv1 = Conv2dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv2dBias(in_ch, out_ch, kernel_size=kernel_size, size=size//2, stride=2, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=2, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
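# Illustrative usage sketch (not part of the original file): ConvDownBlock
# halves the spatial resolution via a strided conv plus a 1x1 skip branch;
# `size` must be the input resolution because Conv2dBias stores a per-pixel
# bias of that size.
def _demo_conv_down_block():
    block = ConvDownBlock(in_ch=8, out_ch=16, size=64)
    print(block(torch.rand(2, 8, 64, 64)).shape)  # torch.Size([2, 16, 32, 32])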
class ConvUpBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.conv1 = Conv2dBias(in_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv2dBias(out_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
x = self.upsample(x)
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class ConvBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
self.conv1 = Conv2dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv2dBias(in_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class Conv1dBias(nn.Conv1d):
def __init__(self, in_ch, out_ch, kernel_size, size, stride, padding, use_bias=True, *args, **kwargs):
super().__init__(in_ch, out_ch, bias=False, kernel_size=kernel_size, stride=stride, padding=padding, *args, **kwargs)
self.use_bias = use_bias
if self.use_bias:
self.register_parameter('bias', nn.Parameter(torch.zeros(1, out_ch, size), requires_grad=True))
def forward(self, x):
out = F.conv1d(x, self.weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
if self.use_bias:
out = out + self.bias
return out
class Conv1dDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
# assert size % 2 == 0
size_half = size // 2 if size % 2 == 0 else (size + 1) // 2
self.conv1 = Conv1dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv1dBias(in_ch, out_ch, kernel_size=kernel_size, size=size_half, stride=2, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv1d(in_ch, out_ch, kernel_size=1, stride=2, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class Conv1dBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
self.conv1 = Conv1dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv1dBias(in_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv1d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
|
AutoAvatar-main
|
models/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
import copy
import shutil, inspect
import torch
import torch.nn.functional as F
import torch.optim as optim
import pytorch_lightning as pl
from pytorch3d.io import save_ply
import utils.CAPE as cape_utils
from utils.implicit import reconstruction
from models.std.nets import DynNet
import models.std.visual as visual
class Implicit_Trainbox(pl.LightningModule):
def __init__(self, args, log_dir, resolution, recurrent=True, eval_frames=None):
super().__init__()
self.args = copy.deepcopy(args)
self.log_dir = log_dir
self.resolution = resolution
self.recurrent = recurrent
self.eval_frames = eval_frames
if not os.path.exists(log_dir):
os.mkdir(log_dir)
if not os.path.exists(os.path.join(log_dir, 'ckpt')):
os.mkdir(os.path.join(log_dir, 'ckpt'))
if not os.path.exists(os.path.join(log_dir, 'net_def')):
os.mkdir(os.path.join(log_dir, 'net_def'))
if not os.path.exists(os.path.join(log_dir, 'mesh')):
os.mkdir(os.path.join(log_dir, 'mesh'))
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(inspect.getfile(DynNet), os.path.join(log_dir, 'net_def'))
self.dyn_net = DynNet(args)
self.itr = 0
def save_ckpt(self):
torch.save(self.dyn_net.state_dict(), os.path.join(self.log_dir, 'ckpt', 'dyn_net_%06d.pth' % self.itr))
def load_ckpt(self, itr, log_dir):
self.dyn_net.load_state_dict(torch.load(os.path.join(log_dir, 'ckpt', 'dyn_net_%06d.pth' % itr), map_location='cpu'))
def preprocess(self, batch):
verts_detail, faces_detail, verts_smt, faces_smt, poses = batch['verts_detail'], batch['faces_detail'], batch['verts_smt'], batch['faces_smt'], batch['poses']
B, T, _ = poses.shape
N = self.dyn_net.smpl_model.v_template.shape[0]
verts_smpl = self.dyn_net.smpl_model(poses.view(B * T, 75)).vertices.view(B, T, N, 3)
return verts_detail, faces_detail, verts_smt, faces_smt, poses, verts_smpl
def train_or_valid_step(self, batch, batch_idx, is_train):
verts_detail_all, faces_detail_all, verts_smt_all, faces_smt_all, poses_all, verts_smpl_all = self.preprocess(batch)
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
K = self.args['model']['ob_vals'][-1]
assert T_next == 1
sd_errs = []
cos_errs = []
obsdf_rollout = None
loss_surf_sdf = 0
loss_surf_grad = 0
loss_igr = 0
loss_o = 0
end_idx = self.args['train']['n_rollout']
if self.eval_frames is not None and batch_idx + T_hist == self.eval_frames[0]:
end_idx = poses_all.shape[1] - T + 1
for i in range(end_idx):
verts_detail = verts_detail_all[i:i+T]
faces_detail = faces_detail_all[i:i+T]
verts_smt = verts_smt_all[i:i+T]
faces_smt = faces_smt_all[i:i+T]
poses = poses_all[:, i:i+T]
verts_smpl = verts_smpl_all[:, i:i+T]
N = verts_smpl.shape[2]
B = poses.shape[0]
if poses.shape[1] < T:
break
verts = verts_smt
faces = faces_smt
verts_gt = verts_smt if not self.args['model']['use_detail'] else verts_detail
faces_gt = faces_smt if not self.args['model']['use_detail'] else faces_detail
bbmin = verts_smpl.min(dim=2)[0] - 0.1
bbmax = verts_smpl.max(dim=2)[0] + 0.1
surf_pts, surf_normals, igr_pts, bbox_pts, rand_pts = cape_utils.sample_igr_pts(verts_gt[-1], faces_gt[-1], bbmin[:, -1], bbmax[:, -1], self.args)
if self.args['model']['stage'] == 'shape_enc_dec':
obsdf, _ = self.dyn_net.shapes_to_obsdf(verts[-1], poses[:, -1], mode='meshes', faces=faces[-1])
assert obsdf.shape == (B, N, K)
obsdf = obsdf[:, None]
if self.args['model']['stage'] == 'auto_regr':
if obsdf_rollout is None:
obsdf = [self.dyn_net.shapes_to_obsdf(verts[j], poses[:, j], mode='meshes', faces=faces[j])[0] for j in range(T_hist)]
obsdf = torch.stack(obsdf, dim=1)
assert obsdf.shape == (B, T_hist, N, K)
else:
obsdf = obsdf_rollout.detach()
shapes = self.dyn_net(obsdf, poses)
if i + 1 < end_idx and self.recurrent:
with torch.no_grad():
obsdf_new, _ = self.dyn_net.shapes_to_obsdf(shapes, poses[:, -1], mode='nets')
obsdf_rollout = torch.cat([obsdf[:, 1:], obsdf_new[:, None]], dim=1).detach()
assert obsdf_rollout.shape == (B, T_hist, N, K)
if self.eval_frames is None:
# Losses
surf_sdf, surf_sdf_grad = self.dyn_net.query_sdf_with_grad(surf_pts, poses[:, -1], shapes)
rand_sdf, rand_sdf_grad = self.dyn_net.query_sdf_with_grad(rand_pts, poses[:, -1], shapes)
bbox_sdf = rand_sdf[:, self.args['train']['n_pts_scan_igr']:]
assert bbox_sdf.shape == (B, self.args['train']['n_pts_bbox_igr'])
loss_surf_sdf += surf_sdf.abs().mean() / self.args['train']['n_rollout']
loss_surf_grad += torch.norm(surf_sdf_grad - surf_normals, p=2, dim=-1).mean() / self.args['train']['n_rollout']
loss_igr += (torch.norm(rand_sdf_grad, p=2, dim=-1) - 1).pow(2).mean() / self.args['train']['n_rollout']
loss_o += torch.exp(-50.0 * torch.abs(bbox_sdf)).mean() / self.args['train']['n_rollout']
else:
out_dir = os.path.join(self.log_dir, 'mesh', 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, 'gt')):
os.mkdir(os.path.join(out_dir, 'gt'))
if not os.path.exists(os.path.join(out_dir, 'pred')):
os.mkdir(os.path.join(out_dir, 'pred'))
if i == 0:
for j in range(T_hist):
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % j), verts_gt[j][0], faces_gt[j][0])
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % (i + T_hist)), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes[:1])
verts_out, faces_out = out
verts_out, faces_out = torch.from_numpy(verts_out).float().to(poses.device), torch.from_numpy(faces_out.astype(np.int32)).long().to(poses.device)
save_ply(os.path.join(out_dir, 'pred', 'pred_%06d.ply' % (i + T_hist)), verts_out, faces_out)
sd_err, cos_err = cape_utils.scan_to_pred_errors(verts_gt[-1], faces_gt[-1], verts_out[None], faces_out[None])
sd_errs.append(sd_err.cpu())
cos_errs.append(cos_err.cpu())
if self.eval_frames is not None:
with open(os.path.join(out_dir, 'errs.bin'), 'wb') as f:
pickle.dump({'sd_errs': sd_errs, 'cos_errs': cos_errs}, f)
visual.render_meshes(out_dir, start_i=T_hist)
os.system('bash models/std/videos.sh %s %s' % (out_dir, str(T_hist)))
loss = loss_surf_sdf + loss_surf_grad + loss_igr * self.args['train']['lambda_igr'] + loss_o * self.args['train']['lambda_o']
res_dict = {
'verts': verts,
'faces': faces,
'verts_gt': verts_gt,
'faces_gt': faces_gt,
'verts_smpl': verts_smpl,
'poses': poses,
'shapes': shapes,
'bbmin': bbmin,
'bbmax': bbmax,
'loss_surf_sdf': loss_surf_sdf,
'loss_surf_grad': loss_surf_grad,
'loss_igr': loss_igr,
'loss_o': loss_o,
'loss': loss
}
return res_dict
def training_step(self, batch, batch_idx):
res_dict = self.train_or_valid_step(batch, batch_idx, True)
# log
prefix = 'Train'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
# checkpoint
self.itr += 1
if self.itr % self.args['train']['ckpt_step'] == 0:
self.save_ckpt()
return res_dict['loss']
def validation_step(self, batch, batch_idx):
if self.eval_frames is not None and batch_idx + self.args['model']['n_hist_frames'] not in self.eval_frames:
return
res_dict = self.train_or_valid_step(batch, batch_idx, False)
# log
prefix = 'Valid'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
if self.eval_frames is None:
self.compute_meshes(res_dict, batch, batch_idx)
def configure_optimizers(self):
if self.args['model']['stage'] == 'shape_enc_dec':
optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
elif self.args['model']['stage'] == 'auto_regr':
optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
return optimizer
def compute_meshes(self, res_dict, batch, batch_idx):
verts, faces, verts_gt, faces_gt, verts_smpl, poses, shapes, bbmin, bbmax = res_dict['verts'], res_dict['faces'], res_dict['verts_gt'], res_dict['faces_gt'], \
res_dict['verts_smpl'], res_dict['poses'], res_dict['shapes'], res_dict['bbmin'], res_dict['bbmax']
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
if not os.path.exists(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr)):
os.mkdir(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr))
out_dir = os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr, 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for i in range(T_hist):
save_ply(os.path.join(out_dir, 'hist_%d.ply' % i), verts[i][0], faces[i][0])
save_ply(os.path.join(out_dir, 'gt.ply'), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes[:1])
if out != -1:
verts_out, faces_out = out
save_ply(os.path.join(out_dir, 'pred.ply'),
torch.from_numpy(verts_out).float().contiguous(), torch.from_numpy(faces_out.astype(np.int32)).contiguous().long())
def test_step(self, batch, batch_idx):
self.validation_step(batch, batch_idx)
|
AutoAvatar-main
|
models/std/trainbox.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.io import load_ply
from pytorch3d.renderer import look_at_view_transform
from utils.render import render_mesh
def render_meshes(data_dir, start_i=3, gpu_id=0, simplify_mesh=True):
    """
    Render the 'gt' and 'pred' meshes under data_dir to images, plus per-vertex
    signed-distance error visualizations read from data_dir/errs.bin.
    """
if not os.path.exists(os.path.join(data_dir, 'gt_imgs')):
os.mkdir(os.path.join(data_dir, 'gt_imgs'))
if not os.path.exists(os.path.join(data_dir, 'pred_imgs')):
os.mkdir(os.path.join(data_dir, 'pred_imgs'))
# if not os.path.exists(os.path.join(data_dir, 'pred_cano_imgs')):
# os.mkdir(os.path.join(data_dir, 'pred_cano_imgs'))
if not os.path.exists(os.path.join(data_dir, 'errs_imgs')):
os.mkdir(os.path.join(data_dir, 'errs_imgs'))
# pred_names = sorted(os.listdir(os.path.join(data_dir, 'pred_cano')))
# for i, pred_name in enumerate(pred_names):
# verts, faces = load_ply(os.path.join(data_dir, 'pred_cano', pred_name))
# if i == 0:
# center = verts.median(dim=0)[0]
# t = center.clone()
# t[2] += 9
# R, t = look_at_view_transform(eye=t[None], at=center[None])
# image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9, simplify_mesh=simplify_mesh)
# plt.imsave(os.path.join(data_dir, 'pred_cano_imgs', '%06d.jpg' % (i + start_i)), image.cpu().numpy())
pred_names = sorted(os.listdir(os.path.join(data_dir, 'pred')))
for i, pred_name in enumerate(pred_names):
verts, faces = load_ply(os.path.join(data_dir, 'pred', pred_name))
if i == 0:
center = verts.median(dim=0)[0]
t = center.clone()
t[2] += 9
R, t = look_at_view_transform(eye=t[None], at=center[None])
image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9, simplify_mesh=simplify_mesh)
plt.imsave(os.path.join(data_dir, 'pred_imgs', '%06d.jpg' % (i + start_i)), image.cpu().numpy())
gt_names = sorted(os.listdir(os.path.join(data_dir, 'gt')))
with open(os.path.join(data_dir, 'errs.bin'), 'rb') as f:
data = pickle.load(f)
sd_errs, cos_errs = data['sd_errs'], data['cos_errs']
for i, gt_name in enumerate(gt_names):
verts, faces = load_ply(os.path.join(data_dir, 'gt', gt_name))
image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9)
plt.imsave(os.path.join(data_dir, 'gt_imgs', '%06d.jpg' % i), image.cpu().numpy())
if i < start_i:
continue
sd_err = sd_errs[i - start_i][0]
assert sd_err.shape == (verts.shape[0],)
max_dst = 0.1
sd_err_nc = (sd_err / max_dst).clip(min=-1, max=1)
colors = torch.zeros((verts.shape[0], 3))
colors[sd_err_nc < 0] = (1 - sd_err_nc[sd_err_nc < 0].abs())[:, None] * torch.tensor([1, 1, 1])[None] + \
sd_err_nc[sd_err_nc < 0].abs()[:, None] * torch.tensor([1, 0, 0])[None]
colors[sd_err_nc >= 0] = (1 - sd_err_nc[sd_err_nc >= 0])[:, None] * torch.tensor([1, 1, 1])[None] + \
sd_err_nc[sd_err_nc >= 0][:, None] * torch.tensor([0, 1, 1])[None]
image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9, colors=colors.cuda(gpu_id))
plt.imsave(os.path.join(data_dir, 'errs_imgs', '%06d.jpg' % i), image.cpu().numpy())
|
AutoAvatar-main
|
models/std/visual.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from pytorch3d.ops import knn_points, knn_gather
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.transforms.rotation_conversions import matrix_to_axis_angle
import utils.CAPE as cape_utils
from utils.render import *
from models.nets import *
class DynNet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.smpl_model = cape_utils.load_smpl(args)
self.register_buffer('v_template', self.smpl_model.v_template, persistent=False)
self.register_buffer('faces', self.smpl_model.faces, persistent=False)
mask_ids = ['left_wrist', 'right_wrist', 'left_hand', 'right_hand', 'left_ankle', 'right_ankle', 'left_foot', 'right_foot', 'head']
mask_ids = [cape_utils.SMPL_JOINT_NAMES.index(e) for e in mask_ids]
head_hands_feet_mask = self.smpl_model.lbs_weights[:, mask_ids].sum(dim=-1) # (N,)
head_hands_feet_mask[head_hands_feet_mask < 2e-2] = 0
head_hands_feet_mask = (head_hands_feet_mask * 10).clip(max=1)
self.register_buffer('head_hands_feet_mask', head_hands_feet_mask, persistent=False)
W = cape_utils.compute_adjacent_matrix(self.smpl_model.parents, 4)
self.register_buffer('W', W, persistent=False) # (J + 1, J)
data = np.load(args['data']['uv_info'])
verts_uv, faces_uv, v2uv = torch.from_numpy(data['verts_uv']), torch.from_numpy(data['faces_uv']).long(), torch.from_numpy(data['v2uv']).long()
self.geo_fn = UVRender(args, verts_uv, faces_uv, v2uv)
self.register_buffer('head_hands_feet_mask_uv', self.geo_fn.to_uv(head_hands_feet_mask[None, :, None].cuda()), persistent=False)
data = np.load(args['data']['resample_idxs_path'])
self.resample_idxs = data['idxs']
self.shape_enc_dec = ShapeEncDec(args)
if args['model']['stage'] == 'auto_regr':
self.dynamics_net = DynamicsNet(args)
def compute_poses_feat(self, poses):
"""
:param poses: (B, 69)
"""
B = poses.shape[0]
J = self.smpl_model.NUM_BODY_JOINTS
N = self.smpl_model.get_num_verts()
assert poses.shape == (B, 69)
poses_quat = self.smpl_model.compute_poses_quat(poses) # (B, J, 4)
assert poses_quat.shape == (B, J, 4)
lbs_w = self.smpl_model.lbs_weights[None].expand(B, N, J + 1)
lbs_w = torch.einsum('bvj,jl->bvl', lbs_w, self.W)
assert lbs_w.shape == (B, N, J)
poses_feat = poses_quat[:, None] * lbs_w[..., None]
assert poses_feat.shape == (B, N, J, 4)
return poses_feat
    def normalize_sd_delta(self, sd_delta):
        # Sign-preserving log compression: keeps small signed-distance deltas
        # near-linear while damping rare large frame-to-frame changes.
        sd_delta_nc = torch.sign(sd_delta) * (sd_delta.abs() * 1000 + 1).log() * 0.25
return sd_delta_nc
def normalize_globalRt(self, pts, poses):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
"""
B, M, _ = pts.shape
assert poses.shape == (B, 75)
smpl_out = self.smpl_model(poses)
root_T_inv = torch.linalg.inv(smpl_out.A[:, 0]) # (B, 4, 4)
pts_nc = pts - poses[:, None, :3]
pts_nc_homo = torch.ones((B, M, 1), dtype=torch.float, device=pts.device)
pts_nc_homo = torch.cat([pts_nc, pts_nc_homo], dim=-1)
pts_nc = torch.bmm(root_T_inv, pts_nc_homo.transpose(-2, -1)).transpose(-2, -1)[..., :3].contiguous()
assert pts_nc.shape == (B, M, 3)
return pts_nc
def query_sdf_nets(self, pts, poses, shapes, force_coarse=False):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
:param shapes: (B, N, C)
"""
B, M, _ = pts.shape
_, N, C = shapes.shape
assert poses.shape == (B, 75) and shapes.shape == (B, N, C) and N == self.smpl_model.get_num_verts()
verts = self.smpl_model(poses).vertices
assert verts.shape == (B, N, 3)
# Normalize global Rt
verts = self.normalize_globalRt(verts, poses)
pts = self.normalize_globalRt(pts, poses)
# MLP decode
# SMPL resample
meshes = Meshes(verts=verts, faces=self.faces[None].expand(B, -1, -1))
normals = meshes.verts_normals_padded()
assert normals.shape == (B, N, 3)
verts_ori = verts.clone()
shapes_ori = shapes.clone()
verts = verts[:, self.resample_idxs]
normals = normals[:, self.resample_idxs]
shapes = shapes[:, self.resample_idxs]
N_ = verts.shape[1]
assert verts.shape == (B, N_, 3) and normals.shape == (B, N_, 3) and shapes.shape == (B, N_, C)
# KNN
K = 20
C_s = 64
C_ = 128
_, idx, pts_nn = knn_points(pts, verts, K=K, return_nn=True)
assert torch.allclose(pts_nn, knn_gather(verts, idx))
normals_nn = knn_gather(normals, idx)
shapes_nn = knn_gather(shapes, idx)
assert pts_nn.shape == (B, M, K, 3) and normals_nn.shape == (B, M, K, 3) and shapes_nn.shape == (B, M, K, C)
pts_nn = pts_nn - pts[:, :, None]
# Proj pts to mesh
_, pts_proj, _, _, shapes_proj = cape_utils.proj_pts_to_mesh(pts, verts_ori, self.faces[None].expand(B, -1, -1).contiguous(), shapes_ori)
assert pts_proj.shape == (B, M, 3) and shapes_proj.shape == (B, M, C)
pts_proj = pts_proj - pts
# Aggregate
feat_nn = self.shape_enc_dec.pts_mlp(
torch.cat([
self.shape_enc_dec.pts_emb(pts_nn.view(B * M * K, 3)),
self.shape_enc_dec.pts_emb(normals_nn.view(B * M * K, 3)),
shapes_nn.view(B * M * K, C)[:, :C_s]
], dim=-1)
).view(B, M, K, C_)
feat_proj = self.shape_enc_dec.proj_pts_mlp(
torch.cat([
self.shape_enc_dec.pts_emb(pts_proj.view(B * M, 3)),
shapes_proj.view(B * M, C)[:, :C_s]
], dim=-1)
).view(B, M, 1, C_)
feat = torch.cat([feat_nn, feat_proj], dim=-2)
assert feat.shape == (B, M, K + 1, C_)
w = self.shape_enc_dec.weights_fc(feat.view(B * M * (K + 1), C_)).view(B, M, K + 1, 1)
w = torch.softmax(w, dim=-2)
feat = (feat * w).sum(dim=-2)
assert feat.shape == (B, M, C_)
sdf = self.shape_enc_dec.sdf_mlp(feat.view(B * M, C_)).view(B, M)
return sdf
def compute_obpts(self, poses):
"""
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
verts_smpl = self.smpl_model(poses).vertices
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B, N, 3)
offset = torch.linspace(*self.args['model']['ob_vals'], device=poses.device)[None, None, :, None] * normals_smpl[:, :, None, :] # (B, N, K, 3)
obpts = offset + verts_smpl[:, :, None]
return obpts
def shapes_to_obsdf(self, shapes, poses, mode='nets', faces=None):
"""
:param shapes: (B, N, C)
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
C = shapes.shape[-1]
assert poses.shape == (B, 75) and shapes.shape[0] == B
# Compute observer pts
obpts = self.compute_obpts(poses)
assert obpts.shape == (B, N, K, 3)
# Query sdf
if mode == 'meshes':
assert C == 3 and faces is not None
sdf, _, _, _, _ = cape_utils.proj_pts_to_mesh(obpts.view(B, N * K, 3), shapes, faces)
sdf = sdf.view(B, N, K)
elif mode == 'nets':
assert shapes.shape == (B, N, C)
sdf = self.query_sdf_nets(obpts.view(B, N * K, 3), poses, shapes, force_coarse=True)
sdf = sdf.view(B, N, K)
return sdf, obpts
def query_sdf_with_grad(self, pts, poses, shapes):
B, M, _ = pts.shape
C = shapes.shape[-1]
N = self.smpl_model.get_num_verts()
assert pts.shape == (B, M, 3) and poses.shape == (B, 75) and shapes.shape == (B, N, C)
with torch.enable_grad():
pts.requires_grad_(True)
sdf = self.query_sdf_nets(pts, poses, shapes)
assert sdf.shape == (B, M)
sdf_grad = autograd.grad([sdf.sum()], [pts], retain_graph=True, create_graph=True)[0]
assert sdf_grad.shape == (B, M, 3)
return sdf, sdf_grad
def enc_shapes_to_sdf(self, obsdf, poses):
"""
:param obsdf: (B, T, N, K)
:param poses: (B, T, 75)
"""
B, T, _ = poses.shape
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
assert obsdf.shape == (B, T, N, K) and poses.shape == (B, T, 75)
# Compute obpts_uv
obpts = self.compute_obpts(poses.view(B * T, 75))
assert obpts.shape == (B * T, N, K, 3)
obpts = self.normalize_globalRt(obpts.view(B * T, N * K, 3), poses.view(B * T, 75))
obpts_uv = self.geo_fn.to_uv(obpts.view(B * T, N, K * 3))
assert obpts_uv.shape == (B * T, K * 3, H, W)
# Compute obsdf_uv
obsdf_uv = self.geo_fn.to_uv(obsdf.view(B * T, N, K))
assert obsdf_uv.shape == (B * T, K, H, W)
# Net forward
in_feat = torch.cat([obpts_uv, obsdf_uv * 20], dim=1)
shapes_uv = self.shape_enc_dec.shape_enc(in_feat)
C = shapes_uv.shape[1]
feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
shapes = self.geo_fn.from_uv(feat_uv_)
assert shapes.shape == (B * T, N, C) and shapes_uv.shape == (B * T, C, H, W)
shapes = shapes.view(B, T, N, C)
shapes_uv = shapes_uv.view(B, T, C, H, W)
return shapes, shapes_uv
def forward(self, obsdf, poses):
"""
:param obsdf: (B, T, N, K)
:param poses: (B, T_, 75)
"""
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
if self.args['model']['stage'] == 'shape_enc_dec':
B, T, _ = poses.shape
assert obsdf.shape == (B, T, N, K) and poses.shape == (B, T, 75) and T == 1
shapes, shapes_uv = self.enc_shapes_to_sdf(obsdf, poses)
shapes = shapes.squeeze(1)
C = shapes.shape[-1]
assert shapes.shape == (B, N, C)
elif self.args['model']['stage'] == 'auto_regr':
B, T_, _ = poses.shape
T = obsdf.shape[1]
J_ = self.smpl_model.NUM_BODY_JOINTS + 1
J = self.smpl_model.NUM_BODY_JOINTS
assert obsdf.shape == (B, T, N, K) and poses.shape == (B, T_, 75) and T_ - T == 1 and T == self.args['model']['n_hist_frames']
poses_ref = poses[:, -1:].expand(B, T_, 75).contiguous()
# Compute obpts_uv
obpts = self.compute_obpts(poses.view(B * T_, 75))
assert obpts.shape == (B * T_, N, K, 3)
obpts = self.normalize_globalRt(obpts.view(B * T_, N * K, 3), poses_ref.view(B * T_, 75))
obpts_uv = self.geo_fn.to_uv(obpts.view(B * T_, N, K * 3))
assert obpts_uv.shape == (B * T_, K * 3, H, W)
obpts_uv = obpts_uv.view(B, T_ * K * 3, H, W)
# Compute poses velocity
poses_prev = poses[:, :-1].clone()
poses_last = poses[:, 1:].clone()
poses_vel = torch.zeros_like(poses_last)
assert poses_prev.shape == (B, T, 75) and poses_last.shape == (B, T, 75) and poses_vel.shape == (B, T, 75)
poses_vel[..., :3] = poses_last[..., :3] - poses_prev[..., :3]
rot_prev = axis_angle_to_matrix(poses_prev[..., 3:].reshape(B * T * J_, 3))
rot_last = axis_angle_to_matrix(poses_last[..., 3:].reshape(B * T * J_, 3))
rot_vel = torch.bmm(rot_last, torch.linalg.inv(rot_prev))
assert rot_vel.shape == (B * T * J_, 3, 3)
poses_vel[..., 3:] = matrix_to_axis_angle(rot_vel).view(B, T, J_ * 3)
poses_vel_feat = self.compute_poses_feat(poses_vel[..., 6:].reshape(B * T, 69))
assert poses_vel_feat.shape == (B * T, N, J, 4)
poses_vel_feat = torch.cat([poses_vel_feat.view(B * T, N, J * 4), poses_vel[..., :6].reshape(B * T, 1, 6).expand(B * T, N, 6)], dim=-1)
assert poses_vel_feat.shape == (B * T, N, J * 4 + 6)
poses_vel_feat_uv = self.geo_fn.to_uv(poses_vel_feat)
assert poses_vel_feat_uv.shape == (B * T, J * 4 + 6, H, W)
poses_vel_feat_uv = self.dynamics_net.local_poses_vel_conv_block(poses_vel_feat_uv).view(B, T * 32, H, W)
poses_vel_feat_uv = self.dynamics_net.temp_poses_vel_conv_block(poses_vel_feat_uv)
assert poses_vel_feat_uv.shape == (B, 32, H, W)
# Compute pose_feat
pose_feat = self.compute_poses_feat(poses[:, -1, 6:].clone())
assert pose_feat.shape == (B, N, J, 4)
pose_feat_uv = self.geo_fn.to_uv(pose_feat.view(B, N, J * 4))
assert pose_feat_uv.shape == (B, J * 4, H, W)
pose_feat_uv = self.dynamics_net.local_pose_conv_block(pose_feat_uv)
assert pose_feat_uv.shape == (B, 32, H, W)
# Compute obsdf_feat_uv
obsdf_delta = obsdf[:, 1:] - obsdf[:, :-1]
assert obsdf_delta.shape == (B, T - 1, N, K)
obsdf_delta = self.normalize_sd_delta(obsdf_delta)
obsdf_delta = obsdf_delta.permute(0, 2, 1, 3).contiguous()
assert obsdf_delta.shape == (B, N, T - 1, K)
obsdf_feat = torch.cat([obsdf_delta.view(B, N, (T - 1) * K), obsdf[:, -1] * 20], dim=-1)
assert obsdf_feat.shape == (B, N, T * K)
obsdf_feat_uv = self.geo_fn.to_uv(obsdf_feat)
assert obsdf_feat_uv.shape == (B, T * K, H, W)
# Unet forward
feat_uv = torch.cat([obpts_uv, poses_vel_feat_uv, pose_feat_uv, obsdf_feat_uv], dim=1)
shapes_uv_delta = self.dynamics_net.unet(feat_uv)
_, shapes_uv_prev = self.enc_shapes_to_sdf(obsdf[:, -1:], poses[:, -2:-1])
shapes_uv = shapes_uv_prev[:, 0] + shapes_uv_delta
C = shapes_uv.shape[1]
feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
shapes = self.geo_fn.from_uv(feat_uv_)
assert shapes.shape == (B, N, C)
return shapes
class ShapeEncDec(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.sdf_mlp = MLP([128, 128, 1], [-1], 'softplus', True, 'linear', False)
self.pts_emb = Embedder(3, 4)
self.proj_pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.pts_mlp = MLP([64 + self.pts_emb.out_ch * 2, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.weights_fc = nn.Linear(128, 1)
self.shape_enc = ShapeEnc(args)
self.register_parameter('uv_bias', nn.Parameter(torch.normal(0, 0.01, (1, 64, 256, 256), dtype=torch.float), requires_grad=True))
class DynamicsNet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.local_pose_conv_block = ConvBlock(92, 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.local_poses_vel_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.temp_poses_vel_conv_block = ConvBlock(32 * args['model']['n_hist_frames'], 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.unet = Unet(args)
class ShapeEnc(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv_in = ConvBlock(args['model']['ob_vals'][-1] * 4, 64, 256)
self.conv0 = ConvDownBlock(64, 64, 256)
self.conv1 = ConvDownBlock(64, 64, 128)
self.conv2 = ConvUpBlock(64, 64, 128)
self.conv3 = ConvUpBlock(64, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv0(x)
x1 = self.conv1(x0)
x2 = self.conv2(x1) + x0
x3 = self.conv3(x2) + x
out = self.conv_out(x3)
return out
class Unet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv_in = ConvBlock((args['model']['n_hist_frames'] + args['model']['n_batch_frames'] * 3) * args['model']['ob_vals'][-1] + 64, 64, 256)
self.conv_down0 = ConvDownBlock(64, 128, 256)
self.conv_down1 = ConvDownBlock(128, 256, 128)
self.conv_down2 = ConvDownBlock(256, 256, 64)
self.conv_down3 = ConvDownBlock(256, 256, 32)
self.conv_up3 = ConvUpBlock(256, 256, 32)
self.conv_up2 = ConvUpBlock(256, 256, 64)
self.conv_up1 = ConvUpBlock(256, 128, 128)
self.conv_up0 = ConvUpBlock(128, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
torch.nn.init.zeros_(self.conv_out[0].weight)
if hasattr(self.conv_out[0], 'bias') and self.conv_out[0].bias is not None:
torch.nn.init.zeros_(self.conv_out[0].bias)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv_down0(x)
x1 = self.conv_down1(x0)
x2 = self.conv_down2(x1)
x3 = self.conv_down3(x2)
y3 = self.conv_up3(x3) + x2
y2 = self.conv_up2(y3) + x1
y1 = self.conv_up1(y2) + x0
y0 = self.conv_up0(y1) + x
out = self.conv_out(y0)
return out
|
AutoAvatar-main
|
models/std/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
import copy
import shutil, inspect
import torch
import torch.nn.functional as F
import torch.optim as optim
import pytorch_lightning as pl
from pytorch3d.io import save_ply
import time
import utils.CAPE as cape_utils
from utils.implicit import reconstruction
from models.PosedDecKNN_dPoses_dHs.nets import DynNet
import models.std.visual as visual
class Implicit_Trainbox(pl.LightningModule):
def __init__(self, args, log_dir, resolution, recurrent=True, eval_frames=None, pose_model=None):
super().__init__()
self.args = copy.deepcopy(args)
self.log_dir = log_dir
self.resolution = resolution
self.recurrent = recurrent
self.eval_frames = eval_frames
self.pose_model = pose_model
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(os.path.join(log_dir, 'ckpt')):
os.mkdir(os.path.join(log_dir, 'ckpt'))
if not os.path.exists(os.path.join(log_dir, 'net_def')):
os.mkdir(os.path.join(log_dir, 'net_def'))
if not os.path.exists(os.path.join(log_dir, 'mesh')):
os.mkdir(os.path.join(log_dir, 'mesh'))
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(inspect.getfile(DynNet), os.path.join(log_dir, 'net_def'))
self.dyn_net = DynNet(args, eval_frames)
self.itr = 0
def save_ckpt(self):
torch.save(self.dyn_net.state_dict(), os.path.join(self.log_dir, 'ckpt', 'dyn_net_%06d.pth' % self.itr))
def load_ckpt(self, itr, log_dir):
self.dyn_net.load_state_dict(torch.load(os.path.join(log_dir, 'ckpt', 'dyn_net_%06d.pth' % itr), map_location='cpu'))
def preprocess(self, batch):
verts_detail, faces_detail, verts_smt, faces_smt, poses = batch['verts_detail'], batch['faces_detail'], batch['verts_smt'], batch['faces_smt'], batch['poses']
B, T, _ = poses.shape
N = self.dyn_net.smpl_model.v_template.shape[0]
verts_smpl = self.dyn_net.smpl_model(poses.view(B * T, 75)).vertices.view(B, T, N, 3)
if len(verts_detail) == 0:
verts_detail = [verts_smpl[:, i].contiguous() for i in range(T)]
faces_detail = [self.dyn_net.smpl_model.faces.to(verts_smpl.device)[None].expand(B, -1, -1).contiguous() for i in range(T)]
verts_smt = verts_detail
faces_smt = faces_detail
if not self.args['data']['separate_detail']:
verts_smt = verts_detail
faces_smt = faces_detail
return verts_detail, faces_detail, verts_smt, faces_smt, poses, verts_smpl
def train_or_valid_step(self, batch, batch_idx, is_train):
verts_detail_all, faces_detail_all, verts_smt_all, faces_smt_all, poses_all, verts_smpl_all = self.preprocess(batch)
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
K = self.args['model']['ob_vals'][-1]
assert T_next == 1
iter_times = []
sd_errs = []
cos_errs = []
obsdf_rollout = None
shapes_uv_init = None
loss_surf_sdf = 0
loss_surf_grad = 0
loss_igr = 0
loss_o = 0
end_idx = self.args['train']['n_rollout']
if self.eval_frames is not None and batch_idx + T_hist == self.eval_frames[0]:
end_idx = poses_all.shape[1] - T + 1
for i in range(end_idx):
verts_detail = verts_detail_all[i:i+T]
faces_detail = faces_detail_all[i:i+T]
verts_smt = verts_smt_all[i:i+T]
faces_smt = faces_smt_all[i:i+T]
poses = poses_all[:, i:i+T]
verts_smpl = verts_smpl_all[:, i:i+T]
N = verts_smpl.shape[2]
B = poses.shape[0]
if poses.shape[1] < T:
break
verts = verts_smt
faces = faces_smt
verts_gt = verts_smt if not self.args['model']['use_detail'] else verts_detail
faces_gt = faces_smt if not self.args['model']['use_detail'] else faces_detail
bbmin = verts_smpl.min(dim=2)[0] - 0.1
bbmax = verts_smpl.max(dim=2)[0] + 0.1
surf_pts, surf_normals, igr_pts, bbox_pts, rand_pts = cape_utils.sample_igr_pts(verts_gt[-1], faces_gt[-1], bbmin[:, -1], bbmax[:, -1], self.args)
if self.args['model']['stage'] == 'shape_enc_dec':
obsdf, _ = self.dyn_net.shapes_to_obsdf(verts[-1], poses[:, -1], mode='meshes', faces=faces[-1])
assert obsdf.shape == (B, N, 1)
obsdf = obsdf[:, None]
if self.args['model']['stage'] == 'auto_regr':
if obsdf_rollout is None:
if 'verts_init' not in batch:
obsdf = [self.dyn_net.shapes_to_obsdf(verts[j], poses[:, j], mode='meshes', faces=faces[j])[0] for j in range(T_hist)]
obsdf = torch.stack(obsdf, dim=1)
assert obsdf.shape == (B, T_hist, N, 1)
else:
_, shapes_uv_pose = self.pose_model(None, poses[:, :-1].reshape(B * T_hist, 1, 75))
obsdf, _ = self.pose_model.shapes_to_obsdf(torch.zeros((B * T_hist, 0, 0), device=poses.device), poses[:, :-1].reshape(B * T_hist, 75), mode='nets', shapes_uv=shapes_uv_pose)
assert obsdf.shape == (B * T_hist, N, 1)
obsdf = obsdf.view(B, T_hist, N, 1).contiguous()
# obsdf = self.dyn_net.shapes_to_obsdf(batch['verts_init'], batch['poses_init'], mode='meshes', faces=batch['faces_init'])[0]
# assert obsdf.shape == (B, N, 1)
# obsdf = obsdf[:, None].expand(B, T_hist, N, 1).contiguous()
else:
obsdf = obsdf_rollout.detach()
shapes, shapes_uv = self.dyn_net(obsdf, poses)
if self.eval_frames is not None:
if shapes_uv_init is None:
shapes_uv_init = shapes_uv
else:
shapes_uv = shapes_uv * (1 - self.dyn_net.head_hands_feet_mask_uv) + shapes_uv_init * self.dyn_net.head_hands_feet_mask_uv
if i + 1 < end_idx and self.recurrent:
with torch.no_grad():
obsdf_new, _ = self.dyn_net.shapes_to_obsdf(shapes, poses[:, -1], mode='nets', shapes_uv=shapes_uv)
obsdf_rollout = torch.cat([obsdf[:, 1:], obsdf_new[:, None]], dim=1).detach()
assert obsdf_rollout.shape == (B, T_hist, N, 1)
if self.eval_frames is None:
# Losses
surf_sdf, surf_sdf_grad = self.dyn_net.query_sdf_with_grad(surf_pts, poses[:, -1], shapes_uv)
rand_sdf, rand_sdf_grad = self.dyn_net.query_sdf_with_grad(rand_pts, poses[:, -1], shapes_uv)
bbox_sdf = rand_sdf[:, self.args['train']['n_pts_scan_igr']:]
assert bbox_sdf.shape == (B, self.args['train']['n_pts_bbox_igr'])
loss_surf_sdf += surf_sdf.abs().mean() / self.args['train']['n_rollout']
loss_surf_grad += torch.norm(surf_sdf_grad - surf_normals, p=2, dim=-1).mean() / self.args['train']['n_rollout']
loss_igr += (torch.norm(rand_sdf_grad, p=2, dim=-1) - 1).pow(2).mean() / self.args['train']['n_rollout']
loss_o += torch.exp(-50.0 * torch.abs(bbox_sdf)).mean() / self.args['train']['n_rollout']
else:
out_dir = os.path.join(self.log_dir, 'mesh', 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, 'gt')):
os.mkdir(os.path.join(out_dir, 'gt'))
if not os.path.exists(os.path.join(out_dir, 'pred')):
os.mkdir(os.path.join(out_dir, 'pred'))
if not os.path.exists(os.path.join(out_dir, 'poses')):
os.mkdir(os.path.join(out_dir, 'poses'))
with open(os.path.join(out_dir, 'poses', 'poses_%06d.bin' % (i + T_hist)), 'wb') as f:
pickle.dump({'poses': poses.cpu()}, f)
if i == 0:
for j in range(T_hist):
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % j), verts_gt[j][0], faces_gt[j][0])
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % (i + T_hist)), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes_uv[:1])
verts_out, faces_out = out
verts_out, faces_out = torch.from_numpy(verts_out).float().to(poses.device), torch.from_numpy(faces_out.astype(np.int32)).long().to(poses.device)
save_ply(os.path.join(out_dir, 'pred', 'pred_%06d.ply' % (i + T_hist)), verts_out, faces_out)
sd_err, cos_err = cape_utils.scan_to_pred_errors(verts_gt[-1], faces_gt[-1], verts_out[None], faces_out[None])
sd_errs.append(sd_err.cpu())
cos_errs.append(cos_err.cpu())
if self.eval_frames is not None:
with open(os.path.join(out_dir, 'errs.bin'), 'wb') as f:
pickle.dump({'sd_errs': sd_errs, 'cos_errs': cos_errs}, f)
visual.render_meshes(out_dir, start_i=T_hist, simplify_mesh=False)
os.system('bash models/std/videos.sh %s %s' % (out_dir, str(T_hist)))
loss = loss_surf_sdf + loss_surf_grad + loss_igr * self.args['train']['lambda_igr'] + loss_o * self.args['train']['lambda_o']
res_dict = {
'verts': verts,
'faces': faces,
'verts_gt': verts_gt,
'faces_gt': faces_gt,
'verts_smpl': verts_smpl,
'poses': poses,
'shapes': shapes_uv,
'bbmin': bbmin,
'bbmax': bbmax,
'loss_surf_sdf': loss_surf_sdf,
'loss_surf_grad': loss_surf_grad,
'loss_igr': loss_igr,
'loss_o': loss_o,
'loss': loss
}
return res_dict
def training_step(self, batch, batch_idx):
res_dict = self.train_or_valid_step(batch, batch_idx, True)
# log
prefix = 'Train'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
# checkpoint
self.itr += 1
if self.itr % self.args['train']['ckpt_step'] == 0:
self.save_ckpt()
return res_dict['loss']
def validation_step(self, batch, batch_idx):
if self.eval_frames is not None and batch_idx + self.args['model']['n_hist_frames'] not in self.eval_frames:
return
res_dict = self.train_or_valid_step(batch, batch_idx, False)
# log
prefix = 'Valid'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
if self.eval_frames is None:
self.compute_meshes(res_dict, batch, batch_idx)
def configure_optimizers(self):
if self.args['model']['stage'] == 'shape_enc_dec':
optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
elif self.args['model']['stage'] == 'auto_regr':# and not self.args['model']['use_detail']:
optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
# elif self.args['model']['use_detail']:
# optimizer = optim.Adam(self.dyn_net.detail_dec.parameters(), lr=self.args['train']['lr'])
return optimizer
def compute_meshes(self, res_dict, batch, batch_idx):
verts, faces, verts_gt, faces_gt, verts_smpl, poses, shapes, bbmin, bbmax = res_dict['verts'], res_dict['faces'], res_dict['verts_gt'], res_dict['faces_gt'], \
res_dict['verts_smpl'], res_dict['poses'], res_dict['shapes'], res_dict['bbmin'], res_dict['bbmax']
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
if not os.path.exists(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr)):
os.mkdir(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr))
out_dir = os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr, 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for i in range(T_hist):
save_ply(os.path.join(out_dir, 'hist_%d.ply' % i), verts[i][0], faces[i][0])
save_ply(os.path.join(out_dir, 'gt.ply'), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes[:1])
if out != -1:
verts_out, faces_out = out
save_ply(os.path.join(out_dir, 'pred.ply'),
torch.from_numpy(verts_out).float().contiguous(), torch.from_numpy(faces_out.astype(np.int32)).contiguous().long())
def test_step(self, batch, batch_idx):
self.validation_step(batch, batch_idx)
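# --- Hypothetical toy illustration (not part of the original file) of the
# truncated-rollout pattern in train_or_valid_step: the predicted observer
# state is fed back as input for the next step, but detached first, so
# gradients are accumulated per step and never chained across the rollout.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    step = nn.Linear(4, 4)
    state = torch.randn(1, 4)
    loss = torch.zeros(())
    for _ in range(3):
        pred = step(state)
        loss = loss + pred.pow(2).mean()
        state = pred.detach()  # mirrors obsdf_rollout = (...).detach() above
    loss.backward()  # per-step gradients only; no backprop through time
    assert step.weight.grad is not None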
|
AutoAvatar-main
|
models/PosedDecKNN_dPoses_dHs/trainbox.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from pytorch3d.ops import knn_points, knn_gather
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.transforms.rotation_conversions import matrix_to_axis_angle
import utils.CAPE as cape_utils
from utils.render import *
from models.nets import *
class DynNet(nn.Module):
def __init__(self, args, eval_frames):
super().__init__()
self.args = copy.deepcopy(args)
self.eval_frames = eval_frames
self.smpl_model = cape_utils.load_smpl(args)
self.register_buffer('v_template', self.smpl_model.v_template, persistent=False)
self.register_buffer('faces', self.smpl_model.faces, persistent=False)
mask_ids = ['left_wrist', 'right_wrist', 'left_hand', 'right_hand', 'left_ankle', 'right_ankle', 'left_foot', 'right_foot', 'head']
mask_ids = [cape_utils.SMPL_JOINT_NAMES.index(e) for e in mask_ids]
head_hands_feet_mask = self.smpl_model.lbs_weights[:, mask_ids].sum(dim=-1) # (N,)
head_hands_feet_mask[head_hands_feet_mask < 2e-2] = 0
head_hands_feet_mask = (head_hands_feet_mask * 10).clip(max=1)
self.register_buffer('head_hands_feet_mask', head_hands_feet_mask, persistent=False)
mask_ids = ['left_ankle', 'right_ankle', 'left_foot', 'right_foot']
mask_ids = [cape_utils.SMPL_JOINT_NAMES.index(e) for e in mask_ids]
feet_mask = self.smpl_model.lbs_weights[:, mask_ids].sum(dim=-1) # (N,)
self.register_buffer('feet_mask', feet_mask, persistent=False)
W = cape_utils.compute_adjacent_matrix(self.smpl_model.parents, 1)
self.register_buffer('W', W, persistent=False) # (J + 1, J)
data = np.load(args['data']['uv_info'])
verts_uv, faces_uv, v2uv = torch.from_numpy(data['verts_uv']), torch.from_numpy(data['faces_uv']).long(), torch.from_numpy(data['v2uv']).long()
self.geo_fn = UVRender(args, verts_uv, faces_uv, v2uv)
self.register_buffer('head_hands_feet_mask_uv', self.geo_fn.to_uv(head_hands_feet_mask[None, :, None].cuda()), persistent=False)
data = np.load(args['data']['resample_idxs_path'])
self.resample_idxs = data['idxs']
self.shape_enc_dec = ShapeEncDec(args)
if args['model']['stage'] == 'auto_regr':
self.dynamics_net = DynamicsNet(args)
# if args['model']['use_detail']:
# self.detail_dec = DetailDec(args)
def compute_poses_feat(self, poses):
"""
:param poses: (B, 69)
"""
B = poses.shape[0]
J = self.smpl_model.NUM_BODY_JOINTS
N = self.smpl_model.get_num_verts()
assert poses.shape == (B, 69)
poses_quat = self.smpl_model.compute_poses_quat(poses) # (B, J, 4)
assert poses_quat.shape == (B, J, 4)
lbs_w = self.smpl_model.lbs_weights[None].expand(B, N, J + 1)
lbs_w = torch.einsum('bvj,jl->bvl', lbs_w, self.W)
assert lbs_w.shape == (B, N, J)
poses_feat = poses_quat[:, None] * lbs_w[..., None]
assert poses_feat.shape == (B, N, J, 4)
return poses_feat
def normalize_sd_delta(self, sd_delta):
        # Log-compress signed-distance deltas: sign(x) * log(1 + 1000|x|) / 4
        # keeps resolution for small temporal changes while squashing outliers.
        sd_delta_nc = torch.sign(sd_delta) * (sd_delta.abs() * 1000 + 1).log() * 0.25
return sd_delta_nc
def normalize_globalRt(self, pts, poses):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
"""
B, M, _ = pts.shape
assert poses.shape == (B, 75)
smpl_out = self.smpl_model(poses)
root_T_inv = torch.linalg.inv(smpl_out.A[:, 0]) # (B, 4, 4)
pts_nc = pts - poses[:, None, :3]
pts_nc_homo = torch.ones((B, M, 1), dtype=torch.float, device=pts.device)
pts_nc_homo = torch.cat([pts_nc, pts_nc_homo], dim=-1)
pts_nc = torch.bmm(root_T_inv, pts_nc_homo.transpose(-2, -1)).transpose(-2, -1)[..., :3].contiguous()
assert pts_nc.shape == (B, M, 3)
return pts_nc
def query_sdf_nets(self, pts, poses, shapes, force_coarse=False):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
:param shapes: (B, C, H, W)
"""
B, M, _ = pts.shape
# _, N, C = shapes.shape
C = shapes.shape[1]
N = self.smpl_model.get_num_verts()
H = W = self.args['model']['uv_size']
# assert poses.shape == (B, 75) and shapes.shape == (B, N, C) and N == self.smpl_model.get_num_verts()
assert poses.shape == (B, 75) and shapes.shape == (B, C, H, W)
verts = self.smpl_model(poses).vertices
assert verts.shape == (B, N, 3)
shapes = self.geo_fn.from_uv(shapes)
# shapes_bias = self.geo_fn.from_uv(self.shape_enc_dec.uv_bias)
# shapes = shapes * (1 - self.head_hands_feet_mask[None, :, None]) + shapes_bias * self.head_hands_feet_mask[None, :, None]
assert shapes.shape == (B, N, C)
# Normalize global Rt
verts = self.normalize_globalRt(verts, poses)
pts = self.normalize_globalRt(pts, poses)
# MLP decode
# SMPL resample
meshes = Meshes(verts=verts, faces=self.faces[None].expand(B, -1, -1))
normals = meshes.verts_normals_padded()
assert normals.shape == (B, N, 3)
verts_ori = verts.clone()
shapes_ori = shapes.clone()
verts = verts[:, self.resample_idxs]
normals = normals[:, self.resample_idxs]
shapes = shapes[:, self.resample_idxs]
N_ = verts.shape[1]
assert verts.shape == (B, N_, 3) and normals.shape == (B, N_, 3) and shapes.shape == (B, N_, C)
# KNN
K = 20
C_s = 64
C_ = 128
_, idx, pts_nn = knn_points(pts, verts, K=K, return_nn=True)
assert torch.allclose(pts_nn, knn_gather(verts, idx))
normals_nn = knn_gather(normals, idx)
shapes_nn = knn_gather(shapes, idx)
assert pts_nn.shape == (B, M, K, 3) and normals_nn.shape == (B, M, K, 3) and shapes_nn.shape == (B, M, K, C)
pts_nn = pts[:, :, None] - pts_nn
cos_nn = torch.cosine_similarity(pts_nn, normals_nn, dim=-1)
len_nn = torch.norm(pts_nn, p=2, dim=-1)
assert cos_nn.shape == (B, M, K) and len_nn.shape == (B, M, K)
x = torch.cat([cos_nn[..., None], len_nn[..., None]], dim=-1)
assert x.shape == (B, M, K, 2)
# Proj pts to mesh
# sd, pts_proj, shapes_proj, pts_uv = cape_utils.proj_pts_to_uv(pts, verts_ori, self.faces[None].expand(B, -1, -1).contiguous(),
# self.geo_fn.verts_uv[None].expand(B, -1, -1),
# self.geo_fn.faces_uv[None].expand(B, -1, -1), shapes_ori)
# assert sd.shape == (B, M) and pts_proj.shape == (B, M, 3) and shapes_proj.shape == (B, M, C) and pts_uv.shape == (B, M, 2)
# x = torch.cat([sd[..., None], pts_uv], dim=-1)
# assert x.shape == (B, M, 3)
# pts_proj = pts_proj - pts
# Aggregate
feat_nn = self.shape_enc_dec.pts_mlp(
torch.cat([
# self.shape_enc_dec.pts_emb(pts_nn.view(B * M * K, 3)),
# self.shape_enc_dec.pts_emb(normals_nn.view(B * M * K, 3)),
self.shape_enc_dec.pts_emb(x.view(B * M * K, 2)),
shapes_nn.view(B * M * K, C)[:, :C_s]
], dim=-1)
).view(B, M, K, C_)
# feat_proj = self.shape_enc_dec.proj_pts_mlp(
# torch.cat([
# self.shape_enc_dec.pts_emb(x.view(B * M, 3)),
# shapes_proj.view(B * M, C)[:, :C_s]
# ], dim=-1)
# ).view(B, M, 1, C_)
feat = feat_nn #torch.cat([feat_nn, feat_proj], dim=-2)
assert feat.shape == (B, M, K, C_) #(B, M, K + 1, C_)
w = self.shape_enc_dec.weights_fc(feat.view(B * M * K, C_)).view(B, M, K, 1) #.view(B * M * (K + 1), C_)).view(B, M, K + 1, 1)
w = torch.softmax(w, dim=-2)
feat = (feat * w).sum(dim=-2)
assert feat.shape == (B, M, C_)
sdf = self.shape_enc_dec.sdf_mlp(feat.view(B * M, C_)).view(B, M)
# sdf = self.shape_enc_dec.sdf_mlp(feat_proj).view(B, M)
# mask feet
if self.eval_frames is not None:
pts_feet_mask = knn_gather(self.feet_mask[None, self.resample_idxs, None].expand(B, -1, -1), idx)[:, :, 0, 0]
assert pts_feet_mask.shape == (B, M)
abs_mask = (cos_nn[..., 0] > 0).long() * (len_nn[..., :4].mean(dim=-1) > 0.04)
sdf_abs = sdf.clone()
sdf_abs = sdf_abs.abs() * abs_mask + sdf_abs * (1 - abs_mask)
sdf = sdf * (1 - pts_feet_mask) + sdf_abs * pts_feet_mask
# if self.args['model']['use_detail'] and not force_coarse:
# # Aggregate
# feat_nn = self.detail_dec.pts_mlp(
# torch.cat([
# self.detail_dec.pts_emb(pts_nn.view(B * M * K, 3)),
# self.detail_dec.pts_emb(normals_nn.view(B * M * K, 3)),
# shapes_nn.view(B * M * K, C)[:, C_s:]
# ], dim=-1)
# ).view(B, M, K, C_)
# feat_proj = self.detail_dec.proj_pts_mlp(
# torch.cat([
# self.detail_dec.pts_emb(pts_proj.view(B * M, 3)),
# shapes_proj.view(B * M, C)[:, C_s:]
# ], dim=-1)
# ).view(B, M, 1, C_)
# feat = torch.cat([feat_nn, feat_proj], dim=-2)
# assert feat.shape == (B, M, K + 1, C_)
# w = self.detail_dec.weights_fc(feat.view(B * M * (K + 1), C_)).view(B, M, K + 1, 1)
# w = torch.softmax(w, dim=-2)
# feat = (feat * w).sum(dim=-2)
# assert feat.shape == (B, M, C_)
# sdf_delta = self.detail_dec.sdf_mlp(feat.view(B * M, C_)).view(B, M)
# sdf = sdf + sdf_delta
return sdf
def compute_obpts(self, poses):
"""
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
verts_smpl = self.smpl_model(poses).vertices
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B, N, 3)
offset = torch.linspace(*self.args['model']['ob_vals'], device=poses.device)[None, None, :, None] * normals_smpl[:, :, None, :] # (B, N, K, 3)
obpts = offset + verts_smpl[:, :, None]
return obpts
def shapes_to_obsdf(self, shapes, poses, mode='nets', faces=None, shapes_uv=None):
"""
:param shapes: (B, N, C)
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
C = shapes.shape[-1]
assert poses.shape == (B, 75) and shapes.shape[0] == B
# Compute observer pts
obpts = self.compute_obpts(poses)
assert obpts.shape == (B, N, K, 3)
# Query sdf
def query(obpts, clip):
B, N, K, _ = obpts.shape
if mode == 'meshes':
assert C == 3 and faces is not None
sdf, _, _, _, _ = cape_utils.proj_pts_to_mesh(obpts.view(B, N * K, 3), shapes, faces)
sdf = sdf.view(B, N, K)
elif mode == 'nets':
assert shapes_uv is not None
sdf = self.query_sdf_nets(obpts.view(B, N * K, 3), poses, shapes_uv, force_coarse=True)
sdf = sdf.view(B, N, K)
if clip:
thres = (self.args['model']['ob_vals'][1] - self.args['model']['ob_vals'][0]) / (K - 1)
assert thres > 0
sdf = sdf.clip(min=-thres, max=thres)
return sdf
sdf = query(obpts, False)
verts_smpl = self.smpl_model(poses).vertices
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B, N, 3)
offsets = torch.linspace(*self.args['model']['ob_vals'], device=poses.device)[None, None, :].expand(B, N, K).contiguous()
idxs = torch.arange(0, K, 1, device=poses.device).long()[None, None, :].expand(B, N, K).contiguous()
        # Push indices of negative-SDF samples out of range so the min below
        # picks the first sample along the normal with sdf >= 0.
        idxs[sdf < 0] += 777
idxs_pos = torch.min(idxs, dim=-1)[0].clip(max=K-1)
idxs_neg = (idxs_pos - 1).clip(min=0)
offsets_pos = torch.gather(offsets, -1, idxs_pos[..., None])
offsets_neg = torch.gather(offsets, -1, idxs_neg[..., None])
assert offsets_pos.shape == (B, N, 1) and offsets_neg.shape == (B, N, 1)
sdf_pos = torch.gather(sdf, -1, idxs_pos[..., None])
sdf_neg = torch.gather(sdf, -1, idxs_neg[..., None])
assert sdf_pos.shape == (B, N, 1) and sdf_neg.shape == (B, N, 1)
# binary search
for i in range(2):
offsets_mid = (offsets_neg + offsets_pos) / 2
obpts_mid = offsets_mid[..., None] * normals_smpl[:, :, None, :] + verts_smpl[:, :, None]
sdf_mid = query(obpts_mid, False)
assert sdf_mid.shape == (B, N, 1)
offsets_neg_new = offsets_neg.clone()
offsets_pos_new = offsets_pos.clone()
offsets_neg_new[sdf_mid <= 0] = offsets_mid[sdf_mid <= 0]
offsets_pos_new[sdf_mid > 0] = offsets_mid[sdf_mid > 0]
offsets_neg = offsets_neg_new.contiguous()
offsets_pos = offsets_pos_new.contiguous()
sdf_neg_new = sdf_neg.clone()
sdf_pos_new = sdf_pos.clone()
sdf_neg_new[sdf_mid <= 0] = sdf_mid[sdf_mid <= 0]
sdf_pos_new[sdf_mid > 0] = sdf_mid[sdf_mid > 0]
sdf_neg = sdf_neg_new.contiguous()
sdf_pos = sdf_pos_new.contiguous()
# offsets_surf = (offsets_neg + offsets_pos) / 2
# Interpolation
zero_mask = idxs_neg != idxs_pos
w = sdf_neg.abs() + sdf_pos.abs()
zero_mask = (zero_mask.long() * (w.squeeze(-1) > 1e-10).long()).bool()
w_neg = torch.zeros_like(sdf_neg) + 0.5
w_neg[zero_mask] = sdf_pos[zero_mask].abs() / w[zero_mask]
w_pos = torch.zeros_like(sdf_pos) + 0.5
w_pos[zero_mask] = sdf_neg[zero_mask].abs() / w[zero_mask]
offsets_surf = w_neg * offsets_neg + w_pos * offsets_pos
return offsets_surf, obpts
def query_sdf_with_grad(self, pts, poses, shapes):
B, M, _ = pts.shape
C = shapes.shape[1]
N = self.smpl_model.get_num_verts()
H = W = self.args['model']['uv_size']
assert pts.shape == (B, M, 3) and poses.shape == (B, 75) and shapes.shape == (B, C, H, W)
with torch.enable_grad():
pts.requires_grad_(True)
sdf = self.query_sdf_nets(pts, poses, shapes)
assert sdf.shape == (B, M)
sdf_grad = autograd.grad([sdf.sum()], [pts], retain_graph=True, create_graph=True)[0]
assert sdf_grad.shape == (B, M, 3)
return sdf, sdf_grad
def enc_shapes_to_sdf(self, obsdf, poses):
"""
        :param obsdf: (B, T, N, 1)
:param poses: (B, T, 75)
"""
B, T, _ = poses.shape
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
assert obsdf.shape == (B, T, N, 1) and poses.shape == (B, T, 75)
# Compute obpts_uv
verts_smpl = self.smpl_model(poses.view(B * T, 75)).vertices
assert verts_smpl.shape == (B * T, N, 3)
verts_smpl = self.normalize_globalRt(verts_smpl, poses.view(B * T, 75))
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B * T, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B * T, N, 3)
obpts = torch.cat([verts_smpl, normals_smpl], dim=-1)
assert obpts.shape == (B * T, N, 6)
obpts_uv = self.geo_fn.to_uv(obpts)
assert obpts_uv.shape == (B * T, 6, H, W)
# Compute obsdf_uv
obsdf_uv = self.geo_fn.to_uv(obsdf.view(B * T, N, 1))
assert obsdf_uv.shape == (B * T, 1, H, W)
# Net forward
in_feat = torch.cat([obpts_uv, obsdf_uv * 20], dim=1)
shapes_uv = self.shape_enc_dec.shape_enc(in_feat)
C = shapes_uv.shape[1]
feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
shapes = self.geo_fn.from_uv(feat_uv_)
assert shapes.shape == (B * T, N, C) and shapes_uv.shape == (B * T, C, H, W)
shapes = shapes.view(B, T, N, C)
shapes_uv = shapes_uv.view(B, T, C, H, W)
return shapes, shapes_uv
def pose_temp_deri(self, poses):
"""
:param poses: (B, T_, 75)
"""
B, T_, _ = poses.shape
T = T_ - 1
J_ = self.smpl_model.NUM_BODY_JOINTS + 1
poses_prev = poses[:, :-1].clone()
poses_last = poses[:, 1:].clone()
poses_vel = torch.zeros_like(poses_last)
assert poses_prev.shape == (B, T, 75) and poses_last.shape == (B, T, 75) and poses_vel.shape == (B, T, 75)
poses_vel[..., :3] = poses_last[..., :3] - poses_prev[..., :3]
rot_prev = axis_angle_to_matrix(poses_prev[..., 3:].reshape(B * T * J_, 3))
rot_last = axis_angle_to_matrix(poses_last[..., 3:].reshape(B * T * J_, 3))
rot_vel = torch.bmm(rot_last, torch.linalg.inv(rot_prev))
assert rot_vel.shape == (B * T * J_, 3, 3)
poses_vel[..., 3:] = matrix_to_axis_angle(rot_vel).view(B, T, J_ * 3)
return poses_vel
def forward(self, obsdf, poses):
"""
        :param obsdf: (B, T, N, 1)
:param poses: (B, T_, 75)
"""
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
if self.args['model']['stage'] == 'shape_enc_dec':
B, T, _ = poses.shape
assert obsdf.shape == (B, T, N, 1) and poses.shape == (B, T, 75) and T == 1
shapes, shapes_uv = self.enc_shapes_to_sdf(obsdf, poses)
shapes = shapes.squeeze(1)
C = shapes.shape[-1]
assert shapes.shape == (B, N, C)
elif self.args['model']['stage'] == 'auto_regr':
B, T_, _ = poses.shape
T = obsdf.shape[1]
n_H = self.args['model']['n_H']
J_ = self.smpl_model.NUM_BODY_JOINTS + 1
J = self.smpl_model.NUM_BODY_JOINTS
assert obsdf.shape == (B, T, N, 1) and poses.shape == (B, T_, 75) and T_ - T == 1 and T == self.args['model']['n_hist_frames']
poses_ref = poses[:, -1:].expand(B, T_, 75).contiguous()
# # Compute obpts_uv
# verts_smpl = self.smpl_model(poses.view(B * T_, 75)).vertices
# assert verts_smpl.shape == (B * T_, N, 3)
# verts_smpl = self.normalize_globalRt(verts_smpl, poses_ref.view(B * T_, 75))
# meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B * T_, -1, -1))
# normals_smpl = meshes.verts_normals_padded() # (B * T_, N, 3)
# obpts = torch.cat([verts_smpl, normals_smpl], dim=-1)
# assert obpts.shape == (B * T_, N, 6)
# obpts_uv = self.geo_fn.to_uv(obpts)
# assert obpts_uv.shape == (B * T_, 6, H, W)
# obpts_uv = obpts_uv.view(B, T_ * 6, H, W)
# Compute poses velocity
# poses_prev = poses[:, :-1].clone()
# poses_last = poses[:, 1:].clone()
# poses_vel = torch.zeros_like(poses_last)
# assert poses_prev.shape == (B, T, 75) and poses_last.shape == (B, T, 75) and poses_vel.shape == (B, T, 75)
# poses_vel[..., :3] = poses_last[..., :3] - poses_prev[..., :3]
# rot_prev = axis_angle_to_matrix(poses_prev[..., 3:].reshape(B * T * J_, 3))
# rot_last = axis_angle_to_matrix(poses_last[..., 3:].reshape(B * T * J_, 3))
# rot_vel = torch.bmm(rot_last, torch.linalg.inv(rot_prev))
# assert rot_vel.shape == (B * T * J_, 3, 3)
# poses_vel[..., 3:] = matrix_to_axis_angle(rot_vel).view(B, T, J_ * 3)
# poses_vel_feat = self.compute_poses_feat(poses_vel[..., 6:].reshape(B * T, 69))
# assert poses_vel_feat.shape == (B * T, N, J, 4)
# poses_vel_feat = torch.cat([poses_vel_feat.view(B * T, N, J * 4), poses_vel[..., :6].reshape(B * T, 1, 6).expand(B * T, N, 6)], dim=-1)
# assert poses_vel_feat.shape == (B * T, N, J * 4 + 6)
# poses_vel_feat_uv = self.geo_fn.to_uv(poses_vel_feat)
# assert poses_vel_feat_uv.shape == (B * T, J * 4 + 6, H, W)
# poses_vel_feat_uv = self.dynamics_net.local_poses_vel_conv_block(poses_vel_feat_uv).view(B, T * 32, H, W)
# poses_vel_feat_uv = self.dynamics_net.temp_poses_vel_conv_block(poses_vel_feat_uv)
# assert poses_vel_feat_uv.shape == (B, 32, H, W)
pose_vel = self.pose_temp_deri(poses)
assert pose_vel.shape == (B, T, 75)
pose_vel = pose_vel.view(B * T, 75)
# pose_acc = self.pose_temp_deri(pose_vel)
# pose_vel = pose_vel[:, -1]
# pose_acc = pose_acc[:, -1]
# pose_vel
pose_vel_feat = self.compute_poses_feat(pose_vel[:, 6:].clone())
assert pose_vel_feat.shape == (B * T, N, J, 4)
pose_vel_feat = torch.cat([pose_vel_feat.view(B * T, N, J * 4), pose_vel[:, None, :6].expand(B * T, N, 6)], dim=-1)
assert pose_vel_feat.shape == (B * T, N, J * 4 + 6)
pose_vel_feat_uv = self.geo_fn.to_uv(pose_vel_feat)
assert pose_vel_feat_uv.shape == (B * T, J * 4 + 6, H, W)
pose_vel_feat_uv = self.dynamics_net.local_pose_vel_conv_block(pose_vel_feat_uv)
assert pose_vel_feat_uv.shape == (B * T, 32, H, W)
pose_vel_feat_uv = pose_vel_feat_uv.view(B, T * 32, H, W)
# # pose_acc
# pose_acc_feat = self.compute_poses_feat(pose_acc[:, 6:].clone())
# assert pose_acc_feat.shape == (B, N, J, 4)
# pose_acc_feat = torch.cat([pose_acc_feat.view(B, N, J * 4), pose_acc[:, None, :6].expand(B, N, 6)], dim=-1)
# assert pose_acc_feat.shape == (B, N, J * 4 + 6)
# pose_acc_feat_uv = self.geo_fn.to_uv(pose_acc_feat)
# assert pose_acc_feat_uv.shape == (B, J * 4 + 6, H, W)
# pose_acc_feat_uv = self.dynamics_net.local_pose_acc_conv_block(pose_acc_feat_uv)
# assert pose_acc_feat_uv.shape == (B, 32, H, W)
# Compute pose_feat
pose_feat = self.compute_poses_feat(poses[:, -1, 6:].clone())
assert pose_feat.shape == (B, N, J, 4)
pose_feat_uv = self.geo_fn.to_uv(pose_feat.view(B, N, J * 4))
assert pose_feat_uv.shape == (B, J * 4, H, W)
pose_feat_uv = self.dynamics_net.local_pose_conv_block(pose_feat_uv)
assert pose_feat_uv.shape == (B, 32, H, W)
# Compute obsdf_feat_uv
obsdf_delta = obsdf[:, 1:] - obsdf[:, :-1]
assert obsdf_delta.shape == (B, T - 1, N, 1)
obsdf_delta = self.normalize_sd_delta(obsdf_delta)
obsdf_delta = obsdf_delta.permute(0, 2, 1, 3).contiguous()
assert obsdf_delta.shape == (B, N, T - 1, 1)
obsdf_feat = torch.cat([obsdf_delta.view(B, N, T - 1), obsdf[:, -1] * 20], dim=-1)
assert obsdf_feat.shape == (B, N, T)
obsdf_feat_uv = self.geo_fn.to_uv(obsdf_feat)
assert obsdf_feat_uv.shape == (B, T, H, W)
# obsdf_feat = obsdf.permute(0, 2, 1, 3).contiguous() * 20
# assert obsdf_feat.shape == (B, N, T, 1)
# obsdf_feat_uv = self.geo_fn.to_uv(obsdf_feat.view(B, N, T)[:, :, -n_H:].contiguous())
# assert obsdf_feat_uv.shape == (B, n_H, H, W)
# Unet forward
feat_uv = torch.cat([pose_vel_feat_uv, pose_feat_uv, obsdf_feat_uv], dim=1)
# shapes_uv_delta = self.dynamics_net.unet(feat_uv)
# _, shapes_uv_prev = self.enc_shapes_to_sdf(obsdf[:, -1:], poses[:, -2:-1])
# shapes_uv = shapes_uv_prev[:, 0] + shapes_uv_delta
shapes_uv = self.dynamics_net.unet(feat_uv)
C = shapes_uv.shape[1]
# feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
# if self.args['model']['use_detail']:
# feat = self.detail_dec.unet(feat_uv_)
# feat_uv_ = torch.cat([feat_uv_, feat], dim=1)
# C = feat_uv_.shape[1]
shapes = self.geo_fn.from_uv(shapes_uv)
assert shapes.shape == (B, N, C)
return shapes, shapes_uv
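def _rotation_velocity_demo():
    # Hypothetical helper (not part of the original file): illustrates the
    # finite-difference rotation used in pose_temp_deri, where the per-joint
    # velocity is the relative rotation R_vel = R_last @ R_prev^{-1}. Planar
    # rotations stand in for the axis-angle joints here.
    import math
    import torch

    def rot2d(a):
        return torch.tensor([[math.cos(a), -math.sin(a)],
                             [math.sin(a), math.cos(a)]])

    r_prev, r_last = rot2d(0.1), rot2d(0.25)
    r_vel = r_last @ torch.linalg.inv(r_prev)
    assert torch.allclose(r_vel, rot2d(0.15), atol=1e-6)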
class ShapeEncDec(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.sdf_mlp = MLP([128, 128, 1], [-1], 'softplus', True, 'linear', False)
self.pts_emb = Embedder(2, 4)
# self.proj_pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.weights_fc = nn.Linear(128, 1)
# self.shape_enc = ShapeEnc(args)
# self.register_parameter('uv_bias', nn.Parameter(torch.normal(0, 0.01, (1, 64, 256, 256), dtype=torch.float), requires_grad=True))
class DynamicsNet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.local_pose_conv_block = ConvBlock(92, 32, args['model']['uv_size'], kernel_size=1, padding=0)
# self.local_poses_vel_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
# self.temp_poses_vel_conv_block = ConvBlock(32 * args['model']['n_hist_frames'], 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.local_pose_vel_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
# self.local_pose_acc_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.unet = Unet(args)
class DetailDec(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.sdf_mlp = MLP([128, 128, 1], [-1], 'softplus', True, 'linear', False, init_zero_last=True)
self.pts_emb = Embedder(3, 6)
self.proj_pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.pts_mlp = MLP([64 + self.pts_emb.out_ch * 2, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.weights_fc = nn.Linear(128, 1)
self.unet = DetailUnet(args)
class ShapeEnc(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv_in = ConvBlock(7, 64, 256)
self.conv0 = ConvDownBlock(64, 64, 256)
self.conv1 = ConvDownBlock(64, 64, 128)
self.conv2 = ConvUpBlock(64, 64, 128)
self.conv3 = ConvUpBlock(64, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv0(x)
x1 = self.conv1(x0)
x2 = self.conv2(x1) + x0
x3 = self.conv3(x2) + x
out = self.conv_out(x3)
return out
class Unet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
# self.conv_in = ConvBlock(args['model']['n_hist_frames'] + 64, 64, 256)
self.conv_in = ConvBlock(args['model']['n_hist_frames'] + 32 * args['model']['n_batch_frames'], 64, 256)
self.conv_down0 = ConvDownBlock(64, 128, 256)
self.conv_down1 = ConvDownBlock(128, 256, 128)
self.conv_down2 = ConvDownBlock(256, 256, 64)
self.conv_down3 = ConvDownBlock(256, 256, 32)
self.conv_up3 = ConvUpBlock(256, 256, 32)
self.conv_up2 = ConvUpBlock(256, 256, 64)
self.conv_up1 = ConvUpBlock(256, 128, 128)
self.conv_up0 = ConvUpBlock(128, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
torch.nn.init.zeros_(self.conv_out[0].weight)
if hasattr(self.conv_out[0], 'bias') and self.conv_out[0].bias is not None:
torch.nn.init.zeros_(self.conv_out[0].bias)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv_down0(x)
x1 = self.conv_down1(x0)
x2 = self.conv_down2(x1)
x3 = self.conv_down3(x2)
y3 = self.conv_up3(x3) + x2
y2 = self.conv_up2(y3) + x1
y1 = self.conv_up1(y2) + x0
y0 = self.conv_up0(y1) + x
out = self.conv_out(y0)
return out
class DetailUnet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv0 = ConvDownBlock(64, 64, 256)
self.conv1 = ConvDownBlock(64, 64, 128)
self.conv2 = ConvUpBlock(64, 64, 128)
self.conv3 = ConvUpBlock(64, 64, 256)
def forward(self, x):
x0 = self.conv0(x)
x1 = self.conv1(x0)
x2 = self.conv2(x1) + x0
x3 = self.conv3(x2) + x
return x3
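# --- Hypothetical 1-D sketch (not part of the original file) of the
# zero-crossing search in DynNet.shapes_to_obsdf: bracket the sign change of
# an SDF sampled along the vertex normal, refine the bracket with two
# bisection steps, then interpolate linearly between the bracketing samples.
if __name__ == "__main__":
    import torch

    def sdf(t):
        return t - 0.3  # toy signed distance with its zero crossing at t = 0.3

    lo, hi = torch.tensor(0.0), torch.tensor(1.0)  # sdf(lo) <= 0 < sdf(hi)
    for _ in range(2):  # two refinement steps, as in shapes_to_obsdf
        mid = (lo + hi) / 2
        if sdf(mid) <= 0:
            lo = mid
        else:
            hi = mid
    # Linear interpolation weights, matching w_neg / w_pos above.
    w_lo = sdf(hi).abs() / (sdf(lo).abs() + sdf(hi).abs())
    t_surf = w_lo * lo + (1 - w_lo) * hi
    print(float(t_surf))  # ~0.3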
|
AutoAvatar-main
|
models/PosedDecKNN_dPoses_dHs/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import pickle
import datetime
import shutil
import glob
import random
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from utils.configs import *
from utils.io import *
import utils.CAPE as cape_utils
import utils.DFaust as dfaust_utils
from data.DFaust_dataset import DFaustDataset
from models.PosedDecKNN_dPoses_dHs.trainbox import Implicit_Trainbox
np.random.seed(777)
torch.random.manual_seed(777)
torch.cuda.manual_seed_all(777)
random.seed(777)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
from argparse import ArgumentParser
parser = ArgumentParser(description='Test AutoAvatar.')
parser.add_argument('--ws_dir', required=True, help='path of work space directory')
parser.add_argument('--ckpt_dir', required=True, help='path of checkpoint directory')
parser.add_argument('--ckpt_itr', default=7500, type=int)
parser.add_argument('--gpu_id', default=0, type=int)
parser.add_argument('--resolution', default=256, type=int, help='marching cube resolution')
parser.add_argument('--data_mode', default='extrap', type=str, help='which split to test; choose from ["extrap", "interp", "train"]')
cmd_args = parser.parse_args()
gpu_id = cmd_args.gpu_id
resolution = cmd_args.resolution
data_mode = cmd_args.data_mode
eval_frames = [3]  # alternatively: list(range(3, 99999, 20))
ckpt_dir = cmd_args.ckpt_dir
ckpt_itr = cmd_args.ckpt_itr
configs_path = glob.glob(os.path.join(ckpt_dir, 'net_def', '*.yaml'))[0]
args = load_configs(configs_path, cmd_args.ws_dir)
dev_tag = '72s'
subject_tag = args['data']['subject'] + '_' + args['data']['cloth_type']
current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_name = current_time + '_' + dev_tag + '_' + subject_tag + '_' + args['name'] + '_' + args['version'] + '_' + data_mode
args['log_dir'] = os.path.join(cmd_args.ws_dir, 'logs_test')
args['train']['n_rollout'] = 32
with open(args['data']['interp_bin_path'], 'rb') as f:
interp_list = pickle.load(f)
with open(args['data']['extrap_bin_path'], 'rb') as f:
extrap_list = pickle.load(f)
if data_mode == 'extrap':
    seqs_list = extrap_list
elif data_mode == 'interp':
    seqs_list = interp_list
elif data_mode == 'train':
    seqs_list = [4]
else:
    raise ValueError('unknown data_mode: %s' % data_mode)
if not os.path.exists(os.path.join(args['log_dir'], log_name)):
os.makedirs(os.path.join(args['log_dir'], log_name))
for seq_idx in seqs_list:
log_dir = os.path.join(args['log_dir'], log_name, 'seq_%03d' % seq_idx)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
dfaust_json = dfaust_utils.DFaustJson(args['data']['bin_path'])
validset = DFaustDataset(args, dfaust_json, [seq_idx], eval_frames=eval_frames)
valid_loader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=4)
logger = TensorBoardLogger(log_dir, name='')
trainbox = Implicit_Trainbox(args, log_dir, resolution, eval_frames=eval_frames)
if ckpt_dir is not None:
trainbox.load_ckpt(ckpt_itr, ckpt_dir)
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(configs_path, os.path.join(log_dir, 'net_def'))
train_params = {
'max_steps': 10,
'gpus': [gpu_id],
'logger': logger,
'max_epochs': 200000,
'log_every_n_steps': 50,
}
if 'check_val_every_n_epoch' in args['train']:
train_params['check_val_every_n_epoch'] = args['train']['check_val_every_n_epoch']
else:
train_params['val_check_interval'] = args['train']['ckpt_step']
trainer = Trainer(**train_params)
trainer.test(trainbox, valid_loader)
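# Hypothetical invocation (all paths are placeholders):
#   python exps/PosedDecKNN_dPoses_dHs/implicit_eval_dfaust.py \
#       --ws_dir /path/to/workspace \
#       --ckpt_dir /path/to/logs/<trained_run> \
#       --ckpt_itr 7500 --gpu_id 0 --resolution 256 --data_mode extrap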
|
AutoAvatar-main
|
exps/PosedDecKNN_dPoses_dHs/implicit_eval_dfaust.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import pickle
import datetime
import shutil
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from utils.configs import *
from utils.io import *
import utils.CAPE as cape_utils
import utils.DFaust as dfaust_utils
from data.DFaust_dataset import DFaustDataset
from models.PosedDecKNN_dPoses_dHs.trainbox import Implicit_Trainbox
np.random.seed(777)
torch.random.manual_seed(777)
def train(configs_path, args, log_name, gpu_id, resolution, max_steps, ckpt_dir, ckpt_itr, ShapeEncDec_ckpt_dir, ShapeEncDec_ckpt_itr, coarse_ckpt_dir=None, coarse_ckpt_itr=None):
dfaust_json = dfaust_utils.DFaustJson(args['data']['bin_path'])
with open(args['data']['train_bin_path'], 'rb') as f:
train_list = pickle.load(f)
with open(args['data']['interp_bin_path'], 'rb') as f:
interp_list = pickle.load(f)
with open(args['data']['extrap_bin_path'], 'rb') as f:
extrap_list = pickle.load(f)
trainset = DFaustDataset(args, dfaust_json, train_list)
validset = DFaustDataset(args, dfaust_json, extrap_list + interp_list, gap=10)
train_loader = DataLoader(trainset, batch_size=1, shuffle=True, num_workers=8)
valid_loader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=4)
logger = TensorBoardLogger(args['log_dir'], name=log_name)
log_dir = os.path.join(args['log_dir'], log_name)
trainbox = Implicit_Trainbox(args, log_dir, resolution)
if ckpt_dir is not None:
trainbox.load_ckpt(ckpt_itr, ckpt_dir)
if ShapeEncDec_ckpt_dir is not None:
load_components(trainbox.dyn_net, ShapeEncDec_ckpt_dir, ShapeEncDec_ckpt_itr, 'shape_enc_dec')
if coarse_ckpt_dir is not None:
load_components(trainbox.dyn_net, coarse_ckpt_dir, coarse_ckpt_itr, 'shape_enc_dec')
load_components(trainbox.dyn_net, coarse_ckpt_dir, coarse_ckpt_itr, 'dynamics_net')
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(configs_path, os.path.join(log_dir, 'net_def'))
train_params = {
'max_steps': max_steps,
'gpus': [gpu_id],
'logger': logger,
'max_epochs': 200000,
'log_every_n_steps': 50,
}
if 'check_val_every_n_epoch' in args['train']:
train_params['check_val_every_n_epoch'] = args['train']['check_val_every_n_epoch']
else:
train_params['val_check_interval'] = args['train']['ckpt_step']
trainer = Trainer(**train_params)
trainer.fit(trainbox, train_loader, valid_loader)
from argparse import ArgumentParser
parser = ArgumentParser(description='Train AutoAvatar.')
parser.add_argument('--ws_dir', required=True, help='path of work space directory')
parser.add_argument('--configs_path', required=True, help='path of configs file')
parser.add_argument('--configs_path_rollout', required=True, help='path of configs file for rollout fine-tuning')
parser.add_argument('--gpu_id', default=0, type=int)
parser.add_argument('--resolution', default=128, type=int, help='marching cube resolution')
parser.add_argument('--max_steps', default=90000, type=int, help='max training steps')
parser.add_argument('--max_steps_rollout', default=7500, type=int, help='max rollout fine-tuning steps')
cmd_args = parser.parse_args()
ShapeEncDec_ckpt_dir = None
ShapeEncDec_ckpt_itr = None
gpu_id = cmd_args.gpu_id
resolution = cmd_args.resolution
max_steps = cmd_args.max_steps
configs_path = cmd_args.configs_path
args = load_configs(configs_path, cmd_args.ws_dir)
dev_tag = '04s'
subject_tag = args['data']['subject'] + '_' + args['data']['cloth_type']
current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_name = current_time + '_' + dev_tag + '_' + subject_tag + '_' + args['name'] + '_' + args['version']
log_dir = os.path.join(args['log_dir'], log_name)
train(configs_path, args, log_name, gpu_id, resolution, max_steps + 5, None, None, ShapeEncDec_ckpt_dir, ShapeEncDec_ckpt_itr)
ckpt_dir = log_dir
ckpt_itr = max_steps
gpu_id = cmd_args.gpu_id
resolution = cmd_args.resolution
max_steps = cmd_args.max_steps_rollout
configs_path = cmd_args.configs_path_rollout
args = load_configs(configs_path, cmd_args.ws_dir)
dev_tag = '04s'
subject_tag = args['data']['subject'] + '_' + args['data']['cloth_type']
current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_name = current_time + '_' + dev_tag + '_' + subject_tag + '_' + args['name'] + '_' + args['version']
log_dir = os.path.join(args['log_dir'], log_name)
train(configs_path, args, log_name, gpu_id, resolution, max_steps + 5, ckpt_dir, ckpt_itr, None, None)
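# Hypothetical invocation (all paths are placeholders). The script first runs
# single-step training for --max_steps, then fine-tunes with rollout using the
# second config and the checkpoint from the first stage:
#   python exps/PosedDecKNN_dPoses_dHs/implicit_train_dfaust.py \
#       --ws_dir /path/to/workspace \
#       --configs_path /path/to/configs/base.yaml \
#       --configs_path_rollout /path/to/configs/rollout.yaml \
#       --gpu_id 0 --resolution 128 --max_steps 90000 --max_steps_rollout 7500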
|
AutoAvatar-main
|
exps/PosedDecKNN_dPoses_dHs/implicit_train_dfaust.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import torch
from torch.utils.data import Dataset
from pytorch3d.io import load_ply
import utils.CAPE as cape_utils
class CAPEDataset(Dataset):
def __init__(self, args, cape_json, seq_list, skip=1, gap=1, eval_frames=None) -> None:
super().__init__()
self.args = copy.deepcopy(args)
self.cape_json = cape_json
self.seq_list = seq_list
self.skip = skip
self.gap = gap
self.eval_frames = eval_frames
self.n_frames = args['model']['n_batch_frames']
self.n_rollout = args['train']['n_rollout']
self.raw_dataset_dir = args['data']['raw_dataset_dir']
self.dataset_dir = args['data']['dataset_dir']
self.smooth_tag = args['data']['smooth_tag']
self.faces = torch.from_numpy(np.load(os.path.join(args['data']['raw_dataset_dir'], 'misc', 'smpl_tris.npy')).astype(np.int32)).long()
self.samples = [[], []]
for seq_idx in seq_list:
seq = cape_json.data['seqs'][seq_idx]
seq_len = len(seq['frames'])
if eval_frames is None:
frame_idxs = list(range(0, seq_len - (self.n_frames - 2 + self.n_rollout) * skip, gap))
else:
frame_idxs = list(range(0, seq_len - self.n_frames + 1, 1))
self.samples[0] += [seq_idx] * len(frame_idxs)
self.samples[1] += frame_idxs
assert len(self.samples[0]) == len(self.samples[1])
def __len__(self):
return len(self.samples[0])
def __getitem__(self, index):
seq_idx, frame_idx = self.samples[0][index], self.samples[1][index]
end_idx = frame_idx + (self.n_frames + self.n_rollout - 1) * self.skip
if self.eval_frames is not None:
if frame_idx + self.args['model']['n_hist_frames'] == self.eval_frames[0]:
end_idx = len(self.cape_json.data['seqs'][seq_idx]['frames'])
else:
end_idx = min(end_idx, len(self.cape_json.data['seqs'][seq_idx]['frames']))
verts_list = []
faces_list = []
poses_list = []
verts_smt_list = []
faces_smt_list = []
for i in range(frame_idx, end_idx, self.skip):
frame = self.cape_json.data['seqs'][seq_idx]['frames'][i]
npz_path = os.path.join(self.raw_dataset_dir, frame['npz_path'])
data = np.load(npz_path)
verts, rot, transl = data['v_posed'], data['pose'], data['transl']
poses = np.concatenate([transl, rot], axis=0)
assert poses.shape == (75,)
ply_path = os.path.join(self.dataset_dir, self.smooth_tag, self.cape_json.data['subject'],
self.cape_json.data['seqs'][seq_idx]['seq_name'], npz_path.split('/')[-1][:-4] + '_smt.ply')
verts_smt, faces_smt = load_ply(ply_path)
verts_list.append(torch.from_numpy(verts).float())
faces_list.append(self.faces.clone())
poses_list.append(torch.from_numpy(poses).float())
verts_smt_list.append(verts_smt)
faces_smt_list.append(faces_smt)
poses_list = torch.stack(poses_list, dim=0)
return {'verts_detail': verts_list, 'faces_detail': faces_list, 'verts_smt': verts_smt_list, 'faces_smt': faces_smt_list, 'poses': poses_list}
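# Shape summary of one __getitem__ sample, as implied by the code above
# (T = number of frames gathered between frame_idx and end_idx with stride skip):
#   verts_detail / faces_detail : length-T lists of per-frame (V, 3) / (F, 3) tensors
#   verts_smt / faces_smt       : smoothed counterparts loaded from *_smt.ply
#   poses                       : (T, 75) float tensor = [transl(3), axis-angle(72)]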
|
AutoAvatar-main
|
data/CAPE_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import h5py
import sys
import os
import copy
import pickle
import yaml
import smplx
import open3d as o3d
from tqdm import tqdm
from pytorch3d.io import save_ply, load_obj, load_ply
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.transforms.rotation_conversions import matrix_to_axis_angle
from pytorch3d.ops import knn_points
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from human_body_prior.body_model.body_model import BodyModel
import utils.DFaust as dfaust_utils
import utils.CAPE as cape_utils
from utils.configs import *
def generate_DFaust_SMPLH(data_dir, smpl_dir, out_dir, subject, subject_gender, gpu_id=0):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, subject)):
os.mkdir(os.path.join(out_dir, subject))
bm_fname = os.path.join(smpl_dir, 'smplh/%s/model.npz' % subject_gender)
dmpl_fname = os.path.join(smpl_dir, 'dmpls/%s/model.npz' % subject_gender)
num_betas = 16 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
bm = BodyModel(bm_fname=bm_fname, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname)#.cuda(gpu_id)
faces = c2c(bm.f)
npz_files = sorted(os.listdir(os.path.join(data_dir, 'DFaust_67', subject)))
for npz_file in npz_files:
if '_poses' not in npz_file:
continue
if not os.path.exists(os.path.join(out_dir, subject, npz_file[:-4])):
os.mkdir(os.path.join(out_dir, subject, npz_file[:-4]))
bdata = np.load(os.path.join(data_dir, 'DFaust_67', subject, npz_file))
time_length = len(bdata['trans'])
body_parms = {
'root_orient': torch.Tensor(bdata['poses'][:, :3]),#.cuda(gpu_id), # controls the global root orientation
'pose_body': torch.Tensor(bdata['poses'][:, 3:66]),#.cuda(gpu_id), # controls the body
'pose_hand': torch.Tensor(bdata['poses'][:, 66:]),#.cuda(gpu_id), # controls the finger articulation
'trans': torch.Tensor(bdata['trans']),#.cuda(gpu_id), # controls the global body position
'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)),#.cuda(gpu_id), # controls the body shape. Body shape is static
'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]),#.cuda(gpu_id) # controls soft tissue dynamics
}
body_pose_beta = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas', 'root_orient', 'trans']})
for i in tqdm(range(time_length)):
verts = c2c(body_pose_beta.v[i])
verts = torch.from_numpy(verts)
verts_ = verts.clone()
verts[:, 1] = verts_[:, 2]
verts[:, 2] = -verts_[:, 1]
save_ply(os.path.join(out_dir, subject, npz_file[:-4], '%06d.ply' % i), verts, torch.from_numpy(faces))
def smplh_to_smpl(data_dir, subject, smpl_model_path, gpu_id=0):
if not os.path.exists(os.path.join(data_dir, 'smpl_poses')):
os.mkdir(os.path.join(data_dir, 'smpl_poses'))
if not os.path.exists(os.path.join(data_dir, 'smpl_poses', subject)):
os.mkdir(os.path.join(data_dir, 'smpl_poses', subject))
with open('data/smplh2smpl.yaml', 'r') as f:
default_configs = yaml.load(f, Loader=yaml.FullLoader)
seqs = sorted(os.listdir(os.path.join(data_dir, 'smplh_meshes', subject)))
for seq in seqs:
if not os.path.exists(os.path.join(data_dir, 'smpl_poses', subject, seq)):
os.mkdir(os.path.join(data_dir, 'smpl_poses', subject, seq))
configs = copy.deepcopy(default_configs)
configs['body_model']['folder'] = smpl_model_path
configs['datasets']['mesh_folder']['data_folder'] = os.path.join(data_dir, 'smplh_meshes', subject, seq)
configs['output_folder'] = os.path.join(data_dir, 'smpl_poses', subject, seq)
with open('tmp/configs.yaml', 'w') as f:
yaml.dump(configs, f)
        os.system('cd external/smplx && python -m transfer_model --exp-cfg tmp/configs.yaml')
def DFaust_smplh_to_smpl(dataset_dir, smpl_dir, out_dir, subject, subject_gender, gpu_id=0):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, subject)):
os.mkdir(os.path.join(out_dir, subject))
bm_fname = os.path.join(smpl_dir, 'smplh/%s/model.npz' % subject_gender)
dmpl_fname = os.path.join(smpl_dir, 'dmpls/%s/model.npz' % subject_gender)
num_betas = 10 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
bm = BodyModel(bm_fname=bm_fname, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname)#.cuda(gpu_id)
faces = c2c(bm.f)
npz_files = sorted(os.listdir(os.path.join(dataset_dir, 'DFaust_67', subject)))
for npz_file in npz_files:
if '_poses' not in npz_file:
continue
bdata = np.load(os.path.join(dataset_dir, 'DFaust_67', subject, npz_file))
time_length = len(bdata['trans'])
body_parms = {
'root_orient': torch.Tensor(bdata['poses'][:, :3]),#.cuda(gpu_id), # controls the global root orientation
'pose_body': torch.Tensor(bdata['poses'][:, 3:66]),#.cuda(gpu_id), # controls the body
'pose_hand': torch.Tensor(bdata['poses'][:, 66:]),#.cuda(gpu_id), # controls the finger articulation
'trans': torch.Tensor(bdata['trans']),#.cuda(gpu_id), # controls the global body position
'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)),#.cuda(gpu_id), # controls the body shape. Body shape is static
'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]),#.cuda(gpu_id) # controls soft tissue dynamics
}
body_pose_beta = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas', 'root_orient', 'trans']})
root_joints = body_pose_beta.Jtr[:, 0]
smpl_poses = torch.Tensor(bdata['poses'][:, 3:72])
global_orient = torch.Tensor(bdata['poses'][:, :3])
transls = torch.Tensor(bdata['trans'])
flip_yz_mat = torch.tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]).float()[None].expand(time_length, 3, 3)
global_rotmat = axis_angle_to_matrix(global_orient)
global_rotmat = torch.bmm(flip_yz_mat, global_rotmat)
global_orient = matrix_to_axis_angle(global_rotmat)
root_joints_yup = torch.bmm(flip_yz_mat, root_joints[..., None])[..., 0]
root_joints_notransl = root_joints - transls
transls = root_joints_yup - root_joints_notransl
poses = torch.cat([transls, global_orient, smpl_poses], dim=-1)
assert poses.shape == (time_length, 75)
np.savez_compressed(os.path.join(out_dir, subject, npz_file), poses=poses.numpy())
# Save template
shape_data = np.load(os.path.join(dataset_dir, 'DFaust_67', subject, 'shape.npz'))
betas = torch.Tensor(shape_data['betas'][:10]).unsqueeze(0)
body_pose_beta = bm(betas=betas)
verts = c2c(body_pose_beta.v[0])
verts = torch.from_numpy(verts)
# verts_ = verts.clone()
# verts[:, 1] = verts_[:, 2]
# verts[:, 2] = -verts_[:, 1]
save_ply(os.path.join(out_dir, subject, 'v_template.ply'), verts, torch.from_numpy(faces))
def DFaust_parse_raw(dataset_dir, subject):
dfaust_json = dfaust_utils.DFaustJson()
seq_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject)))
seqs = []
for seq_name in seq_names:
ply_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject, seq_name)))
poses = np.load(os.path.join(dataset_dir, 'smpl_poses', subject, '%s_%s_poses.npz' % (subject, seq_name)))['poses']
pre_idx = None
frames = []
for i, ply_name in enumerate(ply_names):
idx = int(ply_name.split('.')[-2])
if pre_idx is not None and idx != pre_idx + 1:
seqs = dfaust_json.append_seqs(seqs, seq_name, frames)
frames = []
frames = dfaust_json.append_frames(frames, os.path.join('scans', subject, seq_name, ply_name), poses[i])
pre_idx = idx
seqs = dfaust_json.append_seqs(seqs, seq_name, frames)
dfaust_json.set_data(subject, seqs)
dfaust_json.dump_bin_file(os.path.join(dataset_dir, '%s_raw.bin' % subject))
print(dfaust_json.num_of_seqs())
print(dfaust_json.num_of_frames())
for seq in dfaust_json.data['seqs']:
print(seq['id'], seq['seq_name'])
def split_train_test(dataset_dir, tag, bin_path, subject, interp_acts, extrap_acts):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
dfaust_json_new = dfaust_utils.DFaustJson()
seqs_new = []
train_list = []
interp_list = []
extrap_list = []
for seq in dfaust_json.data['seqs']:
if seq['id'] in extrap_acts[0]:
assert seq['seq_name'] in extrap_acts[1]
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames']))
extrap_list.append(seqs_new[-1]['id'])
elif seq['id'] in interp_acts[0]:
assert seq['seq_name'] in interp_acts[1]
half_len = len(seq['frames']) // 2
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames'][:half_len]))
train_list.append(seqs_new[-1]['id'])
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames'][half_len:]))
interp_list.append(seqs_new[-1]['id'])
else:
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames']))
train_list.append(seqs_new[-1]['id'])
dfaust_json_new.set_data(subject, seqs_new)
dfaust_json_new.dump_bin_file(os.path.join(dataset_dir, '%s_%s.bin' % (subject, tag)))
print(dfaust_json_new.num_of_seqs())
print(dfaust_json_new.num_of_frames())
with open(os.path.join(dataset_dir, '%s_%s_train.bin' % (subject, tag)), 'wb') as f:
pickle.dump(train_list, f)
with open(os.path.join(dataset_dir, '%s_%s_interp.bin' % (subject, tag)), 'wb') as f:
pickle.dump(interp_list, f)
with open(os.path.join(dataset_dir, '%s_%s_extrap.bin' % (subject, tag)), 'wb') as f:
pickle.dump(extrap_list, f)
print(train_list)
print(interp_list)
print(extrap_list)
def add_transl(dataset_dir, bin_path, subject, smpl_path):
smpl_model = smplx.SMPLLayer(model_path=smpl_path)
dfaust_json = dfaust_utils.DFaustJson(bin_path)
betas = []
for seq in tqdm(dfaust_json.data['seqs']):
bdata = np.load(os.path.join(dataset_dir, 'DFaust_67', subject, '%s_%s_poses.npz' % (subject, seq['seq_name'])))
for i in range(len(seq['frames'])):
frame = seq['frames'][i]
idx = int(frame['pose_path'].split('/')[-1][:-4])
with open(os.path.join(dataset_dir, frame['pose_path']), 'rb') as f:
data = pickle.load(f)
verts_smpl_ref, _, _ = load_obj(os.path.join(dataset_dir, frame['pose_path'][:-4] + '.obj'))
body_pose = data['full_pose'][0].detach().cpu()[None, 1:]
global_orient = data['full_pose'][0].detach().cpu()[None, 0]
verts_smpl = smpl_model(betas=data['betas'].detach().cpu(), body_pose=body_pose, global_orient=global_orient).vertices[0]
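            # The fitted SMPL mesh shares shape, pose and global orientation with the
            # reference scan fit, so the residual is a near-rigid offset; its
            # per-vertex mean recovers the missing translation.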
transl = (verts_smpl_ref - verts_smpl).mean(dim=0)
rot = matrix_to_axis_angle(data['full_pose'][0].detach().cpu())
assert rot.shape == (24, 3)
poses = np.concatenate([transl, rot.view(72).numpy()], axis=0)
assert poses.shape == (75,)
frame['poses'] = poses
betas.append(data['betas'].detach().cpu())
betas = torch.cat(betas, dim=0).mean(dim=0)[None]
v_template = smpl_model(betas=betas).vertices[0]
save_ply(os.path.join(dataset_dir, 'smpl_poses', subject, 'v_template.ply'), v_template, smpl_model.faces_tensor)
dfaust_json.dump_bin_file(bin_path)
def simplify_scans(ws_dir, dataset_dir, bin_path, config_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
smpl_model = cape_utils.load_smpl(load_configs(config_path, ws_dir)).cuda()
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple')):
os.mkdir(os.path.join(dataset_dir, 'scans_simple'))
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'])):
os.mkdir(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject']))
for seq in tqdm(dfaust_json.data['seqs']):
mesh_dir = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'])
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
ply_path = os.path.join(dataset_dir, frame['ply_path'])
verts, faces = load_ply(ply_path)
verts, faces = verts.cuda(), faces.cuda()
poses = torch.from_numpy(frame['poses']).float().cuda()[None]
verts_smpl = smpl_model(poses).vertices[0]
bbmin = verts_smpl.min(dim=0)[0][None] - 0.1
bbmax = verts_smpl.max(dim=0)[0][None] + 0.1
mask_min = (verts > bbmin).long().cumprod(dim=-1)[:, -1]
mask_max = (verts < bbmax).long().cumprod(dim=-1)[:, -1]
verts_mask = mask_min * mask_max
faces_mask = verts_mask[faces[:, 0]] * verts_mask[faces[:, 1]] * verts_mask[faces[:, 2]]
faces_val = faces[faces_mask.bool()]
verts_idxs_new2old = torch.arange(0, verts.shape[0]).long()[verts_mask.bool()]
verts_idxs_old2new = torch.zeros_like(verts_mask) - 1
verts_idxs_old2new[verts_idxs_new2old] = torch.arange(0, verts_idxs_new2old.shape[0]).long().cuda()
faces = verts_idxs_old2new[faces_val]
verts = verts[verts_idxs_new2old]
mesh_o3d = o3d.geometry.TriangleMesh()
mesh_o3d.vertices = o3d.utility.Vector3dVector(verts.cpu().numpy())
mesh_o3d.triangles = o3d.utility.Vector3iVector(faces.cpu().numpy())
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(faces.shape[0] * 0.075 * 0.67))
verts, faces = torch.from_numpy(np.asarray(mesh_o3d.vertices)), torch.from_numpy(np.asarray(mesh_o3d.triangles))
save_ply(os.path.join(mesh_dir, ply_path.split('/')[-1]), verts, faces)
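# Added sketch distilling the recurring pattern above (and in filter_outlier_verts
# below): keep only faces whose three vertices survive a boolean mask, then
# compact vertex indices so the submesh is self-contained. Not called by the
# pipeline; it assumes the module-level torch import.
def extract_submesh(verts, faces, verts_mask):
    """verts: (V, 3), faces: (F, 3) long, verts_mask: (V,) 0/1 long tensor."""
    faces_mask = verts_mask[faces[:, 0]] * verts_mask[faces[:, 1]] * verts_mask[faces[:, 2]]
    faces_val = faces[faces_mask.bool()]
    verts_idxs_new2old = torch.arange(0, verts.shape[0], device=verts.device)[verts_mask.bool()]
    verts_idxs_old2new = torch.zeros_like(verts_mask) - 1
    verts_idxs_old2new[verts_idxs_new2old] = torch.arange(0, verts_idxs_new2old.shape[0], device=verts.device)
    return verts[verts_idxs_new2old], verts_idxs_old2new[faces_val]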
def simplify_scans_2nd(dataset_dir, bin_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple_2nd')):
os.mkdir(os.path.join(dataset_dir, 'scans_simple_2nd'))
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple_2nd', dfaust_json.data['subject'])):
os.mkdir(os.path.join(dataset_dir, 'scans_simple_2nd', dfaust_json.data['subject']))
for seq in tqdm(dfaust_json.data['seqs']):
mesh_dir = os.path.join(dataset_dir, 'scans_simple_2nd', dfaust_json.data['subject'], seq['seq_name'])
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
ply_path = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'], frame['ply_path'].split('/')[-1])
verts, faces = load_ply(ply_path)
mesh_o3d = o3d.geometry.TriangleMesh()
mesh_o3d.vertices = o3d.utility.Vector3dVector(verts.cpu().numpy())
mesh_o3d.triangles = o3d.utility.Vector3iVector(faces.cpu().numpy())
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(faces.shape[0] * 0.5))
verts, faces = torch.from_numpy(np.asarray(mesh_o3d.vertices)), torch.from_numpy(np.asarray(mesh_o3d.triangles))
save_ply(os.path.join(mesh_dir, ply_path.split('/')[-1]), verts, faces)
def filter_outlier_verts(ws_dir, dataset_dir, bin_path, config_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
smpl_model = cape_utils.load_smpl(load_configs(config_path, ws_dir)).cuda()
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple')):
os.mkdir(os.path.join(dataset_dir, 'scans_simple'))
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'])):
os.mkdir(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject']))
for seq in tqdm(dfaust_json.data['seqs']):
mesh_dir = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'])
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
ply_path = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'], frame['ply_path'].split('/')[-1])
verts, faces = load_ply(ply_path)
verts, faces = verts.cuda(), faces.cuda()
poses = torch.from_numpy(frame['poses']).float().cuda()[None]
verts_smpl = smpl_model(poses).vertices[0]
dst, _, _ = knn_points(verts[None], verts_smpl[None], K=1, return_nn=True)
verts_mask = (dst.sqrt() < 0.1)[0, ..., 0]
if (~verts_mask).sum().item() > 0:
verts_mask = verts_mask.long()
faces_mask = verts_mask[faces[:, 0]] * verts_mask[faces[:, 1]] * verts_mask[faces[:, 2]]
faces_val = faces[faces_mask.bool()]
verts_idxs_new2old = torch.arange(0, verts.shape[0]).long()[verts_mask.bool()]
verts_idxs_old2new = torch.zeros_like(verts_mask) - 1
verts_idxs_old2new[verts_idxs_new2old] = torch.arange(0, verts_idxs_new2old.shape[0]).long().cuda()
faces = verts_idxs_old2new[faces_val]
verts = verts[verts_idxs_new2old]
save_ply(os.path.join(mesh_dir, ply_path.split('/')[-1]), verts, faces)
def save_registered_mesh(dataset_dir, subject, h5py_path):
if not os.path.exists(os.path.join(dataset_dir, 'reg_meshes')):
os.mkdir(os.path.join(dataset_dir, 'reg_meshes'))
if not os.path.exists(os.path.join(dataset_dir, 'reg_meshes', subject)):
os.mkdir(os.path.join(dataset_dir, 'reg_meshes', subject))
seq_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject)))
for seq_name in tqdm(seq_names):
ply_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject, seq_name)))
mesh_dir = os.path.join(dataset_dir, 'reg_meshes', subject, seq_name)
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
sidseq = subject + '_' + seq_name
with h5py.File(h5py_path, 'r') as f:
if sidseq not in f:
                print('Sequence %s from subject %s not in %s' % (seq_name, subject, h5py_path))
                sys.exit(1)  # the enclosing with-block closes the file
verts_seq = np.array(f[sidseq]).astype(np.float32).transpose([2, 0, 1])
            faces = np.array(f['faces']).astype(np.int64)  # face indices must be integers for save_ply
for i, ply_name in tqdm(enumerate(ply_names)):
verts = verts_seq[i]
save_ply(os.path.join(mesh_dir, ply_name), torch.from_numpy(verts), torch.from_numpy(faces))
def add_idx(bin_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
count = 0
for seq in tqdm(dfaust_json.data['seqs']):
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
frame['z_id'] = count
count += 1
dfaust_json.dump_bin_file(bin_path)
print(count)
if __name__ == '__main__':
# """
# generate_DFaust_SMPLH('/mnt/ImpDyn_ws/DFaust',
# '/mnt/ImpDyn_ws/SMPL',
# '/mnt/ImpDyn_ws/DFaust/smplh_meshes',
# '50002', 'male', gpu_id=0)
# """
# """
# smplh_to_smpl('/mnt/ImpDyn_ws/DFaust',
# '50002',
# '/mnt/ImpDyn_ws/SMPL/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50002')
# """
# """
# # 50002: interp (1st half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_004_jumping_jacks, seq_015_shake_arms
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v1', '/mnt/ImpDyn_ws/DFaust/50002_raw.bin', '50002',
# ([0, 14], ['chicken_wings', 'running_on_spot']), ([4, 15], ['jumping_jacks', 'shake_arms']))
# """
# """
# add_transl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', '50002',
# '/mnt/ImpDyn_ws/SMPL/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# """
# filter_outlier_verts('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# """
# """
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50002', '/mnt/ImpDyn_ws/DFaust/registrations_m.hdf5')
# """
# """
# add_idx('/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# # New process ---------------------
# DFaust_smplh_to_smpl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/SMPL', '/mnt/ImpDyn_ws/DFaust/smpl_poses', '50002', 'male', gpu_id=0)
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50002')
# # 50002: interp (1st half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_009_one_leg_jump, seq_010_one_leg_jump
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v2', '/mnt/ImpDyn_ws/DFaust/50002_raw.bin', '50002',
# ([0, 14], ['chicken_wings', 'running_on_spot']), ([9, 10], ['one_leg_jump', 'one_leg_jump']))
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# filter_outlier_verts('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50002', '/mnt/ImpDyn_ws/DFaust/registrations_m.hdf5')
# """
# add_idx('/mnt/ImpDyn_ws/DFaust/50002_v2.bin')
# # ---------------------------------
# """
# generate_DFaust_SMPLH('/mnt/ImpDyn_ws/DFaust',
# '/mnt/ImpDyn_ws/SMPL',
# '/mnt/ImpDyn_ws/DFaust/smplh_meshes',
# '50004', 'female', gpu_id=0)
# """
# """
# smplh_to_smpl('/mnt/ImpDyn_ws/DFaust',
# '50004',
# '/mnt/ImpDyn_ws/SMPL/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50004')
# """
# """
# # 50004: interp (2nd half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_004_jumping_jacks, seq_015_shake_arms
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v1', '/mnt/ImpDyn_ws/DFaust/50004_raw.bin', '50004',
# ([0, 18], ['chicken_wings', 'running_on_spot']), ([3, 19], ['jumping_jacks', 'shake_arms']))
# """
# """
# add_transl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50004_v1.bin', '50004',
# '/mnt/ImpDyn_ws/SMPL/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50004_v1.bin', 'configs/DispInput/DFaust_50004/AutoRegr.yaml')
# # simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# # New process ---------------------
# DFaust_smplh_to_smpl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/SMPL', '/mnt/ImpDyn_ws/DFaust/smpl_poses', '50004', 'female', gpu_id=0)
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50004')
# # 50004: interp (1st half train): seq_000_chicken_wings, seq_018_running_on_spot; extrap: seq_016_one_leg_loose
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v2', '/mnt/ImpDyn_ws/DFaust/50004_raw.bin', '50004',
# ([0, 18], ['chicken_wings', 'running_on_spot']), ([16], ['one_leg_loose']))
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# filter_outlier_verts('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50004_v2.bin', 'configs/DispInput/DFaust_50004/AutoRegr.yaml')
# """
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50004', '/mnt/ImpDyn_ws/DFaust/registrations_f.hdf5')
# """
# add_idx('/mnt/ImpDyn_ws/DFaust/50004_v2.bin')
from argparse import ArgumentParser
parser = ArgumentParser(description='Process DFaust data.')
parser.add_argument('--ws_dir', required=True, help='path of work space directory')
args = parser.parse_args()
# New process ---------------------
DFaust_smplh_to_smpl(
os.path.join(args.ws_dir, 'DFaust'),
os.path.join(args.ws_dir, 'SMPL'),
os.path.join(args.ws_dir, 'DFaust', 'smpl_poses'),
'50002', 'male', gpu_id=0
)
DFaust_parse_raw(os.path.join(args.ws_dir, 'DFaust'), '50002')
# 50002: interp (1st half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_009_one_leg_jump, seq_010_one_leg_jump
split_train_test(
os.path.join(args.ws_dir, 'DFaust'),
'v2',
os.path.join(args.ws_dir, 'DFaust', '50002_raw.bin'),
'50002',
([0, 14], ['chicken_wings', 'running_on_spot']),
([9, 10], ['one_leg_jump', 'one_leg_jump'])
)
simplify_scans(
args.ws_dir,
os.path.join(args.ws_dir, 'DFaust'),
os.path.join(args.ws_dir, 'DFaust', '50002_v2.bin'),
'configs/PosedDecKNN_dPoses_dHs/AutoRegr.yaml'
)
filter_outlier_verts(
args.ws_dir,
os.path.join(args.ws_dir, 'DFaust'),
os.path.join(args.ws_dir, 'DFaust', '50002_v2.bin'),
'configs/PosedDecKNN_dPoses_dHs/AutoRegr.yaml'
)
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50002', '/mnt/ImpDyn_ws/DFaust/registrations_m.hdf5')
add_idx(os.path.join(args.ws_dir, 'DFaust', '50002_v2.bin'))
# ---------------------------------
|
AutoAvatar-main
|
data/DFaust_generate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import pickle
import torch
from torch.utils.data import Dataset
from pytorch3d.io import load_ply
import utils.CAPE as cape_utils
class DFaustDataset(Dataset):
def __init__(self, args, dfaust_json, seq_list, skip=1, gap=1, eval_frames=None, no_mesh=False) -> None:
super().__init__()
self.args = copy.deepcopy(args)
self.dfaust_json = dfaust_json
self.seq_list = seq_list
self.skip = skip
self.gap = gap
self.eval_frames = eval_frames
self.no_mesh = no_mesh
self.n_frames = args['model']['n_batch_frames']
self.n_rollout = args['train']['n_rollout']
self.raw_dataset_dir = args['data']['raw_dataset_dir']
self.dataset_dir = args['data']['dataset_dir']
self.smooth_tag = args['data']['smooth_tag']
# self.faces = torch.from_numpy(np.load(os.path.join(args['data']['raw_dataset_dir'], 'misc', 'smpl_tris.npy')).astype(np.int32)).long()
self.samples = [[], []]
for seq_idx in seq_list:
seq = dfaust_json.data['seqs'][seq_idx]
seq_len = len(seq['frames'])
if eval_frames is None:
frame_idxs = list(range(0, seq_len - (self.n_frames - 2 + self.n_rollout) * skip, gap))
else:
frame_idxs = list(range(0, seq_len - self.n_frames + 1, 1))
self.samples[0] += [seq_idx] * len(frame_idxs)
self.samples[1] += frame_idxs
assert len(self.samples[0]) == len(self.samples[1])
def __len__(self):
return len(self.samples[0])
def __getitem__(self, index):
seq_idx, frame_idx = self.samples[0][index], self.samples[1][index]
end_idx = frame_idx + (self.n_frames + self.n_rollout - 1) * self.skip
if self.eval_frames is not None:
if frame_idx + self.args['model']['n_hist_frames'] == self.eval_frames[0]:
end_idx = len(self.dfaust_json.data['seqs'][seq_idx]['frames'])
else:
end_idx = min(end_idx, len(self.dfaust_json.data['seqs'][seq_idx]['frames']))
verts_list = []
faces_list = []
poses_list = []
verts_smt_list = []
faces_smt_list = []
z_ids_list = []
for i in range(frame_idx, end_idx, self.skip):
frame = self.dfaust_json.data['seqs'][seq_idx]['frames'][i]
ply_path = os.path.join(self.raw_dataset_dir, frame['ply_path'])
ply_path_ = os.path.join(self.dataset_dir, 'scans_simple', self.dfaust_json.data['subject'],
self.dfaust_json.data['seqs'][seq_idx]['seq_name'], ply_path.split('/')[-1])
if not self.no_mesh:
verts, faces = load_ply(ply_path_)
else:
verts, faces = torch.zeros((0, 3), dtype=torch.float32), torch.zeros((0, 3), dtype=torch.long)
poses = frame['poses']
assert poses.shape == (75,)
if not self.args['data']['separate_detail']:
verts_smt, faces_smt = verts.clone(), faces.clone()
else:
ply_path_ = os.path.join(self.dataset_dir, self.smooth_tag, self.dfaust_json.data['subject'],
self.dfaust_json.data['seqs'][seq_idx]['seq_name'], ply_path.split('/')[-1][:-4] + '_smt.ply')
verts_smt, faces_smt = load_ply(ply_path_)
z_ids_list.append(torch.tensor(frame['z_id']).long())
verts_list.append(verts)
faces_list.append(faces)
poses_list.append(torch.from_numpy(poses).float())
verts_smt_list.append(verts_smt)
faces_smt_list.append(faces_smt)
z_ids_list = torch.stack(z_ids_list, dim=0)
poses_list = torch.stack(poses_list, dim=0)
return {'verts_detail': verts_list, 'faces_detail': faces_list, 'verts_smt': verts_smt_list, 'faces_smt': faces_smt_list, 'poses': poses_list, 'z_ids': z_ids_list}
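# Hedged usage sketch (added; the paths are hypothetical and `args` is assumed to
# be a config dict carrying the keys read in __init__ above; DFaustJson comes from
# the repo's dfaust utils module, which this file does not import):
#
#   dfaust_json = dfaust_utils.DFaustJson('/path/to/50002_v2.bin')
#   with open('/path/to/50002_v2_train.bin', 'rb') as f:
#       seq_list = pickle.load(f)
#   dataset = DFaustDataset(args, dfaust_json, seq_list)
#   item = dataset[0]  # dict of per-frame mesh lists plus stacked poses / z_ids
#
# The per-frame meshes have varying vertex counts, so a DataLoader over this
# dataset needs batch_size=1 or a custom collate_fn rather than default stacking.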
|
AutoAvatar-main
|
data/DFaust_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pickle
import utils.CAPE as cape_utils
def CAPE_parse_raw(raw_dataset_dir, out_dir, subject, cloth_type):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
cape_json = cape_utils.CAPEJson()
seqs = []
txt_path = os.path.join(raw_dataset_dir, 'seq_lists', 'seq_list_%s.txt' % subject)
with open(txt_path, 'r') as f:
for line in f:
if cloth_type not in line:
continue
seq_name = line.strip().split()[0]
seq_dir = os.path.join(raw_dataset_dir, 'sequences', subject, seq_name)
npz_files = sorted(os.listdir(seq_dir))
pre_idx = None
frames = []
for i, npz_file in enumerate(npz_files):
idx = int(npz_file.strip().split('.')[1])
if pre_idx is not None and idx != pre_idx + 1:
seqs = cape_json.append_seqs(seqs, seq_name, frames)
frames = []
frames = cape_json.append_frames(frames, os.path.join('sequences', subject, seq_name, npz_file))
pre_idx = idx
seqs = cape_json.append_seqs(seqs, seq_name, frames)
cape_json.set_data(subject, cloth_type, seqs)
cape_json.dump_bin_file(os.path.join(out_dir, '%s_%s.bin' % (subject, cloth_type)))
print(cape_json.num_of_seqs())
print(cape_json.num_of_frames())
def split_train_test(out_dir, tag, bin_path, interp_acts, extrap_acts, test_trial):
def act_in_acts(query_act, acts):
for act in acts:
if act in query_act:
return True
return False
cape_json = cape_utils.CAPEJson(bin_path)
train_list = []
interp_list = []
extrap_list = []
for seq in cape_json.data['seqs']:
if act_in_acts(seq['seq_name'], extrap_acts):
extrap_list.append(seq['id'])
elif act_in_acts(seq['seq_name'], interp_acts):
if test_trial in seq['seq_name']:
interp_list.append(seq['id'])
else:
train_list.append(seq['id'])
else:
train_list.append(seq['id'])
with open(os.path.join(out_dir, '%s_train.bin' % tag), 'wb') as f:
pickle.dump(train_list, f)
with open(os.path.join(out_dir, '%s_interp.bin' % tag), 'wb') as f:
pickle.dump(interp_list, f)
with open(os.path.join(out_dir, '%s_extrap.bin' % tag), 'wb') as f:
pickle.dump(extrap_list, f)
print(train_list)
print(interp_list)
print(extrap_list)
if __name__ == '__main__':
# CAPE_parse_raw('/mnt/Datasets/CAPE/cape_release', '/mnt/Datasets/CAPE', '03375', 'longlong')
# split_train_test('/mnt/Datasets/CAPE/', '03375_longlong', '/mnt/Datasets/CAPE/03375_longlong.bin',
# ['box', 'swim', 'twist_tilt'], ['athletics', 'frisbee', 'volleyball'], 'trial1')
CAPE_parse_raw('/mnt/Datasets/CAPE/cape_release', '/mnt/Datasets/CAPE', '00134', 'shortlong')
|
AutoAvatar-main
|
data/CAPE_generate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import pickle
import torch
from torch.utils.data import Dataset
from pytorch3d.io import load_ply
import utils.CAPE as cape_utils
class AistDataset(Dataset):
def __init__(self, args, dfaust_json, seq_dir, skip=1, gap=1, eval_frames=None) -> None:
super().__init__()
self.args = copy.deepcopy(args)
self.dfaust_json = dfaust_json
self.seq_dir = seq_dir
self.skip = skip
self.gap = gap
self.eval_frames = eval_frames
self.n_frames = args['model']['n_batch_frames']
self.n_rollout = args['train']['n_rollout']
self.raw_dataset_dir = args['data']['raw_dataset_dir']
self.dataset_dir = args['data']['dataset_dir']
self.smooth_tag = args['data']['smooth_tag']
# self.faces = torch.from_numpy(np.load(os.path.join(args['data']['raw_dataset_dir'], 'misc', 'smpl_tris.npy')).astype(np.int32)).long()
self.frame_list = sorted(os.listdir(seq_dir))
def __len__(self):
return 1
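    # A single item spans the whole AIST pose sequence; the mesh lists come back
    # empty, and the first DFaust frame supplies the initialization mesh and pose
    # (verts_init / poses_init).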
def __getitem__(self, index):
verts_list = []
faces_list = []
poses_list = []
verts_smt_list = []
faces_smt_list = []
for i in range(index, len(self.frame_list), self.skip):
data = np.load(os.path.join(self.seq_dir, self.frame_list[i]))
rot, transl = data['pose'], data['transl']
poses = np.concatenate([transl, rot], axis=0)
assert poses.shape == (75,)
poses_list.append(torch.from_numpy(poses).float())
poses_list = torch.stack(poses_list, dim=0)
frame = self.dfaust_json.data['seqs'][0]['frames'][0]
ply_path = os.path.join(self.raw_dataset_dir, frame['ply_path'])
ply_path_ = os.path.join(self.dataset_dir, 'scans_simple_2nd', self.dfaust_json.data['subject'],
self.dfaust_json.data['seqs'][0]['seq_name'], ply_path.split('/')[-1])
verts_init, faces_init = load_ply(ply_path_)
poses_init = torch.from_numpy(frame['poses']).float()
assert poses_init.shape == (75,)
z_ids_list = torch.zeros(poses_list.shape[0]).long()
return {'verts_detail': verts_list, 'faces_detail': faces_list, 'verts_smt': verts_smt_list, 'faces_smt': faces_smt_list, 'poses': poses_list,
'verts_init': verts_init, 'faces_init': faces_init, 'poses_init': poses_init, 'z_ids': z_ids_list}
|
AutoAvatar-main
|
data/Aist_dataset.py
|
from setuptools import setup
if __name__ == "__main__":
setup(name='flyingsquid',
version='0.0.0a0',
description='Weak supervision with triplet methods',
url='https://github.com/HazyResearch/flyingsquid',
author='Dan Fu',
author_email='danfu@cs.stanford.edu',
license='Apache 2.0',
packages=['flyingsquid'])
|
flyingsquid-master
|
setup.py
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
from flyingsquid import _triplets
from flyingsquid import _graphs
from flyingsquid import _observables
from flyingsquid import _lm_parameters
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class LabelModel(_triplets.Mixin, _graphs.Mixin, _observables.Mixin,
_lm_parameters.Mixin):
def __init__(self, m, v=1, y_edges=[], lambda_y_edges=[], lambda_edges=[],
allow_abstentions=True, triplets=None, triplet_seed=0):
        '''Initialize the LabelModel and construct its graphical model G.
m: number of LF's
v: number of Y tasks
y_edges: edges between the tasks. (i, j) in y_edges means that
there is an edge between y_i and y_j.
lambda_y_edges: edges between LF's and tasks. (i, j) in lambda_y_edges
means that there is an edge between lambda_i and y_j. If this list
is empty, assume that all labeling functions are connected to Y_0.
lambda_edges: edges between LF's. (i, j) in lambda_edges means that
there is an edge between lambda_i and lambda_j.
allow_abstentions: if True, allow abstentions in L_train.
triplets: if specified, use these triplets
triplet_seed: if triplets not specified, randomly shuffle the nodes
with this seed when generating triplets
'''
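        # Example: LabelModel(m=3) models three LFs attached to a single task Y_0;
        # LabelModel(m=4, v=2, y_edges=[(0, 1)],
        #            lambda_y_edges=[(0, 0), (1, 0), (2, 1), (3, 1)])
        # attaches two LFs to each of two correlated tasks.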
if lambda_y_edges == []:
lambda_y_edges = [(i, 0) for i in range(m)]
G = MarkovModel()
# Add LF nodes
G.add_nodes_from([
'lambda_{}'.format(i)
for i in range(m)
])
G.add_nodes_from([
'Y_{}'.format(i)
for i in range(v)
])
# Add edges
G.add_edges_from([
('Y_{}'.format(start), 'Y_{}'.format(end))
for start, end in y_edges
])
G.add_edges_from([
('lambda_{}'.format(start), 'Y_{}'.format(end))
for start, end in lambda_y_edges
])
G.add_edges_from([
('lambda_{}'.format(start), 'lambda_{}'.format(end))
for start, end in lambda_edges
])
self.fully_independent_case = lambda_edges == []
self.m = m
if m < 3:
raise NotImplementedError("Triplet method needs at least three LF's to run.")
self.v = v
self.G = G
self.junction_tree = self.G.to_junction_tree()
self.nodes = sorted(list(self.G.nodes))
self.triplet_seed = triplet_seed
if triplet_seed is not None:
random.seed(triplet_seed)
random.shuffle(self.nodes)
self.separator_sets = set([
tuple(sorted(list((set(clique1).intersection(set(clique2))))))
for clique1, clique2 in self.junction_tree.edges
])
self.allow_abstentions = allow_abstentions
self.triplets = triplets
if not self._check():
raise NotImplementedError('Cannot run triplet method for specified graph.')
# Make this Picklable
def save(obj):
return (obj.__class__, obj.__dict__)
def load(cls, attributes):
obj = cls.__new__(cls)
obj.__dict__.update(attributes)
return obj
def enumerate_ys(self):
# order to output probabilities
vals = { Y: (-1, 1) for Y in range(self.v) }
Y_vecs = sorted([
[ vec_dict[Y] for Y in range(self.v) ]
for vec_dict in dict_product(vals)
])
return Y_vecs
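    # e.g. for v=2, enumerate_ys() returns [[-1, -1], [-1, 1], [1, -1], [1, 1]],
    # matching the lexicographic order used for class_balance and predict_proba.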
def _lambda_pass(self, L_train, lambda_marginals, lambda_moment_vals, lambda_equals_one,
lambda_zeros, abstention_probabilities, verbose = False):
'''
Make the pass over L_train.
In this pass, we need to:
* Compute all the joint marginal distributions over multiple lambda's (lambda_marginals)
* Compute the probabilities that some set of lambda's are all equal to zero (lambda_zeros)
* Compute all the lambda moments, including conditional moments (lambda_moment_vals)
* Compute the probability that the product of some lambdas is zero (abstention_probabilities)
'''
# do the fast cases first
easy_marginals = {
marginal: None
for marginal in lambda_marginals
if len(marginal) == 1
}
easy_moments = {
moment: None
for moment in lambda_moment_vals
if type(moment[0]) != type(()) and len(moment) <= 2
}
easy_equals_one = {
factor: None
for factor in lambda_equals_one
if type(factor[0]) != type(()) and len(factor) == 1
}
easy_zeros = {
condition: None
for condition in lambda_zeros if len(condition) == 1
}
easy_abstention_probs = {
factor: None
for factor in abstention_probabilities if len(factor) == 1
}
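        # Empirical moments over L_train: means[j] estimates E[lambda_j] and
        # covariance[j][k] estimates the (uncentered) second moment E[lambda_j lambda_k].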
means = np.einsum('ij->j', L_train)/L_train.shape[0]
covariance = np.einsum('ij,ik->jk', L_train, L_train)/L_train.shape[0]
lf_cardinality = 3 if self.allow_abstentions else 2
lf_values = (-1, 0, 1) if self.allow_abstentions else (-1, 1)
for marginal in easy_marginals:
idx = marginal[0]
counts = [ np.sum(L_train[:,idx] == val) / L_train.shape[0] for val in lf_values ]
easy_marginals[marginal] = JointProbabilityDistribution(
[ 'lambda_{}'.format(idx) ], [ lf_cardinality ], counts
)
if marginal in easy_equals_one:
easy_equals_one[marginal] = counts[-1]
if marginal in easy_zeros:
easy_zeros[marginal] = counts[1]
if marginal in easy_abstention_probs:
easy_abstention_probs[marginal] = counts[1]
for moment in easy_moments:
if len(moment) == 1:
easy_moments[moment] = means[moment[0]]
else:
easy_moments[moment] = covariance[moment[0]][moment[1]]
for factor in easy_equals_one:
if easy_equals_one[factor] is None:
easy_equals_one[factor] = np.sum(L_train[:,factor[0]] == 1) / L_train.shape[0]
for condition in easy_zeros:
if easy_zeros[condition] is None:
idx = condition[0]
easy_zeros[condition] = np.sum(L_train[:,idx] == 0) / L_train.shape[0]
for factor in easy_abstention_probs:
if easy_abstention_probs[factor] is None:
idx = factor[0]
easy_abstention_probs[factor] = np.sum(L_train[:,idx] == 0) / L_train.shape[0]
# time for the remaining cases
lambda_marginals = {
key: lambda_marginals[key]
for key in lambda_marginals
if key not in easy_marginals
}
lambda_moment_vals = {
key: lambda_moment_vals[key]
for key in lambda_moment_vals
if key not in easy_moments
}
lambda_equals_one = {
key: lambda_equals_one[key]
for key in lambda_equals_one
if key not in easy_equals_one
}
lambda_zeros = {
key: lambda_zeros[key]
for key in lambda_zeros
if key not in easy_zeros
}
abstention_probabilities = {
key: abstention_probabilities[key]
for key in abstention_probabilities
if key not in easy_abstention_probs
}
# for the rest, loop through L_train
if (len(lambda_marginals) > 0 or len(lambda_moment_vals) > 0 or
len(lambda_equals_one) > 0 or len(lambda_zeros) > 0 or
len(abstention_probabilities) > 0):
# figure out which lambda states we need to keep track of
lambda_marginal_counts = {}
lambda_marginal_vecs = {}
lf_values = (-1, 0, 1) if self.allow_abstentions else (-1, 1)
for lambda_marginal in lambda_marginals:
nodes = [ 'lambda_{}'.format(idx) for idx in lambda_marginal ]
vals = { lf: lf_values for lf in nodes }
lf_vecs = sorted([
[ vec_dict[lf] for lf in nodes ]
for vec_dict in dict_product(vals)
])
counts = {
tuple(lf_vec): 0
for lf_vec in lf_vecs
}
lambda_marginal_vecs[lambda_marginal] = lf_vecs
lambda_marginal_counts[lambda_marginal] = counts
lambda_moment_counts = { moment: 0 for moment in lambda_moment_vals }
lambda_moment_basis = { moment: 0 for moment in lambda_moment_vals }
lambda_equals_one_counts = { factor: 0 for factor in lambda_equals_one }
lambda_equals_one_basis = { factor: 0 for factor in lambda_equals_one }
lambda_zero_counts = { condition: 0 for condition in lambda_zeros }
abstention_probability_counts = { factor: 0 for factor in abstention_probabilities }
for data_point in tqdm(L_train) if verbose else L_train:
for marginal in lambda_marginals:
mask = [ data_point[idx] for idx in marginal ]
lambda_marginal_counts[marginal][tuple(mask)] += 1
for moment in lambda_moment_vals:
if type(moment[0]) == type(()):
pos_mask = [ data_point[idx] for idx in moment[0] ]
zero_mask = [ data_point[idx] for idx in moment[1] ]
if np.count_nonzero(zero_mask) == 0:
lambda_moment_basis[moment] += 1
lambda_moment_counts[moment] += np.prod(pos_mask)
else:
mask = [ data_point[idx] for idx in moment ]
lambda_moment_counts[moment] += np.prod(mask)
lambda_moment_basis[moment] += 1
for factor in lambda_equals_one:
if type(factor[0]) == type(()):
pos_mask = [ data_point[idx] for idx in factor[0] ]
zero_mask = [ data_point[idx] for idx in factor[1] ]
if np.count_nonzero(zero_mask) == 0:
lambda_equals_one_basis[factor] += 1
if np.prod(pos_mask) == 1:
lambda_equals_one_counts[factor] += 1
else:
mask = [ data_point[idx] for idx in factor ]
if np.prod(mask) == 1:
lambda_equals_one_counts[factor] += 1
lambda_equals_one_basis[factor] += 1
for zero_condition in lambda_zeros:
zero_mask = [ data_point[idx] for idx in zero_condition ]
if np.count_nonzero(zero_mask) == 0:
lambda_zero_counts[zero_condition] += 1
for factor in abstention_probability_counts:
zero_mask = [ data_point[idx] for idx in factor ]
if np.prod(zero_mask) == 0:
abstention_probability_counts[factor] += 1
lf_cardinality = 3 if self.allow_abstentions else 2
for marginal in lambda_marginals:
nodes = [ 'lambda_{}'.format(idx) for idx in marginal ]
lf_vecs = lambda_marginal_vecs[marginal]
counts = lambda_marginal_counts[marginal]
lambda_marginals[marginal] = JointProbabilityDistribution(
nodes, [ lf_cardinality for node in nodes ],
[
float(counts[tuple(lf_vec)]) / len(L_train)
for lf_vec in lf_vecs
]
)
for moment in lambda_moment_vals:
if lambda_moment_basis[moment] == 0:
moment_val = 0
else:
moment_val = lambda_moment_counts[moment] / lambda_moment_basis[moment]
lambda_moment_vals[moment] = moment_val
for factor in lambda_equals_one:
if lambda_equals_one_basis[factor] == 0:
prob = 0
else:
prob = lambda_equals_one_counts[factor] / lambda_equals_one_basis[factor]
lambda_equals_one[factor] = prob
for zero_condition in lambda_zeros:
lambda_zeros[zero_condition] = lambda_zero_counts[zero_condition] / len(L_train)
for factor in abstention_probabilities:
abstention_probabilities[factor] = abstention_probability_counts[factor] / len(L_train)
# update with the easy values
lambda_marginals.update(easy_marginals)
lambda_moment_vals.update(easy_moments)
lambda_equals_one.update(easy_equals_one)
lambda_zeros.update(easy_zeros)
abstention_probabilities.update(easy_abstention_probs)
return lambda_marginals, lambda_moment_vals, lambda_equals_one, lambda_zeros, abstention_probabilities
def fit(self, L_train, class_balance=None, Y_dev=None, flip_negative=True, clamp=True,
solve_method='triplet_mean',
sign_recovery='all_positive',
verbose = False):
        r'''Compute the marginal probabilities of each clique and separator set in the junction tree.
        L_train: an n x m matrix of LF outputs, where n is the number of data points and
            m the number of labeling functions. L_train[k][i] is the value of \lambda_i on item k.
            1 means positive, -1 means negative, 0 means abstain.
class_balance: a 2^v vector of the probabilities of each combination of Y values. Sorted in
lexicographical order (entry zero is for Y_0 = -1, ..., Y_{v-1} = -1, entry one is for
Y_0 = -1, ..., Y_{v-1} = 1, last entry is for Y_0 = 1, ..., Y_{v-1} = 1).
Y_dev: a v x |Y_dev| matrix of ground truth examples. If class_balance is not specified, this
is used to find out the class balance. Otherwise not used.
If this is not specified, and class_balance is not specified, then class balance is uniform.
1 means positive, -1 means negative.
flip_negative: if True, flip sign of negative probabilities
clamp: if True and flip_negative is not True, set negative probabilities to 0
solve_method: one of ['triplet_mean', 'triplet_median', 'triplet', 'independencies']
If triplet, use the method below and the independencies we write down there.
If independencies, use the following facts:
* For any lambda_i: lambda_i * Y and Y are independent for any i, so
E[lambda_i Y] = E[lambda_i] / E[Y]
* For any lambda_i, lambda_j: E[lambda_i * lambda_j * Y] = E[lambda_i * lambda_j] * E[Y]
* For an odd number of lambda's, the first property holds; for an even number, the second
property holds
Only triplet implemented right now.
sign_recovery: one of ['all_positive', 'fully_independent']
If all_positive, assume that all accuracies that we compute are positive.
If fully_independent, assume that the accuracy of lambda_0 on Y_0 is positive, and that for
any lambda_i and lambda_{i+1}, sign(lambda_i lambda_{i+1}) = sign(M_{i,i+1}) where M_{i, i+1}
is the second moment between lambda_0 and lambda_i.
If solve_method is independencies, we don't need to do this.
Only all_positive implemented right now.
verbose: if True, print out messages to stderr as we make progress
How we go about solving these probabilities (for Triplet method):
* We assume that we have the joint distribution/class balance of our Y's (or can infer it
from the dev set).
* We observe agreements and disagreements between LF's, so we can compute values like
P(\lambda_i \lambda_j = 1).
* The only thing we need to estimate now are correlations between LF's and (unseen) Y's -
values like P(\lambda_i Y_j = 1).
* Luckily, we have P(\lambda_i Y_j = 1) = 1/2(1 + E[\lambda_i Y_j]). We refer to E[\lambda_i Y_j]
as the accuracy of \lambda_i on Y_j.
* And because of the format of our exponential model, we have:
E[\lambda_i Y_j]E[\lambda_k Y_j] = E[\lambda_i Y_j \lambda_k Y_j] = E[\lambda_i \lambda_k]
For any \lambda_i, \lambda_k that are conditionally independent given Y_j. This translates to
Y_j being a separator of \lambda_i and \lambda_k in our graphical model.
And we can observe E[\lambda_i \lambda_k] (the second moment) from L_train!
* The algorithm proceeds to estimate the marginal probabilities by picking out triplets of
conditionally-independent subsets of LF's, and estimating the accuracies of LF's on Y's.
* Then, to recover the joint probabilities, we can solve a linear system B e = r (written out in latex):
$$\begin{align*}
\begin{bmatrix}
1 & 1 & 1 & 1 \\
1 & 0 & 1 & 0 \\
1 & 1 & 0 & 0 \\
1 & 0 & 0 &1
\end{bmatrix}
\begin{bmatrix}
p_{\lambda_i, Y_j}(+1, +1)\\
p_{\lambda_i, Y_j}(-1, +1) \\
p_{\lambda_i, Y_j}(+1, -1) \\
p_{\lambda_i, Y_j}(-1, -1) \end{bmatrix} =
\begin{bmatrix} 1 \\
P(\lambda_{i} = 1) \\
P(Y_j = 1) \\
\rho_{i, j} \end{bmatrix} .
\end{align*}$$
            The values on the left of the equality are an invertible matrix and the unknown
            marginal entries P(\lambda_i = 1, Y_j = 1), P(\lambda_i = -1, Y_j = 1), and so on.
            The values on the right of the equality are [1, P(\lambda_i = 1), P(Y_j = 1), \rho_{i, j}]^T,
            where \rho_{i, j} = P(\lambda_i = Y_j).
We can observe or solve for all the values on the right, to solve for the values in the marginal
probability!
This can also be extended to multiple dimensions.
Outputs: None.
'''
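        # Worked identity behind the triplet step: with accuracies a_i = E[lambda_i Y]
        # and observable second moments M_ik = E[lambda_i lambda_k], each triplet of
        # conditionally independent LFs gives a_i^2 = M_ij * M_ik / M_jk, so
        # a_i = +/- sqrt(M_ij * M_ik / M_jk), with the sign fixed by sign_recovery.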
        # if abstentions are not allowed, check that L_train contains no zeros
if not self.allow_abstentions:
if np.count_nonzero(L_train) < L_train.shape[0] * L_train.shape[1]:
print('Abstentions not allowed!')
return
# Y marginals to compute
Y_marginals = {}
# lambda marginals to compute
lambda_marginals = {}
# marginals will eventually be returned here
marginals = [
(clique, None)
for clique in sorted(list(self.junction_tree.nodes)) + sorted(list(self.separator_sets))
]
def num_Ys(nodes):
if nodes == tuple([1]) or nodes == tuple([0]):
return 0
return len([
node for node in nodes if 'Y' in node
])
def num_lambdas(nodes):
if nodes == tuple([1]) or nodes == tuple([0]):
return 0
return len([
node for node in nodes if 'lambda' in node
])
observable_cliques = []
non_observable_cliques = []
for i, (clique, _) in enumerate(marginals):
if num_Ys(clique) == 0 or num_lambdas(clique) == 0:
observable_cliques.append(i)
else:
non_observable_cliques.append(i)
# write down everything we need for the observable cliques
for idx in observable_cliques:
clique = marginals[idx][0]
indices = tuple(sorted([ int(node.split('_')[1]) for node in clique ]))
if 'Y' in clique[0]:
if indices not in Y_marginals:
Y_marginals[indices] = None
else:
if indices not in lambda_marginals:
lambda_marginals[indices] = None
if verbose:
print('Marginals written down', file=sys.stderr)
# for each marginal we need to estimate, write down the r vector that we need
r_vecs = {} # mapping from clique index to the r vector
r_vals = {} # mapping from a value name (like Y_1 or tuple(lambda_1, Y_1)) to its value
for idx in non_observable_cliques:
clique = list(reversed(sorted(marginals[idx][0])))
r_vec = self._generate_r_vector(clique)
r_vecs[idx] = r_vec
for r_val in r_vec:
if r_val not in r_vals:
r_vals[r_val] = None
if verbose:
print('R vector written down', file=sys.stderr)
# write down all the sets of zero conditions
lambda_zeros = {}
# write down the moment values that we need to keep track of when we walk through the L matrix
Y_equals_one = {}
lambda_equals_one = {}
# write down which expectations we need to solve using the triplet method
expectations_to_estimate = set()
for r_val in r_vals:
if not self.allow_abstentions or r_val[1] == tuple(['0']):
equals_one_tup = r_val if not self.allow_abstentions else r_val[0]
if equals_one_tup[0] == '1':
# If the value is 1, the probability is just 1
r_vals[r_val] = 1
elif num_Ys(equals_one_tup) != 0 and num_lambdas(equals_one_tup) != 0:
# If this contains lambdas and Y's, we can't observe it
expectations_to_estimate.add(r_val)
elif num_Ys(equals_one_tup) != 0:
# We need to cache this moment
indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
if indices not in Y_equals_one:
Y_equals_one[indices] = None
elif num_lambdas(equals_one_tup) != 0:
# If it contains just lambdas, go through L_train
indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
if indices not in lambda_equals_one:
lambda_equals_one[indices] = None
else:
# we allow abstentions, and there are clauses that are equal to zero
equals_one_tup = r_val[0]
equals_zero_tup = r_val[1]
if num_lambdas(equals_one_tup) > 0 and num_Ys(equals_one_tup) > 0:
# we can't observe this
expectations_to_estimate.add(r_val)
elif num_lambdas(equals_one_tup) > 0:
# compute probability some lambda's multiply to one, subject to some zeros
pos_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
zero_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_zero_tup ]))
tup = (pos_indices, zero_indices)
if tup not in lambda_equals_one:
lambda_equals_one[tup] = None
if zero_indices not in lambda_zeros:
lambda_zeros[zero_indices] = None
else:
# compute a Y equals one probability, and multiply it by probability of zeros
if equals_one_tup[0] != '1':
pos_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
if pos_indices not in Y_equals_one:
Y_equals_one[pos_indices] = None
zero_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_zero_tup ]))
if zero_indices not in lambda_zeros:
lambda_zeros[zero_indices] = None
if verbose:
print('Expectations to estimate written down', file=sys.stderr)
        if solve_method.startswith('triplet'):
            triplets, new_moment_vals, abstention_probabilities = self._triplet_method_preprocess(
                expectations_to_estimate, solve_method)
            self.triplets = triplets
        elif solve_method == 'independencies':
            print('Independencies not implemented yet!')
            return
        else:
            raise ValueError('Unknown solve_method: {}'.format(solve_method))
if verbose:
print('Triplets constructed', file=sys.stderr)
lambda_moment_vals = {}
for moment in new_moment_vals:
if moment not in lambda_moment_vals:
lambda_moment_vals[moment] = None
# now time to compute all the Y marginals
self.cb = self._compute_class_balance(class_balance, Y_dev)
Y_marginals = self._compute_Y_marginals(Y_marginals)
if verbose:
print('Y marginals computed', file=sys.stderr)
Y_equals_one = self._compute_Y_equals_one(Y_equals_one)
if verbose:
print('Y equals one computed', file=sys.stderr)
self.Y_marginals = Y_marginals
self.Y_equals_one = Y_equals_one
# now time to compute the lambda moments, marginals, zero conditions, and abstention probs
lambda_marginals, lambda_moment_vals, lambda_equals_one, lambda_zeros, abstention_probabilities = self._lambda_pass(
L_train, lambda_marginals, lambda_moment_vals, lambda_equals_one,
lambda_zeros, abstention_probabilities, verbose = verbose)
if verbose:
print('lambda marginals, moments, conditions computed', file=sys.stderr)
self.lambda_marginals = lambda_marginals
self.lambda_moment_vals = lambda_moment_vals
self.lambda_equals_one = lambda_equals_one
self.lambda_zeros = lambda_zeros
self.abstention_probabilities = abstention_probabilities
# put observable cliques in the right place
for idx in observable_cliques:
clique = marginals[idx][0]
indices = tuple(sorted([ int(node.split('_')[1]) for node in clique ]))
if 'Y' in clique[0]:
marginal = Y_marginals[indices]
else:
marginal = lambda_marginals[indices]
marginals[idx] = (clique, marginal)
# get unobserved probabilities
        if solve_method.startswith('triplet'):
probability_values, expectation_values = self._triplet_method_probabilities(
triplets, lambda_moment_vals, lambda_zeros,
abstention_probabilities, sign_recovery, solve_method)
elif solve_method == 'independencies':
print('Independencies not implemented yet!')
return
self.probability_values = probability_values
self.expectation_values = expectation_values
if verbose:
print('Unobserved probabilities computed', file=sys.stderr)
# put values into the R vectors
for r_val in r_vals:
if not self.allow_abstentions or r_val[1] == tuple(['0']):
equals_one_tup = r_val if not self.allow_abstentions else r_val[0]
if equals_one_tup[0] == '1':
# If the value is 1, the probability is just 1
pass
elif num_Ys(equals_one_tup) != 0 and num_lambdas(equals_one_tup) != 0:
# If this contains lambdas and Y's, we can't observe it
r_vals[r_val] = probability_values[r_val]
elif num_Ys(equals_one_tup) != 0:
# We need to cache this moment
indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
r_vals[r_val] = Y_equals_one[indices]
elif num_lambdas(equals_one_tup) != 0:
indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
r_vals[r_val] = lambda_equals_one[indices]
else:
# we allow abstentions, and there are clauses that are equal to zero
equals_one_tup = r_val[0]
equals_zero_tup = r_val[1]
if num_lambdas(equals_one_tup) > 0 and num_Ys(equals_one_tup) > 0:
# we can't observe this
r_vals[r_val] = probability_values[r_val]
elif num_lambdas(equals_one_tup) > 0:
# compute lambda moment, subject to some zeros
pos_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
zero_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_zero_tup ]))
tup = (pos_indices, zero_indices)
r_vals[r_val] = lambda_equals_one[tup]
else:
# compute a Y moment, and multiply it by probability of zeros
if equals_one_tup[0] != '1':
pos_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_one_tup ]))
pos_prob = Y_equals_one[pos_indices]
else:
pos_prob = 1.
zero_indices = tuple(sorted([ int(node.split('_')[1]) for node in equals_zero_tup ]))
zero_probs = lambda_zeros[zero_indices]
r_vals[r_val] = pos_prob * zero_probs
self.r_vals = r_vals
if verbose:
print('R values computed', file=sys.stderr)
# solve for marginal values
for idx in non_observable_cliques:
clique = list(reversed(sorted(marginals[idx][0])))
r_vec = r_vecs[idx]
r_vec_vals = np.array([ r_vals[exp] for exp in r_vec ])
# e_vec is the vector of marginal values
e_vec = self._generate_e_vector(clique)
b_matrix = self._generate_b_matrix(clique)
e_vec_vals = np.linalg.inv(b_matrix) @ r_vec_vals
e_vec_val_index = { tup: i for i, tup in enumerate(e_vec) }
marginal_vals = np.array([
e_vec_vals[e_vec_val_index[tup]]
for tup in sorted(e_vec)
])
if flip_negative:
marginal_vals[marginal_vals < 0] = marginal_vals[marginal_vals < 0] * -1
marginal_vals /= sum(marginal_vals)
elif clamp:
marginal_vals[marginal_vals < 0] = 1e-8
marginal_vals /= sum(marginal_vals)
indices = [ int(node.split('_')[1]) for node in clique ]
lf_indices = sorted(indices[:-1])
Y_idx = indices[-1]
variables = [ 'lambda_{}'.format(i) for i in lf_indices ] + [ 'Y_{}'.format(Y_idx) ]
# cardinality 3 for lambda variables if you allow abstentions, 2 for Y's
cardinalities = [
3 if self.allow_abstentions else 2
for i in range(len(lf_indices))
] + [2]
marginal = DiscreteFactor(variables, cardinalities, marginal_vals).normalize(inplace = False)
marginals[idx] = (clique, marginal)
self.clique_marginals = marginals[:len(self.junction_tree.nodes)]
self.separator_marginals = marginals[len(self.junction_tree.nodes):]
separator_degrees = {
sep: 0
for sep in self.separator_sets
}
for clique1, clique2 in self.junction_tree.edges:
separator_degrees[tuple(sorted(list((set(clique1).intersection(set(clique2))))))] += 1
self.separator_degrees = separator_degrees
def reduce_marginal(self, marginal, data_point):
lf_vals = [-1, 0, 1] if self.allow_abstentions else [-1, 1]
params = [
(var, lf_vals.index(data_point[int(var.split('_')[1])]))
for var in marginal.variables if 'lambda' in var
]
return marginal.reduce(params, inplace=False) if len(params) > 0 else marginal
def predict_proba(self, L_matrix, verbose=True):
        r'''Predict the probabilities of the Y's given the outputs of the LF's.
        L_matrix: an n x m matrix of LF outputs, where n is the number of data points.
            L_matrix[k][i] is the value of \lambda_i on item k.
            1 means positive, -1 means negative, 0 means abstain.
Let C be the set of all cliques in the graphical model, and S the set of all separator sets.
Let d(s) for s \in S be the number of maximal cliques that s separates.
Then, we have the following formula for the joint probability:
P(\lambda_1, ..., \lambda_m, Y_1, ..., Y_v) =
\prod_{c \in C} \mu_c(c) / \prod_{s \in S} [\mu_s(s)]^(d(s) - 1)
Where \mu_c and \mu_s are the marginal probabilities of a clique c or a separator s, respectively.
We solved for these marginals during the fit function, so now we use them for inference!
        Outputs: an n x 2^v matrix of probabilities, one row per data point. The
            probabilities for the Y combinations are sorted lexicographically.
'''
def num_lambdas(nodes):
return len([
node for node in nodes if 'lambda' in node
])
L_matrix = np.array(L_matrix)
Y_vecs = self.enumerate_ys()
numerator_vals_by_lambda_count = []
max_lambda_count = max([ num_lambdas(clique) for clique, marginal in self.clique_marginals ])
# Compute all marginals that have lambda_count lambdas
for lambda_count in range(1, max_lambda_count + 1):
correct_lambda_cliques = [
(clique, marginal)
for clique, marginal in self.clique_marginals if num_lambdas(clique) == lambda_count
]
if len(correct_lambda_cliques) == 0:
continue
lambda_options = (-1, 0, 1) if self.allow_abstentions else (-1, 1)
lambda_vals = {
i: lambda_options
for i in range(lambda_count)
}
lambda_vecs = sorted([
[ vec_dict[i] for i in range(lambda_count) ]
for vec_dict in dict_product(lambda_vals)
])
# index by Y_vec, clique, and lambda value
A_lambda = np.zeros((len(Y_vecs), len(correct_lambda_cliques), len(lambda_vecs)))
for i, Y_vec in enumerate(Y_vecs):
for j, (clique, marginal) in enumerate(correct_lambda_cliques):
lambda_marginal = marginal.reduce(
[
('Y_{}'.format(Y_idx), y_val if y_val == 1 else 0)
for Y_idx, y_val in enumerate(Y_vec)
if 'Y_{}'.format(Y_idx) in clique
],
inplace = False
)
for k, lambda_vec in enumerate(lambda_vecs):
A_lambda[i, j, k] = lambda_marginal.reduce(
[
(
clique_node,
lambda_options.index(lambda_val)
)
for clique_node, lambda_val in zip(clique, lambda_vec)
],
inplace=False).values
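            # Encode each data point's LF values for every clique as a mixed-radix
            # integer (base = number of LF states), indexing A_lambda's last axis.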
indexes = np.array([
[
np.sum([
((lambda_options.index(data_point[int(node.split('_')[1])])) *
((len(lambda_options)) ** (lambda_count - i - 1)))
for i, node in enumerate(clique[:-1])
])
for clique, marginal in correct_lambda_cliques
]
for data_point in L_matrix
]).astype('int')
clique_values = A_lambda[:, np.arange(indexes.shape[1]), indexes]
numerator_values = np.prod(clique_values, axis=2)
numerator_vals_by_lambda_count.append(numerator_values)
# Compute all marginals that have zero lambdas
zero_lambda_cliques = [
(clique, marginal)
for clique, marginal in self.clique_marginals if num_lambdas(clique) == 0
]
if len(zero_lambda_cliques) > 0:
A_y = np.zeros((len(Y_vecs), len(zero_lambda_cliques)))
for i, Y_vec in enumerate(Y_vecs):
for j, (clique, marginal) in enumerate(zero_lambda_cliques):
Y_marginal = marginal.reduce(
[
('Y_{}'.format(Y_idx), y_val if y_val == 1 else 0)
for Y_idx, y_val in enumerate(Y_vec)
if 'Y_{}'.format(Y_idx) in clique
],
inplace = False
)
A_y[i, j] = Y_marginal.values
y_probs = np.prod(A_y, axis=1)
numerator_ys = np.array([y_probs,] * L_matrix.shape[0]).T
# Compute all separator marginals
zero_lambda_separators = [
(clique, marginal)
for clique, marginal in self.separator_marginals if num_lambdas(clique) == 0
]
A_y_sep = np.zeros((len(Y_vecs), len(zero_lambda_separators)))
for i, Y_vec in enumerate(Y_vecs):
for j, (clique, marginal) in enumerate(zero_lambda_separators):
Y_marginal = marginal.reduce(
[
('Y_{}'.format(Y_idx), y_val if y_val == 1 else 0)
for Y_idx, y_val in enumerate(Y_vec)
if 'Y_{}'.format(Y_idx) in clique
],
inplace = False
)
A_y_sep[i, j] = Y_marginal.values ** (self.separator_degrees[clique] - 1)
y_probs_sep = np.prod(A_y_sep, axis=1)
denominator_ys = np.array([y_probs_sep,] * L_matrix.shape[0]).T
predictions = numerator_vals_by_lambda_count[0]
for lambda_numerator in numerator_vals_by_lambda_count[1:]:
predictions = predictions * lambda_numerator
if len(zero_lambda_cliques) > 0:
predictions = predictions * numerator_ys
predictions = (predictions / denominator_ys).T
# in the case of zero-sum predictions
predictions[predictions.sum(axis = 1) == 0] += .001
normalized_preds = predictions / np.array(([predictions.sum(axis = 1),] * len(Y_vecs))).T
return normalized_preds
def predict(self, L_matrix, verbose=True):
        r'''Predict the value of the Y's that best fits the outputs of the LF's.
        L_matrix: an n x m matrix of LF outputs, where n is the number of data points.
            L_matrix[k][i] is the value of \lambda_i on item k.
            1 means positive, -1 means negative, 0 means abstain.
Let C be the set of all cliques in the graphical model, and S the set of all separator sets.
Let d(s) for s \in S be the number of maximal cliques that s separates.
Then, we have the following formula for the joint probability:
P(\lambda_1, ..., \lambda_m, Y_1, ..., Y_v) =
\prod_{c \in C} \mu_c(c) / \prod_{s \in S} [\mu_s(s)]^(d(s) - 1)
Where \mu_c and \mu_s are the marginal probabilities of a clique c or a separator s, respectively.
We solved for these marginals during the fit function, so now we use them for inference!
        Outputs: an n x v matrix of predicted outputs, one row of Y values per data point.
'''
Y_vecs = self.enumerate_ys()
combination_probs = self.predict_proba(L_matrix, verbose=verbose)
most_likely = np.argmax(combination_probs, axis=1)
preds = np.array(Y_vecs)[most_likely]
return preds
def predict_proba_marginalized(self, L_matrix, verbose=False):
        r'''Predict the probabilities of the Y's given the outputs of the LF's, marginalizing
        out the other Y values every time (returns a separate P(Y_i = 1) for each task).
        L_matrix: an n x m matrix of LF outputs, where n is the number of data points.
            L_matrix[k][i] is the value of \lambda_i on item k.
            1 means positive, -1 means negative, 0 means abstain.
Let C be the set of all cliques in the graphical model, and S the set of all separator sets.
Let d(s) for s \in S be the number of maximal cliques that s separates.
Then, we have the following formula for the joint probability:
P(\lambda_1, ..., \lambda_m, Y_1, ..., Y_v) =
\prod_{c \in C} \mu_c(c) / \prod_{s \in S} [\mu_s(s)]^(d(s) - 1)
Where \mu_c and \mu_s are the marginal probabilities of a clique c or a separator s, respectively.
We solved for these marginals during the fit function, so now we use them for inference!
        Outputs: a flat array of n * v marginalized probabilities, P(Y_i = 1) for each
            data point and each task, grouped by data point.
'''
combination_probs = self.predict_proba(L_matrix, verbose=verbose)
# construct indices for each task
Y_vecs = self.enumerate_ys()
task_indices = [
[ idx for idx, y_vec in enumerate(Y_vecs) if y_vec[i] == 1 ]
for i in range(self.v)
]
return np.sum(combination_probs[:, task_indices], axis=2).reshape(len(combination_probs) * self.v)
def estimated_accuracies(self):
'''Get the estimated accuracies of each LF.
Assumes that each LF is connected to exactly one Y node.
Let Y(i) denote the node that LF i is connected to.
This function returns an array of values P(lambda_i = Y(i)), for each LF i.
Outputs: a m-sized array of estimated LF accuracies.
'''
        if getattr(self, 'probability_values', None) is None:
print('You need to train the label model first!')
return
accuracies = []
for i in range(self.m):
lambda_node = 'lambda_{}'.format(i)
Y_node = [
e2
for e1, e2 in self.G.edges
if e1 == lambda_node and 'Y' in e2
][0]
            prob_key = (
                (lambda_node, Y_node), ('0', )
            ) if self.allow_abstentions else (lambda_node, Y_node)
            accuracies.append(self.probability_values[prob_key])
return accuracies
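if __name__ == '__main__':
    # Hedged demo (added for illustration): a minimal end-to-end sketch on
    # synthetic data. Three conditionally independent LFs each agree with a
    # hidden label Y 80% of the time; the label model should estimate
    # accuracies near E[lambda Y] = 2 * 0.8 - 1 = 0.6 and mostly match Y.
    np.random.seed(0)
    n = 10000
    Y_true = np.random.choice([-1, 1], size=n)
    L_train = np.stack(
        [np.where(np.random.rand(n) < 0.8, Y_true, -Y_true) for _ in range(3)],
        axis=1,
    )
    label_model = LabelModel(3)
    label_model.fit(L_train)
    preds = label_model.predict(L_train).flatten()
    print('agreement with Y:', np.mean(preds == Y_true))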
|
flyingsquid-master
|
flyingsquid/label_model.py
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Triplet algorithms as a Mixin. These algorithms recover the mean parameters
of the graphical model.
'''
def _triplet_method_single_seed(self, expectations_to_estimate):
# create triplets for what we need, and return which moments we'll need to compute
exp_to_estimate_list = sorted(list(expectations_to_estimate))
if self.triplet_seed is not None:
random.shuffle(exp_to_estimate_list)
if self.triplets is None:
expectations_in_triplets = set()
triplets = []
for expectation in exp_to_estimate_list:
# if we're already computing it, don't need to add to a new triplet
if expectation in expectations_in_triplets:
continue
if not self.allow_abstentions:
Y_node = expectation[-1]
else:
Y_node = expectation[0][-1]
def check_triplet(triplet):
return (self._is_separator(triplet[0][:-1], triplet[1][:-1], Y_node) and
self._is_separator(triplet[0][:-1], triplet[2][:-1], Y_node) and
self._is_separator(triplet[1][:-1], triplet[2][:-1], Y_node))
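                # A triplet is usable only if Y_node separates each pair of its
                # members in the graphical model, so the pairwise products of
                # accuracies equal observable second moments.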
triplet = [expectation]
found = False
# first try looking at the other expectations that we need to estimate
for first_node in exp_to_estimate_list:
if self.allow_abstentions:
# need to check if conditionals are the same
if (first_node in triplet or # skip if it's already in the triplet
first_node[0][-1] != Y_node or # skip if the Y values aren't the same
first_node[1] != expectation[1] or # skip if conditions are different
(len(first_node[0]) > 2 and len(expectation[0]) > 2) or # at most one item in the triplet can have length > 2
first_node in expectations_in_triplets or # we're already computing this
not self._is_separator(expectation[0][:-1], first_node[0][:-1], Y_node)): # not separated
continue
else:
if (first_node in triplet or # skip if it's already in the triplet
first_node[-1] != Y_node or # skip if the Y values aren't the same
(len(first_node) > 2 and len(expectation) > 2) or # at most one item in the triplet can have length > 2
first_node in expectations_in_triplets or # we're already computing this
not self._is_separator(expectation[:-1], first_node[:-1], Y_node)): # not separated
continue
triplet = [expectation, first_node]
# first try looking at the other expectations that we need to estimate
for second_node in exp_to_estimate_list:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[0][-1] != Y_node or # skip if the Y values aren't the same
second_node[1] != expectation[1] or # skip if conditions are different
(len(second_node[0]) > 2 and
any(len(exp[0]) > 2 for exp in triplet)) or # at most one item in the triplet can have length > 2
second_node in expectations_in_triplets or # we're already computing this
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
second_node[-1] != Y_node or # skip if the Y values aren't the same
(len(second_node) > 2 and
any(len(exp) > 2 for exp in triplet)) or # at most one item in the triplet can have length > 2
second_node in expectations_in_triplets or # we're already computing this
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
found = True
break
if found:
break
# otherwise, try everything
for second_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes
]:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[1] != expectation[1] or # skip if conditions are different
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
found = True
break
if found:
break
if not found:
# try everything
for first_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (first_node in triplet or # skip if it's already in the triplet
first_node[0][0] in expectation[1] or # skip if the node is part of the condition
not self._is_separator(expectation[0][:-1], first_node[0][:-1], Y_node)): # not separated
continue
else:
if (first_node in triplet or # skip if it's already in the triplet
not self._is_separator(expectation[:-1], first_node[:-1], Y_node)): # not separated
continue
triplet = [expectation, first_node]
if found:
break
for second_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[0][0] in expectation[1] or # skip if the node is part of the condition
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
found = True
break
if found:
break
if found:
triplets.append(triplet)
for expectation in triplet:
expectations_in_triplets.add(expectation)
else:
triplets = self.triplets
all_moments = set()
abstention_probabilities = {}
for exp1, exp2, exp3 in triplets:
if self.allow_abstentions:
condition = exp1[1]
moments = [
tuple(sorted(exp1[0][:-1] + exp2[0][:-1])),
tuple(sorted(exp1[0][:-1] + exp3[0][:-1])),
tuple(sorted(exp2[0][:-1] + exp3[0][:-1]))
]
indices1 = tuple(sorted([ int(node.split('_')[1]) for node in exp1[0][:-1] ]))
indices2 = tuple(sorted([ int(node.split('_')[1]) for node in exp2[0][:-1] ]))
indices3 = tuple(sorted([ int(node.split('_')[1]) for node in exp3[0][:-1] ]))
if indices1 not in abstention_probabilities:
abstention_probabilities[indices1] = 0
if indices2 not in abstention_probabilities:
abstention_probabilities[indices2] = 0
if indices3 not in abstention_probabilities:
abstention_probabilities[indices3] = 0
else:
# first, figure out which moments we need to compute
moments = [
tuple(sorted(exp1[:-1] + exp2[:-1])),
tuple(sorted(exp1[:-1] + exp3[:-1])),
tuple(sorted(exp2[:-1] + exp3[:-1]))
]
for moment in moments:
indices = tuple(sorted([ int(node.split('_')[1]) for node in moment ]))
if indices not in all_moments:
all_moments.add(indices)
return triplets, all_moments, abstention_probabilities
def _triplet_method_mean_median(self, expectations_to_estimate, solve_method):
exp_to_estimate_list = sorted(list(expectations_to_estimate))
triplets = []
if self.triplets is None:
if self.fully_independent_case:
Y_node = 'Y'
all_nodes = [
((node, Y_node), '0') if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]
triplets = [
[i, j, k]
for i in all_nodes
for j in all_nodes if i != j
for k in all_nodes if i != k and k != j
] + [
[expectation, -1, -1] for expectation in exp_to_estimate_list
]
else:
for expectation in exp_to_estimate_list:
if not self.allow_abstentions:
Y_node = expectation[-1]
else:
Y_node = expectation[0][-1]
triplet = [expectation]
# try everything
for first_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (first_node in triplet or # skip if it's already in the triplet
first_node[0][0] in expectation[1] or # skip if the node is part of the condition
not self._is_separator(expectation[0][:-1], first_node[0][:-1], Y_node)): # not separated
continue
else:
if (first_node in triplet or # skip if it's already in the triplet
not self._is_separator(expectation[:-1], first_node[:-1], Y_node)): # not separated
continue
triplet = [expectation, first_node]
for second_node in [
((node, Y_node), expectation[1]) if self.allow_abstentions else (node, Y_node)
for node in self.nodes if 'Y' not in node
]:
if self.allow_abstentions:
if (second_node in triplet or # skip if it's already in the triplet
second_node[0][0] in expectation[1] or # skip if the node is part of the condition
not all(self._is_separator(exp[0][:-1], second_node[0][:-1], Y_node) for exp in triplet)): # not separated
continue
else:
if (second_node in triplet or # skip if it's already in the triplet
not all(self._is_separator(exp[:-1], second_node[:-1], Y_node) for exp in triplet)): # not separated
continue
if tuple([expectation, second_node, first_node]) in triplets:
continue
# we found a triplet!
triplet = [expectation, first_node, second_node]
triplets.append(tuple(triplet))
triplet = [expectation, first_node]
triplet = [expectation]
else:
triplets = self.triplets
all_moments = set()
abstention_probabilities = {}
if self.fully_independent_case:
all_nodes = list(range(self.m))
all_moments = set([
(i, j)
for i in all_nodes
for j in all_nodes if i != j
])
if self.allow_abstentions:
for node in all_nodes:
abstention_probabilities[tuple([node])] = 0
else:
for exp1, exp2, exp3 in triplets:
if self.allow_abstentions:
condition = exp1[1]
moments = [
tuple(sorted(exp1[0][:-1] + exp2[0][:-1])),
tuple(sorted(exp1[0][:-1] + exp3[0][:-1])),
tuple(sorted(exp2[0][:-1] + exp3[0][:-1]))
]
indices1 = tuple(sorted([ int(node.split('_')[1]) for node in exp1[0][:-1] ]))
indices2 = tuple(sorted([ int(node.split('_')[1]) for node in exp2[0][:-1] ]))
indices3 = tuple(sorted([ int(node.split('_')[1]) for node in exp3[0][:-1] ]))
if indices1 not in abstention_probabilities:
abstention_probabilities[indices1] = 0
if indices2 not in abstention_probabilities:
abstention_probabilities[indices2] = 0
if indices3 not in abstention_probabilities:
abstention_probabilities[indices3] = 0
else:
# first, figure out which moments we need to compute
moments = [
tuple(sorted(exp1[:-1] + exp2[:-1])),
tuple(sorted(exp1[:-1] + exp3[:-1])),
tuple(sorted(exp2[:-1] + exp3[:-1]))
]
for moment in moments:
indices = tuple(sorted([ int(node.split('_')[1]) for node in moment ]))
if indices not in all_moments:
all_moments.add(indices)
return triplets, all_moments, abstention_probabilities
def _triplet_method_preprocess(self, expectations_to_estimate, solve_method):
if solve_method == 'triplet':
return self._triplet_method_single_seed(expectations_to_estimate)
elif solve_method in [ 'triplet_mean', 'triplet_median' ]:
return self._triplet_method_mean_median(expectations_to_estimate, solve_method)
else:
            raise NotImplementedError('Unknown solve method {}'.format(solve_method))
def _triplet_method_probabilities(self, triplets, lambda_moment_vals, lambda_zeros,
abstention_probabilities, sign_recovery, solve_method):
expectation_values = {}
if solve_method == 'triplet':
pass
else:
# each triplet is constructed for the first value in the expectation
# get all the triplets with the same first value, and take the mean or median
expectation_value_candidates = {}
if self.fully_independent_case and solve_method in ['triplet_mean', 'triplet_median']:
second_moment = np.zeros((self.m, self.m))
for key in lambda_moment_vals:
i, j = key
second_moment[i][j] = lambda_moment_vals[(i, j)]
def all_triplet_vals(idx):
triplet_vals = []
for i in range(self.m):
if i == idx:
continue
for j in range(i):
if j == idx:
continue
val = math.sqrt(abs(
(second_moment[idx, i] * second_moment[idx, j] / second_moment[i, j])
if second_moment[i, j] != 0 else 0
))
triplet_vals.append(val)
return triplet_vals
all_vals = [all_triplet_vals(idx) for idx in range(self.m)]
expectations_to_estimate = [
expectation
for expectation, a, b in triplets if a == -1 and b == -1
]
for expectation in expectations_to_estimate:
if self.allow_abstentions:
idx = int(expectation[0][0].split('_')[1])
else:
idx = int(expectation[0].split('_')[1])
expectation_value_candidates[expectation] = all_vals[idx]
else:
for exp1, exp2, exp3 in triplets:
if self.allow_abstentions:
moments = [
tuple(sorted(exp1[0][:-1] + exp2[0][:-1])),
tuple(sorted(exp1[0][:-1] + exp3[0][:-1])),
tuple(sorted(exp2[0][:-1] + exp3[0][:-1]))
]
else:
# first, figure out which moments we need to compute
moments = [
tuple(sorted(exp1[:-1] + exp2[:-1])),
tuple(sorted(exp1[:-1] + exp3[:-1])),
tuple(sorted(exp2[:-1] + exp3[:-1]))
]
moment_vals = [
lambda_moment_vals[
tuple(sorted([ int(node.split('_')[1]) for node in moment ]))
]
for moment in moments
]
if solve_method == 'triplet':
expectation_values[exp1] = (
math.sqrt(abs(moment_vals[0] * moment_vals[1] / moment_vals[2])) if moment_vals[2] != 0 else 0)
expectation_values[exp2] = (
math.sqrt(abs(moment_vals[0] * moment_vals[2] / moment_vals[1])) if moment_vals[1] != 0 else 0)
expectation_values[exp3] = (
math.sqrt(abs(moment_vals[1] * moment_vals[2] / moment_vals[0])) if moment_vals[0] != 0 else 0)
else:
if exp1 not in expectation_value_candidates:
expectation_value_candidates[exp1] = []
exp_value = (
math.sqrt(abs(moment_vals[0] * moment_vals[1] / moment_vals[2])) if moment_vals[2] != 0 else 0)
expectation_value_candidates[exp1].append(exp_value)
if solve_method in ['triplet_mean', 'triplet_median']:
for exp in expectation_value_candidates:
if solve_method == 'triplet_mean':
agg_function = np.mean
if solve_method == 'triplet_median':
agg_function = np.median
expectation_values[exp] = agg_function(expectation_value_candidates[exp])
self.expectation_value_candidates = expectation_value_candidates
if sign_recovery == 'all_positive':
# all signs are already positive
pass
else:
print('{} sign recovery not implemented'.format(sign_recovery))
return
if self.allow_abstentions:
# probability is 0.5 * (1 + expectation - P(lambda part of factor is zero)) * P(conditional)
# P(conditional) is 1 if there is no conditional
probabilities = {}
for expectation in sorted(list(expectation_values.keys())):
exp_value = expectation_values[expectation]
if expectation[1][0] == '0':
condition_prob = 1
else:
zero_condition = tuple(sorted([ int(node.split('_')[1]) for node in expectation[1] ]))
condition_prob = lambda_zeros[zero_condition]
lambda_factor = tuple(sorted([ int(node.split('_')[1]) for node in expectation[0][:-1] ]))
abstention_prob = abstention_probabilities[lambda_factor]
probabilities[expectation] = 0.5 * (1 + exp_value - abstention_prob) * condition_prob
else:
probabilities = {
expectation: 0.5 * (1 + expectation_values[expectation])
for expectation in sorted(list(expectation_values.keys()))
}
return probabilities, expectation_values
|
flyingsquid-master
|
flyingsquid/_triplets.py
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Functions to check whether we can solve this graph structure.
'''
def _is_separator(self, srcSet, dstSet, separatorSet):
'''Check if separatorSet separates srcSet from dstSet.
Tries to find a path from some node in srcSet to some node in dstSet that doesn't
pass through separatorSet. If successful, return False. Otherwise, return True.
'''
def neighbors(node):
neighbor_set = set()
for edge in self.G.edges:
if edge[0] == node:
neighbor_set.add(edge[1])
if edge[1] == node:
neighbor_set.add(edge[0])
return list(neighbor_set)
visited = set()
for srcNode in srcSet:
if srcNode in dstSet:
return False
queue = [srcNode]
curNode = srcNode
while len(queue) > 0:
curNode = queue.pop()
if curNode not in visited:
visited.add(curNode)
else:
continue
for neighbor in neighbors(curNode):
if neighbor == srcNode:
continue
if neighbor in dstSet:
return False
if neighbor in separatorSet:
continue
if neighbor not in visited:
queue.append(neighbor)
return True
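    # Illustration (editor's note): in the chain lambda_0 - Y_0 - lambda_1,
    # _is_separator(['lambda_0'], ['lambda_1'], ['Y_0']) returns True, since
    # every path from lambda_0 to lambda_1 passes through Y_0.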
def _check(self):
'''Check to make sure we can solve this.
Checks:
* For each node or separator set in the junction tree:
There is either only one Y node in the clique, or the clique is made up entirely of Y nodes, since
we can only estimate marginals where there is at most one Y, unless the entire marginal is
made up of Y's)
* For each node or separator set in the junction tree that contains at least one
lambda node and exactly one Y node:
The Y node separates the lambda's from at least two other lambda nodes, that are themselves
separated by Y. To estimate the marginal mu(lambda_i, ..., lambda_j, Y_k), we need to find
lambda_a, lambda_b such that lambda_a, lambda_b, and the joint (lambda_i, ..., lambda_j) are
independent conditioned on Y_k. This amounts to Y_k separating lambda_a, lambda_b, and
(lambda_i, ..., lambda_j). Note that lambda_i, ..., lambda_j do not have to be separated by Y_k.
Outputs: True if we can solve this, False otherwise.
'''
def num_Ys(nodes):
return len([
node for node in nodes if 'Y' in node
])
def num_lambdas(nodes):
return len([
node for node in nodes if 'lambda' in node
])
def estimatable_clique(clique):
y_count = num_Ys(clique)
lambda_count = num_lambdas(clique)
return y_count <= 1 or lambda_count == 0
for clique in self.junction_tree.nodes:
if not estimatable_clique(clique):
return False, "We can't estimate {}!".format(clique)
for separator_set in self.separator_sets:
            if not estimatable_clique(separator_set):
return False, "We can't estimate {}!".format(separator_set)
# for each marginal we need to estimate, check if there is a valid triplet
marginals = sorted(list(self.junction_tree.nodes) + list(self.separator_sets))
for marginal in marginals:
y_count = num_Ys(marginal)
lambda_count = num_lambdas(marginal)
if y_count != 1:
continue
if lambda_count == 0:
continue
separator_y = [node for node in marginal if 'Y' in node]
lambdas = [node for node in marginal if 'lambda' in node]
found = False
for first_node in self.nodes:
if 'Y' in first_node or first_node in lambdas:
continue
for second_node in self.nodes:
if 'Y' in second_node or second_node in lambdas:
continue
if (self._is_separator(lambdas, [first_node], separator_y) and
self._is_separator(lambdas, [second_node], separator_y) and
self._is_separator([first_node], [second_node], separator_y)):
found = True
break
if found:
break
if not found:
print('Could not find triplet for {}!'.format(marginal))
return False
return True
|
flyingsquid-master
|
flyingsquid/_graphs.py
|
from flyingsquid.label_model import LabelModel
import torch
import torch.nn as nn
import numpy as np
class FSLoss(nn.Module):
'''
Expose FlyingSquid as a loss function.
The loss function takes sequences: one sequence of outputs of your end model,
and another sequence of LF votes.
Let `v` be the length of the sequence.
We will compute BCEWithLogitsLoss, averaged over every element of the
sequence (and over every sequence in the batch).
Let `m` be the number of labeling functions.
Let `batch_size` be the size of your batch during training.
The loss function will take two arguments: `outputs` and `weak_labels`.
* The shape of `outputs` will be `batch_size * v`
* The shape of `weak_labels` will be `batch_size * v * m`
```
# outputs will be batch_size * v
# weak_labels will be batch_size * v * m
loss(outputs, weak_labels)
```
The loss function will keep a buffer of N sequences of previous weak labels
that it's seen.
Each step, the loss function does the following:
* For each sequence in the batch (zip over everything in outputs and weak_labels):
* Add the sequence from `weak_labels` to the buffer (kicking out the oldest
items in the buffer)
* Use the triplet method over everything in the buffer (buffer needs to be on
the CPU) to get probabilistic labels, a tensor of shape `T` (put the tensor
onto device)
* For each element in the sequence, compute `BCEWithLogitsLoss` between the
output and the probabilistic label
* Return the average over losses in the sequence
When the dataloader isn't shuffling data, this amounts to "streaming"
Args:
m: number of LF's
v: number of Y tasks
task_deps: edges between the tasks. (i, j) in y_edges means that
there is an edge between y_i and y_j.
lf_task_deps: edges between LF's and tasks. (i, j) in lambda_y_edges
means that there is an edge between lambda_i and y_j.
lf_deps: edges between LF's. (i, j) in lambda_edges means that
there is an edge between lambda_i and lambda_j.
cb: the class balance
        allow_abstentions: if True, allow abstentions in LF votes
device: which device to store the loss/gradients
buffer_capacity: how many sequences of LF's to cache
update_frequency: how often to retrain the label model
clamp_vals: if True, clamp the probabilities out of FlyingSquid to 0.
or 1.
triplets: if specified, use this set of triplets for the triplet method
pos_weight: if specified, set the weight of the positive class to this
in BCEWithLogitsLoss
Example::
T = ... # length of a sequence
m = m_per_task * T # m_per_task LF's per frame
# this creates a new triplet label model under the hood
criterion = FSLoss(
m, T,
[(i, i + 1) for i in range(T - 1)], # chain dependencies for tasks
[(i + m_per_task * j, j) # LF's have dependencies to the frames they vote on
            for i in range(m_per_task) for j in range(T)],
[], # no dependencies between LF's
cb = ... # pass in class balance if you need to
)
model = ... # end model
frame_sequence = [...] # sequence of T frames
lf_votes = [...] # (T, m) vector of LF votes
model_outputs = [ # run the model on each frame
model(frame)
for frame in frame_sequence
]
# This caches the votes in lf_votes, retrains the label model if necessary, and
# generates probabilistic labels for each frame from the LF votes.
# Then, `BCEWithLogitsLoss` on the model outputs and probabilistic labels is used
# to generate the loss value that can be backpropped.
loss = criterion(
torch.tensor([model_outputs]),
torch.tensor([lf_votes])
)
loss.backward()
'''
def __init__(self, m, v=1, task_deps=[], lf_task_deps=[], lf_deps=[],
Y_dev=None, cb=None, allow_abstentions = True, device='cpu',
buffer_capacity=100, update_frequency=10, clamp_vals=False,
triplets=None, pos_weight=None):
        super(FSLoss, self).__init__()
self.m = m
self.v = v
self.task_deps = task_deps
self.lf_task_deps = lf_task_deps
if self.lf_task_deps == []:
self.lf_task_deps = [(i, 0) for i in range(m)]
self.lf_deps = lf_deps
self.Y_dev = Y_dev
self.cb = cb
self.device = device
self.clamp_vals = clamp_vals
self.lm = LabelModel(m, v=v, y_edges=task_deps, lambda_y_edges=lf_task_deps,
lambda_edges=lf_deps, allow_abstentions = allow_abstentions,
triplets=triplets)
self.criterion = nn.BCEWithLogitsLoss() if pos_weight is None else nn.BCEWithLogitsLoss(pos_weight = pos_weight)
self.buffer_capacity = buffer_capacity
self.update_frequency = update_frequency
# register buffer for LF outputs
self.register_buffer('lf_buffer', torch.zeros((buffer_capacity, m), dtype=torch.long))
# register buffer to keep track of how many items
self.register_buffer('buffer_size', torch.zeros(1, dtype=torch.long))
        # register buffer to keep track of where you are
self.register_buffer('buffer_index', torch.zeros(1, dtype=torch.long))
def forward(self, predictions, weak_labels, update_frequency = None):
'''
Generate probabilistic labels from the weak labels, and use `BCEWithLogitsLoss` to
get the actual loss value for end model training.
Also caches the LF votes, and re-trains the label model if necessary (depending on
update_frequency).
Args:
predictions: A (batch_size, v)-sized tensor of model outputs. For sequences,
v is usually the length of the sequence.
weak_labels: A (batch_size, m)-sized tensor of weak labels.
Returns:
Computes BCEWithLogitsLoss on every item in the batch (for each item, computes it
between the v model outputs and the v probabilistic labels), and returns the
average.
'''
update_frequency = update_frequency if update_frequency else self.update_frequency
output = torch.tensor(0., requires_grad=True, device=self.device)
for i, (prediction, label_vector) in enumerate(zip(predictions, weak_labels)):
self.lf_buffer[self.buffer_index] = label_vector
if self.buffer_size < self.buffer_capacity:
self.buffer_size += 1
if (self.buffer_index % update_frequency) == 0:
L_train = self.lf_buffer.cpu().numpy()[:self.buffer_size]
self.lm.fit(
L_train,
Y_dev = self.Y_dev,
class_balance = self.cb
)
self.buffer_index += 1
if self.buffer_index == self.buffer_capacity:
self.buffer_index = torch.tensor(0)
labels = self.lm.predict_proba_marginalized(
[label_vector.cpu().numpy()], verbose=False)
if self.clamp_vals:
labels[0] = [1. if pred >= 0.5 else 0. for pred in labels[0]]
label_tensor = torch.tensor(labels[0], requires_grad=True, device=self.device).view(prediction.shape)
output = output + self.criterion(
prediction,
label_tensor)
return output / predictions.shape[0]
class MajorityVoteLoss(nn.Module):
'''
Expose majority vote as a loss function (for baselines).
Let `m` be the number of labeling functions.
Let `batch_size` be the size of your batch during training.
The loss function will take two arguments: `outputs` and `weak_labels`.
* The shape of `outputs` will be `batch_size`
* The shape of `weak_labels` will be `batch_size * m`
```
# outputs will be batch_size
# weak_labels will be batch_size * m
loss(outputs, weak_labels)
```
    Each step, the loss function does the following:
    * For each item in the batch (zip over everything in outputs and weak_labels):
      * Compute the majority vote over the m weak labels (label 1 if the votes
        sum to a positive value, label 0 otherwise)
      * Compute `BCEWithLogitsLoss` between the output and the majority-vote label
    * Return the average over losses in the batch
Args:
device: which device to store the loss/gradients
Example::
m = ... # number of LF's
    # majority vote needs no label model under the hood
criterion = MajorityVoteLoss(
device = ...
)
model = ... # end model
frame_sequence = [...] # sequence of T frames
lf_votes = [...] # (T, m) vector of LF votes
model_outputs = [ # run the model on each frame
model(frame)
for frame in frame_sequence
]
    # This computes a majority vote for each item from the LF votes.
    # Then, `BCEWithLogitsLoss` on the model outputs and majority-vote labels is
    # used to generate the loss value that can be backpropped.
loss = criterion(
torch.tensor(model_outputs),
torch.tensor(lf_votes)
)
loss.backward()
'''
def __init__(self, device='cpu', pos_weight=None):
super(MajorityVoteLoss, self).__init__()
self.criterion = nn.BCEWithLogitsLoss() if pos_weight is None else nn.BCEWithLogitsLoss(pos_weight = pos_weight)
self.device = device
def forward(self, predictions, weak_labels, update_frequency = None):
'''
        Generate majority-vote labels from the weak labels, and use `BCEWithLogitsLoss` to
        get the actual loss value for end model training.
Args:
predictions: A (batch_size)-sized tensor of model outputs.
weak_labels: A (batch_size, m)-sized tensor of weak labels.
Returns:
            Computes BCEWithLogitsLoss on every item in the batch (for each item, computes it
            between the model output and the majority-vote label), and returns the
            average.
'''
output = torch.tensor(0., requires_grad=True, device=self.device)
for i, (prediction, label_vector) in enumerate(zip(predictions, weak_labels)):
label = (np.sum(label_vector.cpu().numpy()) > 0).astype(float)
label_tensor = torch.tensor(label, requires_grad=True, device=self.device).view(prediction.shape)
output = output + self.criterion(
prediction,
label_tensor)
return output / predictions.shape[0]
|
flyingsquid-master
|
flyingsquid/pytorch_loss.py
|
flyingsquid-master
|
flyingsquid/__init__.py
|
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Functions to compute label model parameters from mean parameters.
'''
def _generate_e_vector(self, clique):
'''
The e vector is a vector of assignments for a particular marginal.
For example, in a marginal with one LF and one Y variable, and no
abstentions, the e vector entries are:
[
(1, 1),
(1, -1),
(-1, 1),
(-1, -1)
]
        The first entry of each tuple is the value of the LF, the second
        entry is the value of the Y variable.
In a marginal with two LFs and one Y variable and no abstentions,
the entries are:
[
(1, 1, 1),
(1, 1, -1),
(1, -1, 1),
(1, -1, -1),
(-1, 1, 1),
(-1, 1, -1),
(-1, -1, 1),
(-1, -1, -1)
]
        In a marginal with one LF, one Y variable, and abstentions:
[
(1, 1),
(0, 1),
(-1, 1),
(1, -1),
(0, -1),
(-1, -1)
]
Two LFs, one Y variable, and abstentions:
[
(1, 1, 1),
(0, 1, 1),
(-1, 1, 1),
(1, 0, 1),
(0, 0, 1),
(-1, 0, 1),
(1, -1, 1),
(0, -1, 1),
(-1, -1, 1),
(1, 1, -1),
(0, 1, -1),
(-1, 1, -1),
(1, 0, -1),
(0, 0, -1),
(-1, 0, -1),
(1, -1, -1),
(0, -1, -1),
(-1, -1, -1)
]
'''
lambda_values = [1, 0, -1] if self.allow_abstentions else [1, -1]
e_vec = [[1], [-1]]
for i in range(len(clique) - 1):
new_e_vec = []
if not self.allow_abstentions:
for new_val in lambda_values:
for e_val in e_vec:
new_e_vec.append(e_val + [new_val])
else:
for e_val in e_vec:
for new_val in lambda_values:
new_e_vec.append([new_val] + e_val)
e_vec = new_e_vec
e_vec = [ tuple(e_val) for e_val in e_vec ]
return e_vec
def _generate_r_vector(self, clique):
'''
The r vector is the vector of probability values that needs to be on the RHS
of the B_matrix * e_vector = r_vector to make e_vector have the right values.
When there are abstentions, the mapping works as follows:
* Each probability is some combination of
P(A * B * ... * C = 1, D = 0, E = 0, ..., F = 0)
* The A, B, ..., C can include any LF, and the Y variable.
* The D, E, ..., F can include any LF
* Let the A, B, ..., C set be called the "equals one set"
* Let the D, E, ..., F set be called the "equals zero set"
* Then, for each entry in the e vector:
* If there is a -1 in an LF spot, add the LF to the "equals zero set"
* If there is a 0 in the LF spot, add the LF to the "equals one set"
* If there is a -1 in the Y variable spot, add it to the "equals one set"
When there are no abstentions, each probability is just defined by the
"equals one set" (i.e., P(A * B * ... * C = 1)).
* For each entry in the e vector:
* If there is a -1 in any spot (LF spot or Y variable), add it to the
"equals one set"
'''
indices = [ int(node.split('_')[1]) for node in clique ]
lf_indices = sorted(indices[:-1])
Y_idx = indices[-1]
Y_val = 'Y_{}'.format(Y_idx)
e_vec = self._generate_e_vector(clique)
r_vec = []
for e_vec_tup in e_vec:
# P(a * b * ... * c = 1) for everything in this array
r_vec_entry_equal_one = []
# P(a = 0, b = 0, ..., c = 0) for everything in this array
r_vec_entry_equal_zero = []
for e_vec_entry, lf_idx in zip(e_vec_tup, lf_indices):
# if you have abstentions, -1 means add to equal zero, 0 means add to equal one
if self.allow_abstentions:
if e_vec_entry == -1:
r_vec_entry_equal_zero.append('lambda_{}'.format(lf_idx))
if e_vec_entry == 0:
r_vec_entry_equal_one.append('lambda_{}'.format(lf_idx))
# otherwise, -1 means add to equal one
else:
if e_vec_entry == -1:
r_vec_entry_equal_one.append('lambda_{}'.format(lf_idx))
if e_vec_tup[-1] == -1:
r_vec_entry_equal_one.append(Y_val)
entries_equal_one = (
tuple(['1']) if len(r_vec_entry_equal_one) == 0 else
tuple(r_vec_entry_equal_one))
entries_equal_zero = (
tuple(['0']) if len(r_vec_entry_equal_zero) == 0 else
tuple(r_vec_entry_equal_zero))
if self.allow_abstentions:
r_vec.append((
entries_equal_one,
entries_equal_zero
))
else:
                if len(r_vec_entry_equal_zero) > 0:
                    raise ValueError('No abstentions allowed!')
r_vec.append(entries_equal_one)
return r_vec
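    # Worked example (editor's note): for a clique ['lambda_0', 'Y_0'] with
    # abstentions, the e vector entries map to r vector entries as follows:
    #   ( 1,  1) -> (('1',),              ('0',))
    #   ( 0,  1) -> (('lambda_0',),       ('0',))
    #   (-1,  1) -> (('1',),              ('lambda_0',))
    #   ( 1, -1) -> (('Y_0',),            ('0',))
    #   ( 0, -1) -> (('lambda_0', 'Y_0'), ('0',))
    #   (-1, -1) -> (('Y_0',),            ('lambda_0',))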
def _generate_b_matrix(self, clique):
if not self.allow_abstentions:
b_matrix_orig = np.array([[1, 1], [1, -1]])
b_matrix = b_matrix_orig
for i in range(len(clique) - 1):
b_matrix = np.kron(b_matrix, b_matrix_orig)
b_matrix[b_matrix < 0] = 0
return b_matrix
else:
a_zero = np.array([
[1, 1],
[1, 0]
])
b_zero = np.array([
[0, 0],
[0, 1]
])
c_matrix = np.array([
[1, 1, 1],
[1, 0, 0],
[0, 1, 0]
])
d_matrix = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 0, 0]
])
a_i = a_zero
b_i = b_zero
for i in range(len(clique) - 1):
a_prev = a_i
b_prev = b_i
a_i = np.kron(a_prev, c_matrix) + np.kron(b_prev, d_matrix)
b_i = np.kron(a_prev, d_matrix) + np.kron(b_prev, c_matrix)
return a_i
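    # Editor's note (illustrative): in the no-abstention case with one LF and
    # one Y, the clipped Kronecker product gives
    #     B = [[1, 1, 1, 1],
    #          [1, 0, 1, 0],
    #          [1, 1, 0, 0],
    #          [1, 0, 0, 1]]
    # so that B @ [P(1,1), P(-1,1), P(1,-1), P(-1,-1)] yields
    # [1, P(lambda_0 = 1), P(Y_0 = 1), P(lambda_0 * Y_0 = 1)], matching the
    # r vector described above.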
|
flyingsquid-master
|
flyingsquid/_lm_parameters.py
|
from itertools import product
def dict_product(d):
keys = d.keys()
for element in product(*d.values()):
yield dict(zip(keys, element))
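# Example (editor's illustration):
#   list(dict_product({'Y_0': (-1, 1), 'Y_1': (-1, 1)}))
#   -> [{'Y_0': -1, 'Y_1': -1}, {'Y_0': -1, 'Y_1': 1},
#       {'Y_0': 1, 'Y_1': -1}, {'Y_0': 1, 'Y_1': 1}]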
|
flyingsquid-master
|
flyingsquid/helpers.py
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Functions to compute observable properties.
'''
def _compute_class_balance(self, class_balance=None, Y_dev=None):
# generate class balance of Ys
Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
cardinalities = [ 2 for i in range(self.v) ]
if class_balance is not None:
class_balance = class_balance / sum(class_balance)
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities, class_balance
)
elif Y_dev is not None:
Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
vals = { Y: (-1, 1) for Y in Ys_ordered }
Y_vecs = sorted([
[ vec_dict[Y] for Y in Ys_ordered ]
for vec_dict in dict_product(vals)
])
counts = {
tuple(Y_vec): 0
for Y_vec in Y_vecs
}
for data_point in Y_dev:
counts[tuple(data_point)] += 1
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities,
[
float(counts[tuple(Y_vec)]) / len(Y_dev)
for Y_vec in Y_vecs
])
else:
num_combinations = 2 ** self.v
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities, [
1. / num_combinations for i in range(num_combinations)
])
return cb
def _compute_Y_marginals(self, Y_marginals):
for marginal in Y_marginals:
nodes = [ 'Y_{}'.format(idx) for idx in marginal ]
Y_marginals[marginal] = self.cb.marginal_distribution(
nodes,
inplace=False
)
return Y_marginals
def _compute_Y_equals_one(self, Y_equals_one):
# compute from class balance
for factor in Y_equals_one:
nodes = [ 'Y_{}'.format(idx) for idx in factor ]
Y_marginal = self.cb.marginal_distribution(
nodes,
inplace=False
)
vals = { Y: (-1, 1) for Y in nodes }
Y_vecs = sorted([
[ vec_dict[Y] for Y in nodes ]
for vec_dict in dict_product(vals)
])
# add up the probabilities of all the vectors whose values multiply to +1
total_prob = 0
for Y_vec in Y_vecs:
if np.prod(Y_vec) == 1:
vector_prob = Y_marginal.reduce(
[
(Y_i, Y_val if Y_val == 1 else 0)
for Y_i, Y_val in zip(nodes, Y_vec)
],
inplace=False
).values
total_prob += vector_prob
Y_equals_one[factor] = total_prob
return Y_equals_one
|
flyingsquid-master
|
flyingsquid/_observables.py
|
'''
This example code shows how to use the PyTorch integration for online training
(example data loaders and training loop).
This code is only provided as a reference. In a real application, you would
need to load in actual image paths to train over.
'''
from flyingsquid.label_model import LabelModel
from flyingsquid.pytorch_loss import FSLoss
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from PIL import Image
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
# Load in the L matrices
L_train = np.load('tutorials/L_train_video.npy')
L_dev = np.load('tutorials/L_dev_video.npy')
Y_dev = np.load('tutorials/Y_dev_video.npy')
# This is where you would load in the images corresponding to the rows
X_paths_train = np.load('....')
X_paths_dev = np.load('....')
# Example dataloader for FSLoss
class ImageFSDataset(Dataset):
def __init__(self, paths, weak_labels, T, gt_labels=None,
transform=None):
self.T = T
        self.paths = paths[:len(paths) - (len(paths) % T)]
        self.weak_labels = weak_labels[:weak_labels.shape[0] -
                                       (weak_labels.shape[0] % T)]
m_per_task = self.weak_labels.shape[1]
self.transform = transform
n_frames = self.weak_labels.shape[0]
n_seqs = n_frames // T
v = T
m = m_per_task * T
self.data_temporal = {
'paths': np.reshape(self.paths, (n_seqs, v)),
'weak_labels': np.reshape(self.weak_labels, (n_seqs, m))
}
self.gt_labels = gt_labels
if gt_labels is not None:
self.gt_labels = self.gt_labels[:len(self.gt_labels) -
(len(self.gt_labels) % T)]
self.data_temporal['gt_labels'] = np.reshape(self.gt_labels, (n_seqs, v))
def __len__(self):
return self.data_temporal['paths'].shape[0]
def __getitem__(self, idx):
paths_seq = self.data_temporal['paths'][idx]
img_tensors = [
torch.unsqueeze(
self.transform(Image.open(path).convert('RGB')),
dim = 0)
for path in paths_seq
]
weak_labels = self.data_temporal['weak_labels'][idx]
if self.gt_labels is not None:
return (torch.cat(img_tensors),
torch.unsqueeze(torch.tensor(weak_labels), dim=0),
torch.unsqueeze(torch.tensor(self.data_temporal['gt_labels'][idx]), dim = 0))
else:
return torch.cat(img_tensors), torch.unsqueeze(torch.tensor(weak_labels), dim = 0)
# Example training loop
def train_model_online(model, T, criterion, optimizer, dataset):
model.train()
dataset_size = len(dataset) * T
for item in dataset:
image_tensor = item[0]
weak_labels = item[1]
labels = None if dataset.gt_labels is None else item[2]
# zero the parameter gradients
optimizer.zero_grad()
# forward
with torch.set_grad_enabled(True):
            outputs = model(image_tensor)
loss = criterion(torch.unsqueeze(outputs, dim = 0), weak_labels)
# backward + optimize
loss.backward()
optimizer.step()
return model
# Model three frames at a time
v = 3
# Six labeling functions per frame (matching the tutorial data)
m_per_frame = 6
m = m_per_frame * v
# Set up the dataset
train_dataset = ImageFSDataset(X_paths_train, L_train, v)
# Set up the loss function
fs_criterion = FSLoss(
m,
v = v,
    task_deps = [ (i, i + 1) for i in range(v - 1) ],
    lf_task_deps = [ (i, i // m_per_frame) for i in range(m) ]
)
# Train up a model online
model = models.resnet50(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 1)
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model = train_model_online(model, v, fs_criterion, optimizer, train_dataset)
|
flyingsquid-master
|
examples/03_pytorch_integration.py
|
'''
This example code shows how to train a FlyingSquid label model for video data.
It loads some labeling functions to detect Tennis Rallies from the tutorials
folder, and trains up a label model.
You can run this file from the examples folder.
'''
from flyingsquid.label_model import LabelModel
import numpy as np
L_train = np.load('tutorials/L_train_video.npy')
L_dev = np.load('tutorials/L_dev_video.npy')
Y_dev = np.load('tutorials/Y_dev_video.npy')
# Model three frames at a time
v = 3
# Six labeling functions per frame
m_per_frame = 6
# Total number of labeling functions is m_per_frame * v
m = m_per_frame * v
# Figure out how many sequences we're going to have
n_frames_train = L_train.shape[0]
n_frames_dev = L_dev.shape[0]
n_seqs_train = n_frames_train // v
n_seqs_dev = n_frames_dev // v
# Resize and reshape matrices
L_train_seqs = L_train[:n_seqs_train * v].reshape((n_seqs_train, m))
L_dev_seqs = L_dev[:n_seqs_dev * v].reshape((n_seqs_dev, m))
Y_dev_seqs = Y_dev[:n_seqs_dev * v].reshape((n_seqs_dev, v))
# Create the label model with temporal dependencies
label_model = LabelModel(
m,
v = v,
y_edges = [ (i, i + 1) for i in range(v - 1) ],
lambda_y_edges = [ (i, i // m_per_frame) for i in range(m) ]
)
label_model.fit(L_train_seqs)
probabilistic_labels = label_model.predict_proba_marginalized(L_dev_seqs)
preds = [ 1. if prob > 0.5 else -1. for prob in probabilistic_labels ]
accuracy = np.sum(preds == Y_dev[:n_seqs_dev * v]) / (n_seqs_dev * v)
print('Label model accuracy: {}%'.format(int(100 * accuracy)))
|
flyingsquid-master
|
examples/02_video.py
|
'''
This example code shows a bare-minimum example of how to get FlyingSquid up and
running.
It generates synthetic data from the tutorials folder, and trains up a label
model.
You can run this file from the examples folder.
'''
from flyingsquid.label_model import LabelModel
from tutorials.tutorial_helpers import *
L_train, L_dev, Y_dev = synthetic_data_basics()
m = L_train.shape[1]
label_model = LabelModel(m)
label_model.fit(L_train)
preds = label_model.predict(L_dev).reshape(Y_dev.shape)
accuracy = np.sum(preds == Y_dev) / Y_dev.shape[0]
print('Label model accuracy: {}%'.format(int(100 * accuracy)))
|
flyingsquid-master
|
examples/01_basics.py
|
flyingsquid-master
|
examples/tutorials/__init__.py
|
|
import numpy as np
from numpy.random import seed, rand
import itertools
def exponential_family (lam, y, theta, theta_y):
# without normalization
return np.exp(theta_y * y + y * np.dot(theta, lam))
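# Editor's note: this is the unnormalized density
#     p(lambda, y) proportional to exp(theta_y * y + y * dot(theta, lambda)),
# so each LF i agrees with Y with odds controlled by theta[i]; make_pdf below
# normalizes it over all 2^(m+1) assignments.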
# create vector describing cumulative distribution of lambda_1, ... lambda_m, Y
def make_pdf(m, v, theta, theta_y, lst):
p = np.zeros(len(lst))
for i in range(len(lst)):
labels = lst[i]
p[i] = exponential_family(labels[0:m], labels[v-1], theta, theta_y)
return p/sum(p)
def make_cdf(pdf):
return np.cumsum(pdf)
# draw a set of lambda_1, ... lambda_m, Y based on the distribution
def sample(lst, cdf):
r = np.random.random_sample()
smaller = np.where(cdf < r)[0]
if len(smaller) == 0:
i = 0
else:
i = smaller.max() + 1
return lst[i]
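# Editor's note: `sample` draws one assignment by inverse-CDF sampling --
# draw r ~ Uniform(0, 1) and return the first list entry whose cumulative
# probability exceeds r.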
def generate_data(n, theta, m, theta_y=0):
v = m+1
lst = list(map(list, itertools.product([-1, 1], repeat=v)))
pdf = make_pdf(m, v, theta, theta_y, lst)
cdf = make_cdf(pdf)
sample_matrix = np.zeros((n,v))
for i in range(n):
sample_matrix[i,:] = sample(lst,cdf)
return sample_matrix
def synthetic_data_basics():
seed(0)
n_train = 10000
n_dev = 500
m = 5
theta = [1.5,.5,.2,.2,.05]
abstain_rate = [.8, .88, .28, .38, .45]
train_data = generate_data(n_train, theta, m)
dev_data = generate_data(n_dev, theta, m)
L_train = train_data[:,:-1]
L_dev = dev_data[:,:-1]
Y_dev = dev_data[:,-1]
train_values = rand(n_train * m).reshape(L_train.shape)
dev_values = rand(n_dev * m).reshape(L_dev.shape)
    L_train[train_values < np.array(abstain_rate)] = 0
    L_dev[dev_values < np.array(abstain_rate)] = 0
return L_train, L_dev, Y_dev
def print_statistics(L_dev, Y_dev):
m = L_dev.shape[1]
for i in range(m):
acc = np.sum(L_dev[:,i] == Y_dev)/np.sum(L_dev[:,i] != 0)
abstains = np.sum(L_dev[:,i] == 0)/Y_dev.shape[0]
print('LF {}: Accuracy {}%, Abstain rate {}%'.format(
i, int(acc * 100), int((abstains) * 100)))
|
flyingsquid-master
|
examples/tutorials/tutorial_helpers.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
import operator
from datetime import date
import torch
import torch.nn as nn
#from torch.utils.tensorboard import SummaryWriter
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from data.data_loader import build_data_loader
from utils.config import setup
import utils.saver as saver
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
import utils.logging as logging
from evaluate import attentive_nas_eval as attentive_nas_eval
from sampler.attentive_nas_sampler import ArchSampler as ArchSampler
from solver import build_optimizer, build_lr_scheduler
import utils.loss_ops as loss_ops
import models
from copy import deepcopy
import numpy as np
import joblib
from sklearn.ensemble import RandomForestRegressor
parser = argparse.ArgumentParser(description='AttentiveNAS Training')
parser.add_argument('--config-file', default=None, type=str,
help='training configuration')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
logger = logging.get_logger(__name__)
def build_args_and_env(run_args):
assert run_args.config_file and os.path.isfile(run_args.config_file), 'cannot locate config file'
args = setup(run_args.config_file)
args.config_file = run_args.config_file
#load config
assert args.distributed and args.multiprocessing_distributed, 'only support DDP training'
args.distributed = True
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
args.dist_url = run_args.dist_url
args.models_save_dir = os.path.join(args.models_save_dir, args.exp_name)
if not os.path.exists(args.models_save_dir):
os.makedirs(args.models_save_dir)
#backup config file
saver.copy_file(args.config_file, '{}/{}'.format(args.models_save_dir, os.path.basename(args.config_file)))
args.checkpoint_save_path = os.path.join(
args.models_save_dir, 'attentive_nas.pth.tar'
)
args.logging_save_path = os.path.join(
args.models_save_dir, f'stdout.log'
)
return args
def main():
run_args = parser.parse_args()
args = build_args_and_env(run_args)
random.seed(args.seed)
torch.manual_seed(args.seed)
#cudnn.deterministic = True
#warnings.warn('You have chosen to seed training. '
# 'This will turn on the CUDNN deterministic setting, '
# 'which can slow down your training considerably! '
# 'You may see unexpected behavior when restarting '
# 'from checkpoints.')
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
assert args.world_size > 1, 'only support ddp training'
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
args.batch_size_total = args.batch_size * args.world_size
#rescale base lr
args.lr_scheduler.base_lr = args.lr_scheduler.base_lr * (max(1, args.batch_size_total // 256))
    # set random seed, make sure all random subgraphs generated are the same across processes
random.seed(args.seed)
torch.manual_seed(args.seed)
    if args.gpu is not None:
torch.cuda.manual_seed(args.seed)
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging(args.logging_save_path, 'w')
logger.info(f"Use GPU: {args.gpu}, machine rank {args.machine_rank}, num_nodes {args.num_nodes}, \
gpu per node {ngpus_per_node}, world size {args.world_size}")
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
args.local_rank = args.gpu
torch.cuda.set_device(args.gpu)
# build model
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
#build arch sampler
arch_sampler = None
if getattr(args, 'sampler', None):
arch_sampler = ArchSampler(
args.sampler.arch_to_flops_map_file_path, args.sampler.discretize_step, model, None
)
# use sync batchnorm
if getattr(args, 'sync_bn', False):
model.apply(
lambda m: setattr(m, 'need_sync', True))
model = comm.get_parallel_model(model, args.gpu) #local rank
logger.info(model)
criterion = loss_ops.CrossEntropyLossSmooth(args.label_smoothing).cuda(args.gpu)
soft_criterion = loss_ops.KLLossSoft().cuda(args.gpu)
if not getattr(args, 'inplace_distill', True):
soft_criterion = None
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
args.n_iters_per_epoch = len(train_loader)
logger.info( f'building optimizer and lr scheduler, \
local rank {args.gpu}, global rank {args.rank}, world_size {args.world_size}')
optimizer = build_optimizer(args, model)
lr_scheduler = build_lr_scheduler(args, optimizer)
# optionally resume from a checkpoint
if args.resume:
saver.load_checkpoints(args, model, optimizer, lr_scheduler, logger)
logger.info(args)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
args.curr_epoch = epoch
logger.info('Training lr {}'.format(lr_scheduler.get_lr()[0]))
# train for one epoch
acc1, acc5 = train_epoch(epoch, model, train_loader, optimizer, criterion, args, \
arch_sampler=arch_sampler, soft_criterion=soft_criterion, lr_scheduler=lr_scheduler)
if comm.is_master_process() or args.distributed:
# validate supernet model
validate(
train_loader, val_loader, model, criterion, args
)
if comm.is_master_process():
# save checkpoints
saver.save_checkpoint(
args.checkpoint_save_path,
model,
optimizer,
lr_scheduler,
args,
epoch,
)
def train_epoch(
epoch,
model,
train_loader,
optimizer,
criterion,
args,
arch_sampler=None,
soft_criterion=None,
lr_scheduler=None,
):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
model.train()
end = time.time()
num_updates = epoch * len(train_loader)
for batch_idx, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# total subnets to be sampled
num_subnet_training = max(2, getattr(args, 'num_arch_training', 2))
optimizer.zero_grad()
### compute gradients using sandwich rule ###
# step 1 sample the largest network, apply regularization to only the largest network
drop_connect_only_last_two_stages = getattr(args, 'drop_connect_only_last_two_stages', True)
model.module.sample_max_subnet()
model.module.set_dropout_rate(args.dropout, args.drop_connect, drop_connect_only_last_two_stages) #dropout for supernet
output = model(images)
loss = criterion(output, target)
loss.backward()
with torch.no_grad():
soft_logits = output.clone().detach()
#step 2. sample the smallest network and several random networks
sandwich_rule = getattr(args, 'sandwich_rule', True)
model.module.set_dropout_rate(0, 0, drop_connect_only_last_two_stages) #reset dropout rate
for arch_id in range(1, num_subnet_training):
if arch_id == num_subnet_training-1 and sandwich_rule:
model.module.sample_min_subnet()
else:
# attentive sampling with training loss as the surrogate performance metric
if arch_sampler is not None:
sampling_method = args.sampler.method
if sampling_method in ['bestup', 'worstup']:
target_flops = arch_sampler.sample_one_target_flops()
candidate_archs = arch_sampler.sample_archs_according_to_flops(
target_flops, n_samples=args.sampler.num_trials
)
my_pred_accs = []
for arch in candidate_archs:
model.module.set_active_subnet(**arch)
with torch.no_grad():
my_pred_accs.append(-1.0 * criterion(model(images), target))
if sampling_method == 'bestup':
idx, _ = max(enumerate(my_pred_accs), key=operator.itemgetter(1))
else:
idx, _ = min(enumerate(my_pred_accs), key=operator.itemgetter(1))
model.module.set_active_subnet(**candidate_archs[idx]) #reset
else:
raise NotImplementedError
else:
model.module.sample_active_subnet()
            # calculating loss
output = model(images)
if soft_criterion:
loss = soft_criterion(output, soft_logits)
else:
assert not args.inplace_distill
loss = criterion(output, target)
loss.backward()
        # clip gradients if specified
if getattr(args, 'grad_clip_value', None):
torch.nn.utils.clip_grad_value_(model.parameters(), args.grad_clip_value)
optimizer.step()
#accuracy measured on the local batch
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
corr1, corr5, loss = acc1*args.batch_size, acc5*args.batch_size, loss.item()*args.batch_size #just in case the batch size is different on different nodes
stats = torch.tensor([corr1, corr5, loss, args.batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1/batch_size, corr5/batch_size, loss/batch_size
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
else:
losses.update(loss.item(), images.size(0))
top1.update(acc1, images.size(0))
top5.update(acc5, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
num_updates += 1
if lr_scheduler is not None:
lr_scheduler.step()
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
return top1.avg, top5.avg
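# Editor's note (illustrative summary): the sandwich rule above trains, per
# step, the largest subnet (whose detached logits serve as the inplace
# distillation teacher), the smallest subnet, and several random or
# attentively sampled subnets, accumulating gradients before a single
# optimizer.step().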
def validate(
train_loader,
val_loader,
model,
criterion,
args,
distributed = True,
):
subnets_to_be_evaluated = {
'attentive_nas_min_net': {},
'attentive_nas_max_net': {},
}
acc1_list, acc5_list = attentive_nas_eval.validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration = True,
)
if __name__ == '__main__':
main()
|
AttentiveNAS-main
|
train_attentive_nas.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
from datetime import date
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models
from utils.config import setup
from utils.flops_counter import count_net_flops_and_params
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from utils.progress import AverageMeter, ProgressMeter, accuracy
import argparse
parser = argparse.ArgumentParser(description='Test AttentiveNas Models')
parser.add_argument('--config-file', default='./configs/eval_attentive_nas_models.yml')
parser.add_argument('--model', default='a0', type=str, choices=['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6'])
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
run_args = parser.parse_args()
if __name__ == '__main__':
args = setup(run_args.config_file)
args.model = run_args.model
args.gpu = run_args.gpu
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
args.__dict__['active_subnet'] = args.__dict__['pareto_models'][args.model]
print(args.active_subnet)
train_loader, val_loader, train_sampler = build_data_loader(args)
## init static attentivenas model with weights inherited from the supernet
model = models.model_factory.create_model(args)
model.to(args.gpu)
model.eval()
# bn running stats calibration following Slimmable (https://arxiv.org/abs/1903.05134)
# please consider trying a different random seed if you see a small accuracy drop
with torch.no_grad():
model.reset_running_stats_for_calibration()
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
images = images.cuda(args.gpu, non_blocking=True)
model(images) #forward only
model.eval()
with torch.no_grad():
criterion = nn.CrossEntropyLoss().cuda()
from evaluate.imagenet_eval import validate_one_subnet
acc1, acc5, loss, flops, params = validate_one_subnet(val_loader, model, criterion, args)
print(acc1, acc5, flops, params)
|
AttentiveNAS-main
|
test_attentive_nas.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from Slimmable - https://github.com/JiahuiYu/slimmable_networks
import torch
class CrossEntropyLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification """
def forward(self, output, target):
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss.mean()
class KLLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification
output: output logits of the student network
target: output logits of the teacher network
T: temperature
        KL(p||q) = E_p[log p] - E_p[log q]
"""
def forward(self, output, soft_logits, target=None, temperature=1., alpha=0.9):
output, soft_logits = output / temperature, soft_logits / temperature
soft_target_prob = torch.nn.functional.softmax(soft_logits, dim=1)
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
kd_loss = -torch.sum(soft_target_prob * output_log_prob, dim=1)
if target is not None:
n_class = output.size(1)
target = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
ce_loss = -torch.bmm(target, output_log_prob).squeeze()
loss = alpha*temperature* temperature*kd_loss + (1.0-alpha)*ce_loss
else:
loss = kd_loss
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
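# Usage sketch (editor's illustration, not part of the original file):
#   criterion = KLLossSoft()
#   student_logits = torch.randn(8, 1000)
#   teacher_logits = torch.randn(8, 1000)
#   loss = criterion(student_logits, teacher_logits)  # scalar ('mean' reduction)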
class CrossEntropyLossSmooth(torch.nn.modules.loss._Loss):
def __init__(self, label_smoothing=0.1):
super(CrossEntropyLossSmooth, self).__init__()
self.eps = label_smoothing
""" label smooth """
def forward(self, output, target):
n_class = output.size(1)
one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = one_hot * (1 - self.eps) + self.eps / n_class
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
loss = -torch.bmm(target, output_log_prob)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
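# Usage sketch (editor's illustration):
#   criterion = CrossEntropyLossSmooth(label_smoothing=0.1)
#   logits = torch.randn(4, 10)
#   target = torch.randint(0, 10, (4,))
#   loss = criterion(logits, target)  # label-smoothed cross entropy, scalar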
|
AttentiveNAS-main
|
utils/loss_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import builtins
import decimal
import functools
import logging
import os
import sys
from .comm import is_master_process as is_master_proc
def _suppress_print():
"""
Suppresses printing from the current process.
"""
def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
pass
builtins.print = print_pass
def setup_logging(save_path, mode='a'):
"""
Sets up the logging for multiple processes. Only enable the logging for the
master process, and suppress logging for the non-master processes.
"""
if is_master_proc():
# Enable logging for the master process.
logging.root.handlers = []
else:
# Suppress logging for non-master processes.
_suppress_print()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.propagate = False
print_plain_formatter = logging.Formatter(
"[%(asctime)s]: %(message)s",
datefmt="%m/%d %H:%M:%S",
)
fh_plain_formatter = logging.Formatter("%(message)s")
if is_master_proc():
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(print_plain_formatter)
logger.addHandler(ch)
if save_path is not None and is_master_proc():
fh = logging.FileHandler(save_path, mode=mode)
fh.setLevel(logging.DEBUG)
fh.setFormatter(fh_plain_formatter)
logger.addHandler(fh)
def get_logger(name):
"""
Retrieve the logger with the specified name or, if name is None, return a
logger which is the root logger of the hierarchy.
Args:
name (string): name of the logger.
"""
return logging.getLogger(name)
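

# --- Illustrative usage sketch (added; not part of the original file) ---
# Typical call pattern, assuming the distributed process group is already
# initialized so that is_master_proc() is meaningful:
#
#   setup_logging('/tmp/train.log')      # once per process
#   logger = get_logger(__name__)
#   logger.info('only the master process prints and writes this')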
|
AttentiveNAS-main
|
utils/logging.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# implementation adapted from Slimmable: https://github.com/JiahuiYu/slimmable_networks
"""config utilities for yml file."""
import os
import sys
import yaml
class LoaderMeta(type):
    """Metaclass for supporting `!include`."""
    def __new__(mcs, __name__, __bases__, __dict__):
        """Add the include constructor to the class."""
        # register the include constructor on the class
        cls = super().__new__(mcs, __name__, __bases__, __dict__)
        cls.add_constructor('!include', cls.construct_include)
        return cls


class Loader(yaml.SafeLoader, metaclass=LoaderMeta):
    """YAML Loader with `!include` constructor."""
    def __init__(self, stream):
        try:
            self._root = os.path.split(stream.name)[0]
        except AttributeError:
            self._root = os.path.curdir
        super().__init__(stream)

    def construct_include(self, node):
        """Include the file referenced at the node."""
        filename = os.path.abspath(
            os.path.join(self._root, self.construct_scalar(node)))
        extension = os.path.splitext(filename)[1].lstrip('.')
        with open(filename, 'r') as f:
            if extension in ('yaml', 'yml'):
                return yaml.load(f, Loader)
            else:
                return ''.join(f.readlines())


class AttrDict(dict):
    """Dict as attribute trick."""
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self
        for key in self.__dict__:
            value = self.__dict__[key]
            if isinstance(value, dict):
                self.__dict__[key] = AttrDict(value)
            elif isinstance(value, list):
                # guard against empty lists before inspecting value[0]
                if value and isinstance(value[0], dict):
                    self.__dict__[key] = [AttrDict(item) for item in value]
                else:
                    self.__dict__[key] = value

    def yaml(self):
        """Convert object to a yaml dict and return."""
        yaml_dict = {}
        for key in self.__dict__:
            value = self.__dict__[key]
            if isinstance(value, AttrDict):
                yaml_dict[key] = value.yaml()
            elif isinstance(value, list):
                if value and isinstance(value[0], AttrDict):
                    new_l = []
                    for item in value:
                        new_l.append(item.yaml())
                    yaml_dict[key] = new_l
                else:
                    yaml_dict[key] = value
            else:
                yaml_dict[key] = value
        return yaml_dict

    def __repr__(self):
        """Print all variables."""
        ret_str = []
        for key in self.__dict__:
            value = self.__dict__[key]
            if isinstance(value, AttrDict):
                ret_str.append('{}:'.format(key))
                child_ret_str = value.__repr__().split('\n')
                for item in child_ret_str:
                    ret_str.append(' ' + item)
            elif isinstance(value, list):
                if value and isinstance(value[0], AttrDict):
                    ret_str.append('{}:'.format(key))
                    for value_item in value:
                        # treat each item as an AttrDict, as above
                        child_ret_str = value_item.__repr__().split('\n')
                        for item in child_ret_str:
                            ret_str.append(' ' + item)
                else:
                    ret_str.append('{}: {}'.format(key, value))
            else:
                ret_str.append('{}: {}'.format(key, value))
        return '\n'.join(ret_str)


class Config(AttrDict):
    def __init__(self, filename=None):
        with open(filename, 'r') as f:
            cfg_dict = yaml.load(f, Loader)
        super(Config, self).__init__(cfg_dict)


def setup(config_file):
    assert os.path.isfile(config_file), 'cannot locate {}'.format(config_file)
    return Config(config_file)
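

# --- Illustrative usage sketch (added; not part of the original file) ---
# Given a hypothetical yaml file such as
#
#   # supernet.yml
#   arch: attentive_nas_dynamic_model
#   supernet_config: !include supernet_blocks.yml
#
# the config loads into nested AttrDicts with attribute access:
#
#   args = setup('supernet.yml')
#   print(args.arch)             # 'attentive_nas_dynamic_model'
#   print(args.supernet_config)  # nested AttrDict from the included file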
|
AttentiveNAS-main
|
utils/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import logging
import pickle
import torch
import torch.nn as nn
import torch.distributed as dist


def get_world_size():
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()


def get_my_model(model):
    if isinstance(model, nn.DataParallel):
        return model.module
    return model


def is_master_process():
    return get_rank() == 0


def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    world_size = dist.get_world_size()
    if world_size == 1:
        return
    dist.barrier()


def get_parallel_model(model, device):
    if get_world_size() >= 1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[device], find_unused_parameters=True
        )
    else:
        raise NotImplementedError
    return model


def reduce_eval_results(summary, gpu):
    # pad to a fixed length so all_gather sees equally sized tensors
    summary = summary + "".join([" "] * (2000 - len(summary)))
    # encode the summary string as a tensor and gather it from all ranks
    summary = torch.tensor([ord(c) for c in summary]).cuda(gpu)
    summary_list = [torch.zeros_like(summary) for _ in range(dist.get_world_size())]
    dist.all_gather(summary_list, summary)
    group = []
    for _i in range(dist.get_world_size()):
        s = "".join([chr(c) for c in summary_list[_i]])
        # each rank is expected to send the repr of a python literal (e.g. a dict)
        group.append(eval(s))
    return group
|
AttentiveNAS-main
|
utils/comm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from copy import deepcopy
import torch
import os
import shutil
import joblib
def copy_file(source_path, target_path):
shutil.copyfile(source_path, target_path)
def save_acc_predictor(args, acc_predictor):
args.curr_acc_predictor_path = os.path.join(args.models_save_dir, f'acc_predictor_{args.curr_epoch}.joblib')
with open(args.curr_acc_predictor_path, 'wb') as fp:
joblib.dump(acc_predictor, fp)
def load_acc_predictor(args, predictor_saved_path=None):
if predictor_saved_path is None:
predictor_saved_path = args.curr_acc_predictor_path
with open(predictor_saved_path, 'rb') as fp:
acc_predictor = joblib.load(fp)
return acc_predictor
def save_checkpoint(save_path, model, optimizer, lr_scheduler, args, epoch, is_best=False):
save_state = {
'epoch': epoch + 1,
'args': args,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict()
}
best_model_path = os.path.join(
os.path.dirname(save_path),
'best_{}'.format(os.path.basename(save_path))
)
with open(save_path, 'wb') as f:
torch.save(save_state, f, _use_new_zipfile_serialization=False)
if is_best:
copy_file(save_path, best_model_path)
def load_checkpoints(args, model, optimizer=None, lr_scheduler=None, logger=None):
resume_path = args.resume
assert os.path.isfile(resume_path), "=> no checkpoint found at '{}'".format(resume_path)
with open(resume_path, 'rb') as f:
checkpoint = torch.load(f, map_location=torch.device('cpu'))
if logger:
logger.info("=> loading checkpoint '{}'".format(resume_path))
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
resume_with_a_different_optimizer = getattr(args, 'resume_with_a_different_optimizer', False)
resume_with_a_different_lr_scheduler = getattr(args, 'resume_with_a_different_lr_scheduler', False)
if optimizer and not resume_with_a_different_optimizer:
optimizer.load_state_dict(checkpoint['optimizer'])
if lr_scheduler and not resume_with_a_different_optimizer and not resume_with_a_different_lr_scheduler:
# use lr_scheduler settings defined in args
skip_keys = list(args.lr_scheduler.__dict__.keys()) + ['clamp_lr']
for k in skip_keys:
if k in checkpoint['lr_scheduler']:
checkpoint['lr_scheduler'].pop(k)
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
elif lr_scheduler is not None:
# reset lr_scheduler start epoch only
lr_scheduler.step(checkpoint['lr_scheduler']['last_epoch'])
if logger:
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(resume_path, checkpoint['epoch']))
del checkpoint
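

# --- Illustrative usage sketch (added; not part of the original file) ---
# Hypothetical training-loop calls; 'exp/checkpoint.pth', `acc1` and
# `best_acc1` are made-up names:
#
#   save_checkpoint('exp/checkpoint.pth', model, optimizer, lr_scheduler,
#                   args, epoch, is_best=(acc1 > best_acc1))
#   ...
#   args.resume = 'exp/checkpoint.pth'
#   load_checkpoints(args, model, optimizer, lr_scheduler, logger)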
|
AttentiveNAS-main
|
utils/saver.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from OFA - https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
import copy
multiply_adds = 1
def count_convNd(m, _, y):
    cin = m.in_channels
    # kernel spatial size; m.weight[0][0].numel() is correct for 1d/2d/3d convs
    kernel_ops = m.weight[0][0].numel()
    ops_per_element = kernel_ops
    output_elements = y.nelement()  # batch x cout x oH x oW
    total_ops = cin * output_elements * ops_per_element // m.groups
    m.total_ops = torch.Tensor([int(total_ops)])


def count_linear(m, _, __):
    total_ops = m.in_features * m.out_features
    m.total_ops = torch.Tensor([int(total_ops)])


register_hooks = {
    nn.Conv1d: count_convNd,
    nn.Conv2d: count_convNd,
    nn.Conv3d: count_convNd,
    ######################################
    nn.Linear: count_linear,
    ######################################
    nn.Dropout: None,
    nn.Dropout2d: None,
    nn.Dropout3d: None,
    nn.BatchNorm2d: None,
}


def profile(model, input_size=(1, 3, 224, 224), custom_ops=None):
    handler_collection = []
    custom_ops = {} if custom_ops is None else custom_ops

    def add_hooks(m_):
        if len(list(m_.children())) > 0:
            return
        m_.register_buffer('total_ops', torch.zeros(1))
        m_.register_buffer('total_params', torch.zeros(1))
        for p in m_.parameters():
            m_.total_params += torch.Tensor([p.numel()])
        m_type = type(m_)
        fn = None
        if m_type in custom_ops:
            fn = custom_ops[m_type]
        elif m_type in register_hooks:
            fn = register_hooks[m_type]
        else:
            # modules without a registered counter contribute zero ops
            pass
        if fn is not None:
            _handler = m_.register_forward_hook(fn)
            handler_collection.append(_handler)

    original_device = next(model.parameters()).device
    training = model.training
    model.eval()
    model.apply(add_hooks)
    x = torch.zeros(input_size).to(original_device)
    with torch.no_grad():
        model(x)
    total_ops = 0
    total_params = 0
    for m in model.modules():
        if len(list(m.children())) > 0:  # skip non-leaf modules
            continue
        total_ops += m.total_ops
        total_params += m.total_params
    total_ops = total_ops.item()
    total_params = total_params.item()
    model.train(training)
    model.to(original_device)
    for handler in handler_collection:
        handler.remove()
    return total_ops, total_params


def count_net_flops_and_params(net, data_shape=(1, 3, 224, 224)):
    if isinstance(net, nn.DataParallel):
        net = net.module
    net = copy.deepcopy(net)
    flop, nparams = profile(net, data_shape)
    # return in millions (MFLOPs / M params)
    return flop / 1e6, nparams / 1e6
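

# --- Illustrative usage sketch (added; not part of the original file) ---
# Counting a small hand-built network; the printed values are the conv and
# linear multiply counts in millions.
if __name__ == '__main__':
    net = nn.Sequential(
        nn.Conv2d(3, 16, 3, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(16),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),
        nn.Linear(16, 10),
    )
    mflops, mparams = count_net_flops_and_params(net, data_shape=(1, 3, 32, 32))
    print('%.4f MFLOPs, %.4f M params' % (mflops, mparams))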
|
AttentiveNAS-main
|
utils/flops_counter.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import torch
import torch.nn as nn
class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch, logger=None):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        if logger is None:
            print('\t'.join(entries))
        else:
            logger.info('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum()
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
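

# --- Illustrative usage sketch (added; not part of the original file) ---
if __name__ == '__main__':
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(100, [top1, top5], prefix='Epoch: [0]')
    output = torch.randn(8, 10)          # fake logits
    target = torch.randint(0, 10, (8,))  # fake labels
    acc1, acc5 = accuracy(output, target, topk=(1, 5))
    top1.update(acc1.item(), output.size(0))
    top5.update(acc5.item(), output.size(0))
    progress.display(0)                  # e.g. "Epoch: [0][  0/100]  Acc@1 ..."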
|
AttentiveNAS-main
|
utils/progress.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from OFA: https://github.com/mit-han-lab/once-for-all
import copy
import random
import collections
import math
import torch
import torch.nn as nn
from .modules.dynamic_layers import DynamicMBConvLayer, DynamicConvBnActLayer, DynamicLinearLayer, DynamicShortcutLayer
from .modules.static_layers import MobileInvertedResidualBlock
from .modules.nn_utils import make_divisible, int2list
from .modules.nn_base import MyNetwork
from .attentive_nas_static_model import AttentiveNasStaticModel
class AttentiveNasDynamicModel(MyNetwork):
def __init__(self, supernet, n_classes=1000, bn_param=(0., 1e-5)):
super(AttentiveNasDynamicModel, self).__init__()
self.supernet = supernet
self.n_classes = n_classes
self.use_v3_head = getattr(self.supernet, 'use_v3_head', False)
self.stage_names = ['first_conv', 'mb1', 'mb2', 'mb3', 'mb4', 'mb5', 'mb6', 'mb7', 'last_conv']
self.width_list, self.depth_list, self.ks_list, self.expand_ratio_list = [], [], [], []
for name in self.stage_names:
block_cfg = getattr(self.supernet, name)
self.width_list.append(block_cfg.c)
if name.startswith('mb'):
self.depth_list.append(block_cfg.d)
self.ks_list.append(block_cfg.k)
self.expand_ratio_list.append(block_cfg.t)
self.resolution_list = self.supernet.resolutions
self.cfg_candidates = {
    'resolution': self.resolution_list,
    'width': self.width_list,
    'depth': self.depth_list,
    'kernel_size': self.ks_list,
    'expand_ratio': self.expand_ratio_list
}
#first conv layer, including conv, bn, act
out_channel_list, act_func, stride = \
self.supernet.first_conv.c, self.supernet.first_conv.act_func, self.supernet.first_conv.s
self.first_conv = DynamicConvBnActLayer(
in_channel_list=int2list(3), out_channel_list=out_channel_list,
kernel_size=3, stride=stride, act_func=act_func,
)
# inverted residual blocks
self.block_group_info = []
blocks = []
_block_index = 0
feature_dim = out_channel_list
for stage_id, key in enumerate(self.stage_names[1:-1]):
block_cfg = getattr(self.supernet, key)
width = block_cfg.c
n_block = max(block_cfg.d)
act_func = block_cfg.act_func
ks = block_cfg.k
expand_ratio_list = block_cfg.t
use_se = block_cfg.se
self.block_group_info.append([_block_index + i for i in range(n_block)])
_block_index += n_block
output_channel = width
for i in range(n_block):
stride = block_cfg.s if i == 0 else 1
if min(expand_ratio_list) >= 4:
expand_ratio_list = [_s for _s in expand_ratio_list if _s >= 4] if i == 0 else expand_ratio_list
mobile_inverted_conv = DynamicMBConvLayer(
in_channel_list=feature_dim,
out_channel_list=output_channel,
kernel_size_list=ks,
expand_ratio_list=expand_ratio_list,
stride=stride,
act_func=act_func,
use_se=use_se,
channels_per_group=getattr(self.supernet, 'channels_per_group', 1)
)
shortcut = DynamicShortcutLayer(feature_dim, output_channel, reduction=stride)
blocks.append(MobileInvertedResidualBlock(mobile_inverted_conv, shortcut))
feature_dim = output_channel
self.blocks = nn.ModuleList(blocks)
last_channel, act_func = self.supernet.last_conv.c, self.supernet.last_conv.act_func
if not self.use_v3_head:
self.last_conv = DynamicConvBnActLayer(
in_channel_list=feature_dim, out_channel_list=last_channel,
kernel_size=1, act_func=act_func,
)
else:
expand_feature_dim = [f_dim * 6 for f_dim in feature_dim]
self.last_conv = nn.Sequential(collections.OrderedDict([
('final_expand_layer', DynamicConvBnActLayer(
feature_dim, expand_feature_dim, kernel_size=1, use_bn=True, act_func=act_func)
),
('pool', nn.AdaptiveAvgPool2d((1,1))),
('feature_mix_layer', DynamicConvBnActLayer(
in_channel_list=expand_feature_dim, out_channel_list=last_channel,
kernel_size=1, act_func=act_func, use_bn=False,)
),
]))
#classifier head
self.classifier = DynamicLinearLayer(
in_features_list=last_channel, out_features=n_classes, bias=True
)
# set bn param
self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
# runtime_depth
self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
self.zero_residual_block_bn_weights()
self.active_dropout_rate = 0
self.active_drop_connect_rate = 0
self.active_resolution = 224
def zero_residual_block_bn_weights(self):
with torch.no_grad():
for m in self.modules():
if isinstance(m, MobileInvertedResidualBlock):
if isinstance(m.mobile_inverted_conv, DynamicMBConvLayer) and m.shortcut is not None:
m.mobile_inverted_conv.point_linear.bn.bn.weight.zero_()
@staticmethod
def name():
return 'AttentiveNasModel'
def forward(self, x):
# resize input to target resolution first
if x.size(-1) != self.active_resolution:
x = torch.nn.functional.interpolate(x, size=self.active_resolution, mode='bicubic')
# first conv
x = self.first_conv(x)
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
x = self.blocks[idx](x)
x = self.last_conv(x)
x = x.mean(3, keepdim=True).mean(2, keepdim=True)  # global average pooling
x = x.squeeze(3).squeeze(2)  # squeeze spatial dims only, so a batch of size 1 keeps its batch dim
if self.active_dropout_rate > 0 and self.training:
    x = torch.nn.functional.dropout(x, p=self.active_dropout_rate)
x = self.classifier(x)
return x
@property
def module_str(self):
_str = self.first_conv.module_str + '\n'
_str += self.blocks[0].module_str + '\n'
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
_str += self.blocks[idx].module_str + '\n'
if not self.use_v3_head:
_str += self.last_conv.module_str + '\n'
else:
_str += self.last_conv.final_expand_layer.module_str + '\n'
_str += self.last_conv.feature_mix_layer.module_str + '\n'
_str += self.classifier.module_str + '\n'
return _str
@property
def config(self):
return {
'name': AttentiveNasDynamicModel.__name__,
'bn': self.get_bn_param(),
'first_conv': self.first_conv.config,
'blocks': [
block.config for block in self.blocks
],
'last_conv': self.last_conv.config if not self.use_v3_head else None,
'final_expand_layer': self.last_conv.final_expand_layer if self.use_v3_head else None,
'feature_mix_layer': self.last_conv.feature_mix_layer if self.use_v3_head else None,
'classifier': self.classifier.config,
'resolution': self.active_resolution
}
@staticmethod
def build_from_config(config):
raise ValueError('build_from_config is not supported for the dynamic model')
""" set, sample and get active sub-networks """
def set_active_subnet(self, resolution=224, width=None, depth=None, kernel_size=None, expand_ratio=None, **kwargs):
assert len(depth) == len(kernel_size) == len(expand_ratio) == len(width) - 2
#set resolution
self.active_resolution = resolution
# first conv
self.first_conv.active_out_channel = width[0]
for stage_id, (c, k, e, d) in enumerate(zip(width[1:-1], kernel_size, expand_ratio, depth)):
start_idx, end_idx = min(self.block_group_info[stage_id]), max(self.block_group_info[stage_id])
for block_id in range(start_idx, start_idx+d):
block = self.blocks[block_id]
#block output channels
block.mobile_inverted_conv.active_out_channel = c
if block.shortcut is not None:
    block.shortcut.active_out_channel = c
#dw kernel size
block.mobile_inverted_conv.active_kernel_size = k
#dw expansion ratio
block.mobile_inverted_conv.active_expand_ratio = e
#number of times each IR block is repeated
for i, d in enumerate(depth):
self.runtime_depth[i] = min(len(self.block_group_info[i]), d)
#last conv
if not self.use_v3_head:
self.last_conv.active_out_channel = width[-1]
else:
# default expansion ratio: 6
self.last_conv.final_expand_layer.active_out_channel = width[-2] * 6
self.last_conv.feature_mix_layer.active_out_channel = width[-1]
def get_active_subnet_settings(self):
r = self.active_resolution
width, depth, kernel_size, expand_ratio= [], [], [], []
#first conv
width.append(self.first_conv.active_out_channel)
for stage_id in range(len(self.block_group_info)):
start_idx = min(self.block_group_info[stage_id])
block = self.blocks[start_idx] #first block
width.append(block.mobile_inverted_conv.active_out_channel)
kernel_size.append(block.mobile_inverted_conv.active_kernel_size)
expand_ratio.append(block.mobile_inverted_conv.active_expand_ratio)
depth.append(self.runtime_depth[stage_id])
if not self.use_v3_head:
width.append(self.last_conv.active_out_channel)
else:
width.append(self.last_conv.feature_mix_layer.active_out_channel)
return {
'resolution': r,
'width': width,
'kernel_size': kernel_size,
'expand_ratio': expand_ratio,
'depth': depth,
}
def set_dropout_rate(self, dropout=0, drop_connect=0, drop_connect_only_last_two_stages=True):
self.active_dropout_rate = dropout
for idx, block in enumerate(self.blocks):
if drop_connect_only_last_two_stages:
if idx not in self.block_group_info[-1] + self.block_group_info[-2]:
continue
this_drop_connect_rate = drop_connect * float(idx) / len(self.blocks)
block.drop_connect_rate = this_drop_connect_rate
def sample_min_subnet(self):
return self._sample_active_subnet(min_net=True)
def sample_max_subnet(self):
return self._sample_active_subnet(max_net=True)
def sample_active_subnet(self, compute_flops=False):
cfg = self._sample_active_subnet(
False, False
)
if compute_flops:
cfg['flops'] = self.compute_active_subnet_flops()
return cfg
def sample_active_subnet_within_range(self, targeted_min_flops, targeted_max_flops):
while True:
cfg = self._sample_active_subnet()
cfg['flops'] = self.compute_active_subnet_flops()
if cfg['flops'] >= targeted_min_flops and cfg['flops'] <= targeted_max_flops:
return cfg
def _sample_active_subnet(self, min_net=False, max_net=False):
sample_cfg = lambda candidates, sample_min, sample_max: \
min(candidates) if sample_min else (max(candidates) if sample_max else random.choice(candidates))
cfg = {}
# sample a resolution
cfg['resolution'] = sample_cfg(self.cfg_candidates['resolution'], min_net, max_net)
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
cfg[k] = []
for vv in self.cfg_candidates[k]:
cfg[k].append(sample_cfg(int2list(vv), min_net, max_net))
self.set_active_subnet(
cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
)
return cfg
def mutate_and_reset(self, cfg, prob=0.1, keep_resolution=False):
cfg = copy.deepcopy(cfg)
pick_another = lambda x, candidates: x if len(candidates) == 1 else random.choice([v for v in candidates if v != x])
# sample a resolution
r = random.random()
if r < prob and not keep_resolution:
cfg['resolution'] = pick_another(cfg['resolution'], self.cfg_candidates['resolution'])
# sample channels, depth, kernel_size, expand_ratio
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
for _i, _v in enumerate(cfg[k]):
r = random.random()
if r < prob:
cfg[k][_i] = pick_another(cfg[k][_i], int2list(self.cfg_candidates[k][_i]))
self.set_active_subnet(
cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
)
return cfg
def crossover_and_reset(self, cfg1, cfg2, p=0.5):
def _cross_helper(g1, g2, prob):
assert type(g1) == type(g2)
if isinstance(g1, int):
return g1 if random.random() < prob else g2
elif isinstance(g1, list):
return [v1 if random.random() < prob else v2 for v1, v2 in zip(g1, g2)]
else:
raise NotImplementedError
cfg = {}
cfg['resolution'] = cfg1['resolution'] if random.random() < p else cfg2['resolution']
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
cfg[k] = _cross_helper(cfg1[k], cfg2[k], p)
self.set_active_subnet(
cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
)
return cfg
def get_active_subnet(self, preserve_weight=True):
with torch.no_grad():
first_conv = self.first_conv.get_active_subnet(3, preserve_weight)
blocks = []
input_channel = first_conv.out_channels
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
stage_blocks = []
for idx in active_idx:
stage_blocks.append(MobileInvertedResidualBlock(
self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),
self.blocks[idx].shortcut.get_active_subnet(input_channel, preserve_weight) if self.blocks[idx].shortcut is not None else None
))
input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels
blocks += stage_blocks
if not self.use_v3_head:
last_conv = self.last_conv.get_active_subnet(input_channel, preserve_weight)
in_features = last_conv.out_channels
else:
final_expand_layer = self.last_conv.final_expand_layer.get_active_subnet(input_channel, preserve_weight)
feature_mix_layer = self.last_conv.feature_mix_layer.get_active_subnet(input_channel*6, preserve_weight)
in_features = feature_mix_layer.out_channels
last_conv = nn.Sequential(
final_expand_layer,
nn.AdaptiveAvgPool2d((1,1)),
feature_mix_layer
)
classifier = self.classifier.get_active_subnet(in_features, preserve_weight)
_subnet = AttentiveNasStaticModel(
first_conv, blocks, last_conv, classifier, self.active_resolution, use_v3_head=self.use_v3_head
)
_subnet.set_bn_param(**self.get_bn_param())
return _subnet
def get_active_net_config(self):
raise NotImplementedError
def compute_active_subnet_flops(self):
def count_conv(c_in, c_out, size_out, groups, k):
kernel_ops = k**2
output_elements = c_out * size_out**2
ops = c_in * output_elements * kernel_ops / groups
return ops
def count_linear(c_in, c_out):
return c_in * c_out
total_ops = 0
c_in = 3
size_out = self.active_resolution // self.first_conv.stride
c_out = self.first_conv.active_out_channel
total_ops += count_conv(c_in, c_out, size_out, 1, 3)
c_in = c_out
# mb blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
block = self.blocks[idx]
c_middle = make_divisible(round(c_in * block.mobile_inverted_conv.active_expand_ratio), 8)
# 1*1 conv
if block.mobile_inverted_conv.inverted_bottleneck is not None:
total_ops += count_conv(c_in, c_middle, size_out, 1, 1)
# dw conv
stride = 1 if idx > active_idx[0] else block.mobile_inverted_conv.stride
if size_out % stride == 0:
size_out = size_out // stride
else:
    size_out = (size_out + 1) // stride
total_ops += count_conv(c_middle, c_middle, size_out, c_middle, block.mobile_inverted_conv.active_kernel_size)
# 1*1 conv
c_out = block.mobile_inverted_conv.active_out_channel
total_ops += count_conv(c_middle, c_out, size_out, 1, 1)
#se
if block.mobile_inverted_conv.use_se:
num_mid = make_divisible(c_middle // block.mobile_inverted_conv.depth_conv.se.reduction, divisor=8)
total_ops += count_conv(c_middle, num_mid, 1, 1, 1) * 2
if block.shortcut and c_in != c_out:
total_ops += count_conv(c_in, c_out, size_out, 1, 1)
c_in = c_out
if not self.use_v3_head:
c_out = self.last_conv.active_out_channel
total_ops += count_conv(c_in, c_out, size_out, 1, 1)
else:
c_expand = self.last_conv.final_expand_layer.active_out_channel
c_out = self.last_conv.feature_mix_layer.active_out_channel
total_ops += count_conv(c_in, c_expand, size_out, 1, 1)
total_ops += count_conv(c_expand, c_out, 1, 1, 1)
# n_classes
total_ops += count_linear(c_out, self.n_classes)
return total_ops / 1e6
def load_weights_from_pretrained_models(self, checkpoint_path):
with open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
assert isinstance(checkpoint, dict)
pretrained_state_dicts = checkpoint['state_dict']
for k, v in self.state_dict().items():
name = 'module.' + k if not k.startswith('module') else k
v.copy_(pretrained_state_dicts[name])
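

# --- Illustrative usage sketch (added; not part of the original file) ---
# Sampling-API sketch; `args.supernet_config` is assumed to come from the
# repo's yaml configs (see utils/config.py). Comments only, not executed:
#
#   supernet = AttentiveNasDynamicModel(args.supernet_config)
#   cfg = supernet.sample_active_subnet(compute_flops=True)    # random subnet + MFLOPs
#   supernet.sample_min_subnet()                               # smallest choice everywhere
#   child = supernet.mutate_and_reset(cfg, prob=0.3)           # evolutionary mutation
#   child2 = supernet.crossover_and_reset(cfg, child)          # evolutionary crossover
#   static = supernet.get_active_subnet(preserve_weight=True)  # standalone nn.Module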
|
AttentiveNAS-main
|
models/attentive_nas_dynamic_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .model_factory import *
|
AttentiveNAS-main
|
models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import math
from .modules.static_layers import set_layer_from_config, MBInvertedConvLayer, ConvBnActLayer, ShortcutLayer, LinearLayer, MobileInvertedResidualBlock, IdentityLayer
from .modules.nn_utils import make_divisible
from .modules.nn_base import MyNetwork
class AttentiveNasStaticModel(MyNetwork):
def __init__(self, first_conv, blocks, last_conv, classifier, resolution, use_v3_head=True):
super(AttentiveNasStaticModel, self).__init__()
self.first_conv = first_conv
self.blocks = nn.ModuleList(blocks)
self.last_conv = last_conv
self.classifier = classifier
self.resolution = resolution #input size
self.use_v3_head = use_v3_head
def forward(self, x):
# resize input to target resolution first
if x.size(-1) != self.resolution:
x = torch.nn.functional.interpolate(x, size=self.resolution, mode='bicubic')
x = self.first_conv(x)
for block in self.blocks:
x = block(x)
x = self.last_conv(x)
if not self.use_v3_head:
    x = x.mean(3, keepdim=True).mean(2, keepdim=True)  # global average pooling
x = x.squeeze(3).squeeze(2)  # squeeze spatial dims only, so a batch of size 1 keeps its batch dim
x = self.classifier(x)
return x
@property
def module_str(self):
_str = self.first_conv.module_str + '\n'
for block in self.blocks:
_str += block.module_str + '\n'
#_str += self.last_conv.module_str + '\n'
_str += self.classifier.module_str
return _str
@property
def config(self):
return {
'name': AttentiveNasStaticModel.__name__,
'bn': self.get_bn_param(),
'first_conv': self.first_conv.config,
'blocks': [
block.config for block in self.blocks
],
#'last_conv': self.last_conv.config,
'classifier': self.classifier.config,
'resolution': self.resolution
}
def weight_initialization(self):
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
@staticmethod
def build_from_config(config):
raise NotImplementedError
def reset_running_stats_for_calibration(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
m.training = True
m.momentum = None # cumulative moving average
m.reset_running_stats()
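

# --- Illustrative usage sketch (added; not part of the original file) ---
# Before evaluating a subnet whose weights were inherited from the supernet,
# its BN statistics are recalibrated on a few training batches, as in
# test_attentive_nas.py; `train_loader` and `n_batches` are assumptions:
#
#   model.reset_running_stats_for_calibration()
#   with torch.no_grad():
#       for batch_idx, (images, _) in enumerate(train_loader):
#           if batch_idx >= n_batches:
#               break
#           model(images)   # the forward pass alone updates the BN running stats
#   model.eval()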
|
AttentiveNAS-main
|
models/attentive_nas_static_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .attentive_nas_dynamic_model import AttentiveNasDynamicModel
def create_model(args, arch=None):
n_classes = int(getattr(args, 'n_classes', 1000))
bn_momentum = getattr(args, 'bn_momentum', 0.1)
bn_eps = getattr(args, 'bn_eps', 1e-5)
dropout = getattr(args, 'dropout', 0)
drop_connect = getattr(args, 'drop_connect', 0)
if arch is None:
arch = args.arch
if arch == 'attentive_nas_dynamic_model':
model = AttentiveNasDynamicModel(
args.supernet_config,
n_classes = n_classes,
bn_param = (bn_momentum, bn_eps),
)
elif arch == 'attentive_nas_static_model':
supernet = AttentiveNasDynamicModel(
args.supernet_config,
n_classes = n_classes,
bn_param = (bn_momentum, bn_eps),
)
# load from pretrained models
supernet.load_weights_from_pretrained_models(args.pareto_models.supernet_checkpoint_path)
# subsample a static model with weights inherited from the supernet dynamic model
supernet.set_active_subnet(
resolution=args.active_subnet.resolution,
width = args.active_subnet.width,
depth = args.active_subnet.depth,
kernel_size = args.active_subnet.kernel_size,
expand_ratio = args.active_subnet.expand_ratio
)
model = supernet.get_active_subnet()
# house-keeping stuff
model.set_bn_param(momentum=bn_momentum, eps=bn_eps)
del supernet
else:
raise ValueError(arch)
return model
|
AttentiveNAS-main
|
models/model_factory.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F  # used by ShortcutLayer.forward
from .nn_utils import get_same_padding, build_activation, make_divisible, drop_connect
from .nn_base import MyModule
from .activations import *
def set_layer_from_config(layer_config):
if layer_config is None:
return None
name2layer = {
ConvBnActLayer.__name__: ConvBnActLayer,
IdentityLayer.__name__: IdentityLayer,
LinearLayer.__name__: LinearLayer,
MBInvertedConvLayer.__name__: MBInvertedConvLayer,
}
layer_name = layer_config.pop('name')
layer = name2layer[layer_name]
return layer.build_from_config(layer_config)
class SELayer(nn.Module):
REDUCTION = 4
def __init__(self, channel):
super(SELayer, self).__init__()
self.channel = channel
self.reduction = SELayer.REDUCTION
num_mid = make_divisible(self.channel // self.reduction, divisor=8)
self.fc = nn.Sequential(OrderedDict([
('reduce', nn.Conv2d(self.channel, num_mid, 1, 1, 0, bias=True)),
('relu', nn.ReLU(inplace=True)),
('expand', nn.Conv2d(num_mid, self.channel, 1, 1, 0, bias=True)),
('h_sigmoid', Hsigmoid(inplace=True)),
]))
def forward(self, x):
#x: N, C, H, W
y = x.mean(3, keepdim=True).mean(2, keepdim=True) # N, C, 1, 1
y = self.fc(y)
return x * y
class ConvBnActLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, dilation=1, groups=1, bias=False,
use_bn=True, act_func='relu'):
super(ConvBnActLayer, self).__init__()
# default normal 3x3_Conv with bn and relu
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
self.bias = bias
self.use_bn = use_bn
self.act_func = act_func
pad = get_same_padding(self.kernel_size)
self.conv = nn.Conv2d(in_channels, out_channels, self.kernel_size,
stride, pad, dilation=dilation, groups=groups, bias=bias
)
if self.use_bn:
self.bn = nn.BatchNorm2d(out_channels)
self.act = build_activation(self.act_func, inplace=True)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act:
x = self.act(x)
return x
@property
def module_str(self):
if isinstance(self.kernel_size, int):
kernel_size = (self.kernel_size, self.kernel_size)
else:
kernel_size = self.kernel_size
if self.groups == 1:
if self.dilation > 1:
conv_str = '%dx%d_DilatedConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_Conv' % (kernel_size[0], kernel_size[1])
else:
if self.dilation > 1:
conv_str = '%dx%d_DilatedGroupConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_GroupConv' % (kernel_size[0], kernel_size[1])
conv_str += '_O%d' % self.out_channels
return conv_str
@property
def config(self):
return {
'name': ConvBnActLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'groups': self.groups,
'bias': self.bias,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return ConvBnActLayer(**config)
class IdentityLayer(MyModule):
def __init__(self, ):
super(IdentityLayer, self).__init__()
def forward(self, x):
return x
@property
def module_str(self):
return 'Identity'
@property
def config(self):
return {
'name': IdentityLayer.__name__,
}
@staticmethod
def build_from_config(config):
return IdentityLayer(**config)
class LinearLayer(MyModule):
def __init__(self, in_features, out_features, bias=True):
super(LinearLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
#self.dropout_rate = dropout_rate
#if self.dropout_rate > 0:
# self.dropout = nn.Dropout(self.dropout_rate, inplace=True)
#else:
# self.dropout = None
self.linear = nn.Linear(in_features, out_features, bias)
def forward(self, x):
#if dropout is not None:
# x = self.dropout(x)
return self.linear(x)
@property
def module_str(self):
return '%dx%d_Linear' % (self.in_features, self.out_features)
@property
def config(self):
return {
'name': LinearLayer.__name__,
'in_features': self.in_features,
'out_features': self.out_features,
'bias': self.bias,
#'dropout_rate': self.dropout_rate,
}
@staticmethod
def build_from_config(config):
return LinearLayer(**config)
class ShortcutLayer(MyModule):
def __init__(self, in_channels, out_channels, reduction=1):
super(ShortcutLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.reduction = reduction
self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False)
def forward(self, x):
if self.reduction > 1:
padding = 0 if x.size(-1) % 2 == 0 else 1
x = F.avg_pool2d(x, self.reduction, padding=padding)
if self.in_channels != self.out_channels:
x = self.conv(x)
return x
@property
def module_str(self):
if self.in_channels == self.out_channels and self.reduction == 1:
conv_str = 'IdentityShortcut'
else:
if self.reduction == 1:
conv_str = '%d-%d_Shortcut' % (self.in_channels, self.out_channels)
else:
conv_str = '%d-%d_R%d_Shortcut' % (self.in_channels, self.out_channels, self.reduction)
return conv_str
@property
def config(self):
return {
'name': ShortcutLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'reduction': self.reduction,
}
@staticmethod
def build_from_config(config):
return ShortcutLayer(**config)
class MBInvertedConvLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False, channels_per_group=1):
super(MBInvertedConvLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.expand_ratio = expand_ratio
self.mid_channels = mid_channels
self.act_func = act_func
self.use_se = use_se
self.channels_per_group = channels_per_group
if self.mid_channels is None:
feature_dim = round(self.in_channels * self.expand_ratio)
else:
feature_dim = self.mid_channels
if self.expand_ratio == 1:
self.inverted_bottleneck = None
else:
self.inverted_bottleneck = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
('bn', nn.BatchNorm2d(feature_dim)),
('act', build_activation(self.act_func, inplace=True)),
]))
assert feature_dim % self.channels_per_group == 0
active_groups = feature_dim // self.channels_per_group
pad = get_same_padding(self.kernel_size)
depth_conv_modules = [
('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=active_groups, bias=False)),
('bn', nn.BatchNorm2d(feature_dim)),
('act', build_activation(self.act_func, inplace=True))
]
if self.use_se:
depth_conv_modules.append(('se', SELayer(feature_dim)))
self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))
self.point_linear = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
('bn', nn.BatchNorm2d(out_channels)),
]))
def forward(self, x):
if self.inverted_bottleneck:
x = self.inverted_bottleneck(x)
x = self.depth_conv(x)
x = self.point_linear(x)
return x
@property
def module_str(self):
if self.mid_channels is None:
expand_ratio = self.expand_ratio
else:
expand_ratio = self.mid_channels // self.in_channels
layer_str = '%dx%d_MBConv%d_%s' % (self.kernel_size, self.kernel_size, expand_ratio, self.act_func.upper())
if self.use_se:
layer_str = 'SE_' + layer_str
layer_str += '_O%d' % self.out_channels
return layer_str
@property
def config(self):
return {
'name': MBInvertedConvLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'expand_ratio': self.expand_ratio,
'mid_channels': self.mid_channels,
'act_func': self.act_func,
'use_se': self.use_se,
'channels_per_group': self.channels_per_group,
}
@staticmethod
def build_from_config(config):
return MBInvertedConvLayer(**config)
class MobileInvertedResidualBlock(MyModule):
def __init__(self, mobile_inverted_conv, shortcut, drop_connect_rate=0):
super(MobileInvertedResidualBlock, self).__init__()
self.mobile_inverted_conv = mobile_inverted_conv
self.shortcut = shortcut
self.drop_connect_rate = drop_connect_rate
def forward(self, x):
in_channel = x.size(1)
if self.mobile_inverted_conv is None: # or isinstance(self.mobile_inverted_conv, ZeroLayer):
res = x
elif self.shortcut is None: # or isinstance(self.shortcut, ZeroLayer):
res = self.mobile_inverted_conv(x)
else:
im = self.shortcut(x)
x = self.mobile_inverted_conv(x)
if self.drop_connect_rate > 0 and in_channel == im.size(1) and self.shortcut.reduction == 1:
x = drop_connect(x, p=self.drop_connect_rate, training=self.training)
res = x + im
return res
@property
def module_str(self):
return '(%s, %s)' % (
self.mobile_inverted_conv.module_str if self.mobile_inverted_conv is not None else None,
self.shortcut.module_str if self.shortcut is not None else None
)
@property
def config(self):
return {
'name': MobileInvertedResidualBlock.__name__,
'mobile_inverted_conv': self.mobile_inverted_conv.config if self.mobile_inverted_conv is not None else None,
'shortcut': self.shortcut.config if self.shortcut is not None else None,
}
@staticmethod
def build_from_config(config):
mobile_inverted_conv = set_layer_from_config(config['mobile_inverted_conv'])
shortcut = set_layer_from_config(config['shortcut'])
return MobileInvertedResidualBlock(mobile_inverted_conv, shortcut)
|
AttentiveNAS-main
|
models/modules/static_layers.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import math
import torch
import torch.nn as nn
try:
    from fvcore.common.file_io import PathManager
except ImportError:
    # fvcore is optional; fall back to the plain file API when missing
    PathManager = None
class MyModule(nn.Module):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
class MyNetwork(MyModule):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def zero_last_gamma(self):
raise NotImplementedError
""" implemented methods """
def set_bn_param(self, momentum, eps):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
if momentum is not None:
m.momentum = float(momentum)
else:
m.momentum = None
m.eps = float(eps)
return
def get_bn_param(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
return {
'momentum': m.momentum,
'eps': m.eps,
}
return None
def init_model(self, model_init):
""" Conv2d, BatchNorm2d, BatchNorm1d, Linear, """
for m in self.modules():
if isinstance(m, nn.Conv2d):
if model_init == 'he_fout':
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif model_init == 'he_fin':
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
else:
raise NotImplementedError
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
stdv = 1. / math.sqrt(m.weight.size(1))
m.weight.data.uniform_(-stdv, stdv)
if m.bias is not None:
m.bias.data.zero_()
def get_parameters(self, keys=None, mode='include', exclude_set=None):
if exclude_set is None:
exclude_set = {}
if keys is None:
for name, param in self.named_parameters():
if name not in exclude_set:
yield param
elif mode == 'include':
for name, param in self.named_parameters():
flag = False
for key in keys:
if key in name:
flag = True
break
if flag and name not in exclude_set:
yield param
elif mode == 'exclude':
for name, param in self.named_parameters():
flag = True
for key in keys:
if key in name:
flag = False
break
if flag and name not in exclude_set:
yield param
else:
raise ValueError('unsupported mode: %s' % mode)
def weight_parameters(self, exclude_set=None):
return self.get_parameters(exclude_set=exclude_set)
def load_weights_from_pretrained_models(self, checkpoint_path, load_from_ema=False):
try:
    with PathManager.open(checkpoint_path, 'rb') as f:
        checkpoint = torch.load(f, map_location='cpu')
except Exception:
    # fall back to the plain file API if fvcore is unavailable
    with open(checkpoint_path, 'rb') as f:
        checkpoint = torch.load(f, map_location='cpu')
assert isinstance(checkpoint, dict)
pretrained_state_dicts = checkpoint['state_dict']
if load_from_ema and 'state_dict_ema' in checkpoint:
pretrained_state_dicts = checkpoint['state_dict_ema']
for k, v in self.state_dict().items():
name = k
if not load_from_ema:
name = 'module.' + k if not k.startswith('module') else k
v.copy_(pretrained_state_dicts[name])
|
AttentiveNAS-main
|
models/modules/nn_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
import torch.nn.functional as F
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3., inplace=self.inplace) / 6.
#class Swish(nn.Module):
# def __init__(self, inplace=True):
# super(Swish, self).__init__()
# self.inplace = inplace
#
# def forward(self, x):
# return x * torch.sigmoid(x)
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3., inplace=self.inplace) / 6.
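

# --- Illustrative sanity check (added; not part of the original file) ---
if __name__ == '__main__':
    x = torch.linspace(-5., 5., steps=5)
    # MemoryEfficientSwish should match the reference x * sigmoid(x)
    print(torch.allclose(MemoryEfficientSwish()(x), x * torch.sigmoid(x)))
    # Hswish(x) = x * relu6(x + 3) / 6, Hsigmoid(x) = relu6(x + 3) / 6
    print(Hswish(inplace=False)(x))
    print(Hsigmoid(inplace=False)(x))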
|
AttentiveNAS-main
|
models/modules/activations.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .static_layers import MBInvertedConvLayer, ConvBnActLayer, LinearLayer, SELayer, ShortcutLayer
from .dynamic_ops import DynamicSeparableConv2d, DynamicPointConv2d, DynamicBatchNorm2d, DynamicLinear, DynamicSE
from .nn_utils import int2list, get_net_device, copy_bn, build_activation, make_divisible
from .nn_base import MyModule, MyNetwork
class DynamicMBConvLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list,
kernel_size_list=3, expand_ratio_list=6, stride=1, act_func='relu6', use_se=False, channels_per_group=1):
super(DynamicMBConvLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.kernel_size_list = int2list(kernel_size_list, 1)
self.expand_ratio_list = int2list(expand_ratio_list, 1)
self.stride = stride
self.act_func = act_func
self.use_se = use_se
self.channels_per_group = channels_per_group
# build modules
max_middle_channel = round(max(self.in_channel_list) * max(self.expand_ratio_list))
if max(self.expand_ratio_list) == 1:
self.inverted_bottleneck = None
else:
self.inverted_bottleneck = nn.Sequential(OrderedDict([
('conv', DynamicPointConv2d(max(self.in_channel_list), max_middle_channel)),
('bn', DynamicBatchNorm2d(max_middle_channel)),
('act', build_activation(self.act_func, inplace=True)),
]))
self.depth_conv = nn.Sequential(OrderedDict([
('conv', DynamicSeparableConv2d(max_middle_channel, self.kernel_size_list, stride=self.stride, channels_per_group=self.channels_per_group)),
('bn', DynamicBatchNorm2d(max_middle_channel)),
('act', build_activation(self.act_func, inplace=True))
]))
if self.use_se:
self.depth_conv.add_module('se', DynamicSE(max_middle_channel))
self.point_linear = nn.Sequential(OrderedDict([
('conv', DynamicPointConv2d(max_middle_channel, max(self.out_channel_list))),
('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
]))
self.active_kernel_size = max(self.kernel_size_list)
self.active_expand_ratio = max(self.expand_ratio_list)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
in_channel = x.size(1)
if self.inverted_bottleneck is not None:
self.inverted_bottleneck.conv.active_out_channel = \
make_divisible(round(in_channel * self.active_expand_ratio), 8)
self.depth_conv.conv.active_kernel_size = self.active_kernel_size
self.point_linear.conv.active_out_channel = self.active_out_channel
if self.inverted_bottleneck is not None:
x = self.inverted_bottleneck(x)
x = self.depth_conv(x)
x = self.point_linear(x)
return x
@property
def module_str(self):
if self.use_se:
return 'SE(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
else:
return '(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
@property
def config(self):
return {
'name': DynamicMBConvLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'kernel_size_list': self.kernel_size_list,
'expand_ratio_list': self.expand_ratio_list,
'stride': self.stride,
'act_func': self.act_func,
'use_se': self.use_se,
'channels_per_group': self.channels_per_group,
}
@staticmethod
def build_from_config(config):
return DynamicMBConvLayer(**config)
############################################################################################
def get_active_subnet(self, in_channel, preserve_weight=True):
middle_channel = make_divisible(round(in_channel * self.active_expand_ratio), 8)
channels_per_group = self.depth_conv.conv.channels_per_group
# build the new layer
sub_layer = MBInvertedConvLayer(
in_channel, self.active_out_channel, self.active_kernel_size, self.stride, self.active_expand_ratio,
act_func=self.act_func, mid_channels=middle_channel, use_se=self.use_se, channels_per_group=channels_per_group
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
# copy weight from current layer
if sub_layer.inverted_bottleneck is not None:
sub_layer.inverted_bottleneck.conv.weight.data.copy_(
self.inverted_bottleneck.conv.conv.weight.data[:middle_channel, :in_channel, :, :]
)
copy_bn(sub_layer.inverted_bottleneck.bn, self.inverted_bottleneck.bn.bn)
sub_layer.depth_conv.conv.weight.data.copy_(
self.depth_conv.conv.get_active_filter(middle_channel, self.active_kernel_size).data
)
copy_bn(sub_layer.depth_conv.bn, self.depth_conv.bn.bn)
if self.use_se:
se_mid = make_divisible(middle_channel // SELayer.REDUCTION, divisor=8)
sub_layer.depth_conv.se.fc.reduce.weight.data.copy_(
self.depth_conv.se.fc.reduce.weight.data[:se_mid, :middle_channel, :, :]
)
sub_layer.depth_conv.se.fc.reduce.bias.data.copy_(self.depth_conv.se.fc.reduce.bias.data[:se_mid])
sub_layer.depth_conv.se.fc.expand.weight.data.copy_(
self.depth_conv.se.fc.expand.weight.data[:middle_channel, :se_mid, :, :]
)
sub_layer.depth_conv.se.fc.expand.bias.data.copy_(self.depth_conv.se.fc.expand.bias.data[:middle_channel])
sub_layer.point_linear.conv.weight.data.copy_(
self.point_linear.conv.conv.weight.data[:self.active_out_channel, :middle_channel, :, :]
)
copy_bn(sub_layer.point_linear.bn, self.point_linear.bn.bn)
return sub_layer
def re_organize_middle_weights(self, expand_ratio_stage=0):
raise NotImplementedError
#importance = torch.sum(torch.abs(self.point_linear.conv.conv.weight.data), dim=(0, 2, 3))
#if expand_ratio_stage > 0:
# sorted_expand_list = copy.deepcopy(self.expand_ratio_list)
# sorted_expand_list.sort(reverse=True)
# target_width = sorted_expand_list[expand_ratio_stage]
# target_width = round(max(self.in_channel_list) * target_width)
# importance[target_width:] = torch.arange(0, target_width - importance.size(0), -1)
#
#sorted_importance, sorted_idx = torch.sort(importance, dim=0, descending=True)
#self.point_linear.conv.conv.weight.data = torch.index_select(
# self.point_linear.conv.conv.weight.data, 1, sorted_idx
#)
#
#adjust_bn_according_to_idx(self.depth_conv.bn.bn, sorted_idx)
#self.depth_conv.conv.conv.weight.data = torch.index_select(
# self.depth_conv.conv.conv.weight.data, 0, sorted_idx
#)
#if self.use_se:
# # se expand: output dim 0 reorganize
# se_expand = self.depth_conv.se.fc.expand
# se_expand.weight.data = torch.index_select(se_expand.weight.data, 0, sorted_idx)
# se_expand.bias.data = torch.index_select(se_expand.bias.data, 0, sorted_idx)
# # se reduce: input dim 1 reorganize
# se_reduce = self.depth_conv.se.fc.reduce
# se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 1, sorted_idx)
# # middle weight reorganize
# se_importance = torch.sum(torch.abs(se_expand.weight.data), dim=(0, 2, 3))
# se_importance, se_idx = torch.sort(se_importance, dim=0, descending=True)
# se_expand.weight.data = torch.index_select(se_expand.weight.data, 1, se_idx)
# se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 0, se_idx)
# se_reduce.bias.data = torch.index_select(se_reduce.bias.data, 0, se_idx)
#
## TODO if inverted_bottleneck is None, the previous layer should be reorganized accordingly
#if self.inverted_bottleneck is not None:
# adjust_bn_according_to_idx(self.inverted_bottleneck.bn.bn, sorted_idx)
# self.inverted_bottleneck.conv.conv.weight.data = torch.index_select(
# self.inverted_bottleneck.conv.conv.weight.data, 0, sorted_idx
# )
# return None
#else:
# return sorted_idx
class DynamicConvBnActLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list, kernel_size=3, stride=1, dilation=1,
use_bn=True, act_func='relu6'):
super(DynamicConvBnActLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.use_bn = use_bn
self.act_func = act_func
self.conv = DynamicPointConv2d(
max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation,
)
if self.use_bn:
self.bn = DynamicBatchNorm2d(max(self.out_channel_list))
if self.act_func is not None:
self.act = build_activation(self.act_func, inplace=True)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
self.conv.active_out_channel = self.active_out_channel
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act_func is not None:
x = self.act(x)
return x
@property
def module_str(self):
return 'DyConv(O%d, K%d, S%d)' % (self.active_out_channel, self.kernel_size, self.stride)
@property
def config(self):
return {
'name': DynamicConvBnActLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return DynamicConvBnActLayer(**config)
def get_active_subnet(self, in_channel, preserve_weight=True):
sub_layer = ConvBnActLayer(
in_channel, self.active_out_channel, self.kernel_size, self.stride, self.dilation,
use_bn=self.use_bn, act_func=self.act_func
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
if self.use_bn:
copy_bn(sub_layer.bn, self.bn.bn)
return sub_layer
class DynamicLinearLayer(MyModule):
def __init__(self, in_features_list, out_features, bias=True):
super(DynamicLinearLayer, self).__init__()
self.in_features_list = int2list(in_features_list)
self.out_features = out_features
self.bias = bias
#self.dropout_rate = dropout_rate
#
#if self.dropout_rate > 0:
# self.dropout = nn.Dropout(self.dropout_rate, inplace=True)
#else:
# self.dropout = None
self.linear = DynamicLinear(
max_in_features=max(self.in_features_list), max_out_features=self.out_features, bias=self.bias
)
def forward(self, x):
#if self.dropout is not None:
# x = self.dropout(x)
return self.linear(x)
@property
def module_str(self):
return 'DyLinear(%d)' % self.out_features
@property
def config(self):
return {
    'name': DynamicLinearLayer.__name__,
    'in_features_list': self.in_features_list,
    'out_features': self.out_features,
    'bias': self.bias
}
@staticmethod
def build_from_config(config):
return DynamicLinearLayer(**config)
def get_active_subnet(self, in_features, preserve_weight=True):
#sub_layer = LinearLayer(in_features, self.out_features, self.bias, dropout_rate=self.dropout_rate)
sub_layer = LinearLayer(in_features, self.out_features, self.bias)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.linear.weight.data.copy_(self.linear.linear.weight.data[:self.out_features, :in_features])
if self.bias:
sub_layer.linear.bias.data.copy_(self.linear.linear.bias.data[:self.out_features])
return sub_layer
class DynamicShortcutLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list, reduction=1):
super(DynamicShortcutLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.reduction = reduction
self.conv = DynamicPointConv2d(
max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
kernel_size=1, stride=1,
)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
in_channel = x.size(1)
#identity mapping
if in_channel == self.active_out_channel and self.reduction == 1:
return x
#average pooling, if size doesn't match
if self.reduction > 1:
padding = 0 if x.size(-1) % 2 == 0 else 1
x = F.avg_pool2d(x, self.reduction, padding=padding)
#1*1 conv, if #channels doesn't match
if in_channel != self.active_out_channel:
self.conv.active_out_channel = self.active_out_channel
x = self.conv(x)
return x
@property
def module_str(self):
return 'DyShortcut(O%d, R%d)' % (self.active_out_channel, self.reduction)
@property
def config(self):
return {
'name': DynamicShortcutLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'reduction': self.reduction,
}
@staticmethod
def build_from_config(config):
return DynamicShortcutLayer(**config)
def get_active_subnet(self, in_channel, preserve_weight=True):
sub_layer = ShortcutLayer(
in_channel, self.active_out_channel, self.reduction
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
return sub_layer
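

# --- Illustrative usage sketch (added; not part of the original file) ---
# A dynamic layer stores the weights of its largest configuration; setting the
# `active_*` attributes selects a choice, and get_active_subnet() slices the
# corresponding weights into a static layer. The values below are made up:
#
#   layer = DynamicMBConvLayer(in_channel_list=[24], out_channel_list=[32, 40],
#                              kernel_size_list=[3, 5], expand_ratio_list=[4, 6],
#                              stride=2, use_se=True)
#   layer.active_kernel_size = 3
#   layer.active_expand_ratio = 4
#   layer.active_out_channel = 32
#   x = torch.randn(1, 24, 56, 56)
#   y = layer(x)                                      # 1 x 32 x 28 x 28
#   static_layer = layer.get_active_subnet(in_channel=24)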
|
AttentiveNAS-main
|
models/modules/dynamic_layers.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
AttentiveNAS-main
|
models/modules/__init__.py
|