python_code | repo_name | file_path
---|---|---
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, LayerDecayValueAssigner
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import models.convnext
import models.convnext_isotropic
def str2bool(v):
"""
Converts string to bool type; enables command line
arguments in the format of '--arg1 true --arg2 false'
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
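# Editorial note (illustration, not part of the original script): str2bool is meant to
# be passed as an argparse `type`, so boolean flags can be written as
# `--model_ema true` or `--use_amp false`. A hypothetical
# `parser.add_argument('--flag', type=str2bool, default=False)` turns the string
# 'yes' into True and 'no' into False.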
def get_args_parser():
parser = argparse.ArgumentParser('ConvNeXt training and evaluation script for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
help='Per GPU batch size')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--update_freq', default=1, type=int,
help='gradient accumulation steps')
# Model parameters
parser.add_argument('--model', default='convnext_tiny', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--drop_path', type=float, default=0, metavar='PCT',
help='Drop path rate (default: 0.0)')
parser.add_argument('--input_size', default=224, type=int,
help='image input size')
parser.add_argument('--layer_scale_init_value', default=1e-6, type=float,
help="Layer scale initial values")
# EMA related parameters
parser.add_argument('--model_ema', type=str2bool, default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', type=str2bool, default=False, help='')
parser.add_argument('--model_ema_eval', type=str2bool, default=False, help='Using ema to eval during training.')
# Optimization parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD; a larger decay towards
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=4e-3, metavar='LR',
help='learning rate (default: 4e-3), with total batch size 4096')
parser.add_argument('--layer_decay', type=float, default=1.0)
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR; overrides warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', type=str2bool, default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--head_init_scale', default=1.0, type=float,
help='classifier head initial scale, typically adjusted in fine-tuning')
parser.add_argument('--model_key', default='model|module', type=str,
help='which key to load from saved state dict, usually model or model_ema')
parser.add_argument('--model_prefix', default='', type=str)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True)
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'],
type=str, help='dataset name')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', type=str2bool, default=True)
parser.add_argument('--save_ckpt', type=str2bool, default=True)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--save_ckpt_num', default=3, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', type=str2bool, default=False,
help='Perform evaluation only')
parser.add_argument('--dist_eval', type=str2bool, default=True,
help='Enabling distributed evaluation')
parser.add_argument('--disable_eval', type=str2bool, default=False,
help='Disabling evaluation during training')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', type=str2bool, default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', type=str2bool, default=False)
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--use_amp', type=str2bool, default=False,
help="Use PyTorch's AMP (Automatic Mixed Precision) or not")
# Weights and Biases arguments
parser.add_argument('--enable_wandb', type=str2bool, default=False,
help="enable logging to Weights and Biases")
parser.add_argument('--project', default='convnext', type=str,
help="The name of the W&B project where you're sending the new run.")
parser.add_argument('--wandb_ckpt', type=str2bool, default=False,
help="Save model checkpoints as W&B Artifacts.")
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval:
args.dist_eval = False
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
if global_rank == 0 and args.enable_wandb:
wandb_logger = utils.WandbLogger(args)
else:
wandb_logger = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
layer_scale_init_value=args.layer_scale_init_value,
head_init_scale=args.head_init_scale,
)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" % num_training_steps_per_epoch)
if args.layer_decay != 1.0:
num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value.
assert args.model in ['convnext_small', 'convnext_base', 'convnext_large', 'convnext_xlarge'], \
"Layer Decay impl only supports convnext_small/base/large/xlarge"
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=None,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler() # if args.use_amp is False, this won't be used
print("Use Cosine LR scheduler")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
print(f"Eval only mode")
test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%")
return
max_accuracy = 0.0
if args.model_ema and args.model_ema_eval:
max_accuracy_ema = 0.0
print("Start training for %d epochs" % args.epochs)
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
if wandb_logger:
wandb_logger.set_steps()
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer,
device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer, wandb_logger=wandb_logger, start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
use_amp=args.use_amp
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
# repeat testing routines for EMA, if ema eval is turned on
if args.model_ema and args.model_ema_eval:
test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%")
if max_accuracy_ema < test_stats_ema["acc1"]:
max_accuracy_ema = test_stats_ema["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best-ema", model_ema=model_ema)
print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch)
log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}})
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
if wandb_logger:
wandb_logger.log_epoch_metrics(log_stats)
if wandb_logger and args.wandb_ckpt and args.save_ckpt and args.output_dir:
wandb_logger.log_checkpoints()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('ConvNeXt training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
ConvNeXt-main | main.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_convnext(var_name):
"""
Divide [3, 3, 27, 3] layers into 12 groups; each group is three
consecutive blocks, including possible neighboring downsample layers;
adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
"""
num_max_layer = 12
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
if stage_id == 0:
layer_id = 0
elif stage_id == 1 or stage_id == 2:
layer_id = stage_id + 1
elif stage_id == 3:
layer_id = 12
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
if stage_id == 0 or stage_id == 1:
layer_id = stage_id + 1
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = 12
return layer_id
else:
return num_max_layer + 1
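# Editorial note (worked examples, not part of the original file), assuming the
# ConvNeXt-S/B/L/XL depths [3, 3, 27, 3]:
#   "downsample_layers.0.*" -> 0 (stem)
#   "downsample_layers.2.*" -> 3
#   "stages.1.2.*"          -> 2
#   "stages.2.13.*"         -> 3 + 13 // 3 = 7
#   "stages.3.0.*"          -> 12
#   "norm.*", "head.*"      -> 13 (num_max_layer + 1)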
class LayerDecayValueAssigner(object):
def __init__(self, values):
self.values = values
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
return get_num_layer_for_convnext(var_name)
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
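# Editorial note (illustration, not part of the original file): with a layer-id function
# the returned groups are keyed like "layer_0_no_decay" or "layer_7_decay", each carrying
# its own lr_scale; without one there are just the two groups "decay" and "no_decay".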
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
# if weight_decay and filter_bias_and_bn:
if filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False, "Invalid optimizer"
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
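# Editorial sketch (not part of the original file): a minimal, hypothetical use of
# create_optimizer with layer-wise lr decay. The SimpleNamespace stands in for the
# argparse args used by the training script; only the fields read above are filled in.
def _example_create_optimizer():
    from types import SimpleNamespace
    import torch.nn as nn
    args = SimpleNamespace(opt='adamw', lr=4e-3, weight_decay=0.05,
                           opt_eps=1e-8, opt_betas=None, momentum=0.9)
    assigner = LayerDecayValueAssigner([0.9 ** (13 - i) for i in range(14)])
    model = nn.Linear(8, 2)  # stand-in module; a real ConvNeXt would be used in practice
    return create_optimizer(args, model,
                            get_num_layer=assigner.get_layer_id,
                            get_layer_scale=assigner.get_scale)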
ConvNeXt-main | optim_factory.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from timm.models.registry import register_model
from .convnext import Block, LayerNorm
class ConvNeXtIsotropic(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Isotropic ConvNeXts (Section 3.3 in paper)
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depth (int): Number of blocks. Default: 18
dim (int): Feature dimension. Default: 384
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depth=18, dim=384, drop_path_rate=0.,
layer_scale_init_value=0, head_init_scale=1.,
):
super().__init__()
self.stem = nn.Conv2d(in_chans, dim, kernel_size=16, stride=16)
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.Sequential(*[Block(dim=dim, drop_path=dp_rates[i],
layer_scale_init_value=layer_scale_init_value)
for i in range(depth)])
self.norm = LayerNorm(dim, eps=1e-6) # final norm layer
self.head = nn.Linear(dim, num_classes)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
x = self.stem(x)
x = self.blocks(x)
return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def convnext_isotropic_small(pretrained=False, **kwargs):
model = ConvNeXtIsotropic(depth=18, dim=384, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_small_1k_224_ema.pth'
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_isotropic_base(pretrained=False, **kwargs):
model = ConvNeXtIsotropic(depth=18, dim=768, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_base_1k_224_ema.pth'
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_isotropic_large(pretrained=False, **kwargs):
model = ConvNeXtIsotropic(depth=36, dim=1024, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_large_1k_224_ema.pth'
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
ConvNeXt-main | models/convnext_isotropic.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from timm.models.registry import register_model
class Block(nn.Module):
r""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
requires_grad=True) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
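# Editorial note (illustration, not part of the original file): a Block preserves both
# the channel count and the spatial resolution, e.g. for a hypothetical input
# x = torch.randn(2, 96, 56, 56), Block(dim=96)(x) again has shape (2, 96, 56, 56).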
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
layer_scale_init_value=1e-6, head_init_scale=1.,
):
super().__init__()
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
class LayerNorm(nn.Module):
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
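# Editorial note (not part of the original file): in the "channels_first" branch the
# statistics are taken over the channel dimension only, so for an (N, C, H, W) input the
# result matches F.layer_norm applied to the tensor permuted to (N, H, W, C) and then
# permuted back.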
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_tiny_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
"convnext_small_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
@register_model
def convnext_tiny(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
if pretrained:
url = model_urls['convnext_tiny_22k'] if in_22k else model_urls['convnext_tiny_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu", check_hash=True)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_small(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
if pretrained:
url = model_urls['convnext_small_22k'] if in_22k else model_urls['convnext_small_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_base(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
if pretrained:
url = model_urls['convnext_base_22k'] if in_22k else model_urls['convnext_base_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_large(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
if pretrained:
url = model_urls['convnext_large_22k'] if in_22k else model_urls['convnext_large_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_xlarge(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
if pretrained:
assert in_22k, "only ImageNet-22K pre-trained ConvNeXt-XL is available; please set in_22k=True"
url = model_urls['convnext_xlarge_22k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
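# Editorial sketch (not part of the original file): the variants above differ only in
# depths/dims. A hypothetical smoke test that builds ConvNeXt-T directly and checks the
# output shape could look like this.
def _example_convnext_tiny_forward():
    model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
                     drop_path_rate=0.1, num_classes=1000)
    x = torch.randn(2, 3, 224, 224)  # stem (stride 4) + 3 downsamplings -> 7x7 features
    logits = model(x)
    assert logits.shape == (2, 1000)
    return logits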
ConvNeXt-main | models/convnext.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.runner import get_dist_info
def get_num_layer_layer_wise(var_name, num_max_layer=12):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.downsample_layers"):
stage_id = int(var_name.split('.')[2])
if stage_id == 0:
layer_id = 0
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3
elif stage_id == 3:
layer_id = num_max_layer
return layer_id
elif var_name.startswith("backbone.stages"):
stage_id = int(var_name.split('.')[2])
block_id = int(var_name.split('.')[3])
if stage_id == 0:
layer_id = 1
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = num_max_layer
return layer_id
else:
return num_max_layer + 1
def get_num_layer_stage_wise(var_name, num_max_layer):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.downsample_layers"):
return 0
elif var_name.startswith("backbone.stages"):
stage_id = int(var_name.split('.')[2])
return stage_id + 1
else:
return num_max_layer - 1
@OPTIMIZER_BUILDERS.register_module()
class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
def add_params(self, params, module, prefix='', is_dcn_module=None):
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module
is_dcn_module (int|float|None): If the current module is a
submodule of DCN, `is_dcn_module` will be passed to
control conv_offset layer's learning rate. Defaults to None.
"""
parameter_groups = {}
print(self.paramwise_cfg)
num_layers = self.paramwise_cfg.get('num_layers') + 2
decay_rate = self.paramwise_cfg.get('decay_rate')
decay_type = self.paramwise_cfg.get('decay_type', "layer_wise")
print("Build LearningRateDecayOptimizerConstructor %s %f - %d" % (decay_type, decay_rate, num_layers))
weight_decay = self.base_wd
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if decay_type == "layer_wise":
layer_id = get_num_layer_layer_wise(name, self.paramwise_cfg.get('num_layers'))
elif decay_type == "stage_wise":
layer_id = get_num_layer_stage_wise(name, num_layers)
group_name = "layer_%d_%s" % (layer_id, group_name)
if group_name not in parameter_groups:
scale = decay_rate ** (num_layers - layer_id - 1)
parameter_groups[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"param_names": [],
"lr_scale": scale,
"group_name": group_name,
"lr": scale * self.base_lr,
}
parameter_groups[group_name]["params"].append(param)
parameter_groups[group_name]["param_names"].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
"param_names": parameter_groups[key]["param_names"],
"lr_scale": parameter_groups[key]["lr_scale"],
"lr": parameter_groups[key]["lr"],
"weight_decay": parameter_groups[key]["weight_decay"],
}
print("Param groups = %s" % json.dumps(to_display, indent=2))
params.extend(parameter_groups.values())
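# Editorial note (worked example, not part of the original file): the lr scale is
# decay_rate ** (num_layers - layer_id - 1) with num_layers = paramwise_cfg['num_layers'] + 2;
# e.g. with decay_rate 0.95 and 'num_layers' 6, the stem (layer 0) is scaled by
# 0.95 ** 7 (about 0.70 of base_lr) while the last group keeps the full learning rate.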
ConvNeXt-main | object_detection/mmcv_custom/layer_decay_optimizer_constructor.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
from .checkpoint import load_checkpoint
from .layer_decay_optimizer_constructor import LearningRateDecayOptimizerConstructor
from .customized_text import CustomizedTextLoggerHook
__all__ = ['load_checkpoint', 'LearningRateDecayOptimizerConstructor', 'CustomizedTextLoggerHook']
ConvNeXt-main | object_detection/mmcv_custom/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
from collections import OrderedDict
import torch
import mmcv
from mmcv.runner import HOOKS
from mmcv.runner import TextLoggerHook
@HOOKS.register_module()
class CustomizedTextLoggerHook(TextLoggerHook):
"""Customized Text Logger hook.
This logger prints out both lr and layer_0_lr.
"""
def _log_info(self, log_dict, runner):
# print exp name for users to distinguish experiments
# at every ``interval_exp_name`` iterations and the end of each epoch
if runner.meta is not None and 'exp_name' in runner.meta:
if (self.every_n_iters(runner, self.interval_exp_name)) or (
self.by_epoch and self.end_of_epoch(runner)):
exp_info = f'Exp name: {runner.meta["exp_name"]}'
runner.logger.info(exp_info)
if log_dict['mode'] == 'train':
lr_str = {}
for lr_type in ['lr', 'layer_0_lr']:
if isinstance(log_dict[lr_type], dict):
lr_str[lr_type] = []
for k, val in log_dict[lr_type].items():
lr_str[lr_type].append(f'{lr_type}_{k}: {val:.3e}')
lr_str[lr_type] = ' '.join(lr_str[lr_type])
else:
lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}'
# by epoch: Epoch [4][100/1000]
# by iter: Iter [100/100000]
if self.by_epoch:
log_str = f'Epoch [{log_dict["epoch"]}]' \
f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
else:
log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
log_str += f'{lr_str["lr"]}, {lr_str["layer_0_lr"]}, '
if 'time' in log_dict.keys():
self.time_sec_tot += (log_dict['time'] * self.interval)
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
log_str += f'eta: {eta_str}, '
log_str += f'time: {log_dict["time"]:.3f}, ' \
f'data_time: {log_dict["data_time"]:.3f}, '
# statistic memory
if torch.cuda.is_available():
log_str += f'memory: {log_dict["memory"]}, '
else:
# val/test time
# here 1000 is the length of the val dataloader
# by epoch: Epoch[val] [4][1000]
# by iter: Iter[val] [1000]
if self.by_epoch:
log_str = f'Epoch({log_dict["mode"]}) ' \
f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
else:
log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'
log_items = []
for name, val in log_dict.items():
# TODO: resolve this hack
# these items have been in log_str
if name in [
'mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time', 'data_time',
'memory', 'epoch'
]:
continue
if isinstance(val, float):
val = f'{val:.4f}'
log_items.append(f'{name}: {val}')
log_str += ', '.join(log_items)
runner.logger.info(log_str)
def log(self, runner):
if 'eval_iter_num' in runner.log_buffer.output:
# this doesn't modify runner.iter and is regardless of by_epoch
cur_iter = runner.log_buffer.output.pop('eval_iter_num')
else:
cur_iter = self.get_iter(runner, inner_iter=True)
log_dict = OrderedDict(
mode=self.get_mode(runner),
epoch=self.get_epoch(runner),
iter=cur_iter)
# record lr and layer_0_lr
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
log_dict['layer_0_lr'] = min(cur_lr)
log_dict['lr'] = max(cur_lr)
else:
assert isinstance(cur_lr, dict)
log_dict['lr'], log_dict['layer_0_lr'] = {}, {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
log_dict['layer_0_lr'].update({k: min(lr_)})
log_dict['lr'].update({k: max(lr_)})
if 'time' in runner.log_buffer.output:
# statistic memory
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
log_dict = dict(log_dict, **runner.log_buffer.output)
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
return log_dict
ConvNeXt-main | object_detection/mmcv_custom/customized_text.py
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
try:
import apex
except ImportError:
print('apex is not installed')
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 4 fields: ``meta``, ``state_dict``,
``optimizer`` and ``amp``. By default ``meta`` will contain version
and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
# checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
ConvNeXt-main | object_detection/mmcv_custom/runner/checkpoint.py
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='CustomizedTextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
ConvNeXt-main | object_detection/configs/_base_/default_runtime.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# model settings
model = dict(
type='MaskRCNN',
pretrained=None,
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.2,
layer_scale_init_value=1e-6,
out_indices=[0, 1, 2, 3],
),
neck=dict(
type='FPN',
in_channels=[128, 256, 512, 1024],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
ConvNeXt-main | object_detection/configs/_base_/models/mask_rcnn_convnext_fpn.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# model settings
model = dict(
type='CascadeRCNN',
pretrained=None,
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.2,
layer_scale_init_value=1e-6,
out_indices=[0, 1, 2, 3],
),
neck=dict(
type='FPN',
in_channels=[128, 256, 512, 1024],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
ConvNeXt-main | object_detection/configs/_base_/models/cascade_mask_rcnn_convnext_fpn.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[96, 192, 384, 768]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6})
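# num_layers=6 matches the depth indices the layer-wise decay constructor assigns
# to a ConvNeXt-T backbone (depths [3, 3, 9, 3]); the deeper variants in these
# configs use num_layers=12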
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_adamw_3x_coco_in1k.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/cascade_mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[96, 192, 384, 768]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12})
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/cascade_mask_rcnn_convnext_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/cascade_mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[192, 384, 768, 1536],
drop_path_rate=0.7,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[192, 384, 768, 1536]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12})
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=False,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/cascade_mask_rcnn_convnext_large_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/cascade_mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[128, 256, 512, 1024],
drop_path_rate=0.7,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[128, 256, 512, 1024]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.8,
'decay_type': 'layer_wise',
'num_layers': 12})
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/cascade_mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[256, 512, 1024, 2048],
drop_path_rate=0.8,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[256, 512, 1024, 2048]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline), samples_per_gpu=2)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12})
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=False,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/cascade_mask_rcnn_convnext_xlarge_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/cascade_mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[96, 192, 384, 768]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 6})
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/cascade_mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/cascade_mask_rcnn_convnext_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[128, 256, 512, 1024],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
neck=dict(in_channels=[128, 256, 512, 1024]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.8,
'decay_type': 'layer_wise',
'num_layers': 12})
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py
|
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .trident_resnet import TridentResNet
from .swin_transformer import SwinTransformer
from .convnext import ConvNeXt
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net',
'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet',
'ResNeSt', 'TridentResNet', 'SwinTransformer', 'ConvNeXt'
]
|
ConvNeXt-main
|
object_detection/mmdet/models/backbones/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from mmcv_custom import load_checkpoint
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
class Block(nn.Module):
r""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
requires_grad=True) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
@BACKBONES.register_module()
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3],
):
super().__init__()
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.out_indices = out_indices
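        # one norm layer per output stage; applied to the corresponding feature
        # map in forward_features before it is returned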
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def forward_features(self, x):
outs = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
if i in self.out_indices:
norm_layer = getattr(self, f'norm{i}')
x_out = norm_layer(x)
outs.append(x_out)
return tuple(outs)
def forward(self, x):
x = self.forward_features(x)
return x
class LayerNorm(nn.Module):
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
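            # normalize over the channel dim of an (N, C, H, W) tensor by hand,
            # since F.layer_norm assumes the normalized dims are the trailing ones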
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
|
ConvNeXt-main
|
object_detection/mmdet/models/backbones/convnext.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.runner import get_dist_info
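# The helpers below map a backbone parameter name to a depth index; the
# constructor then scales each parameter group's lr geometrically with that
# index (layer-wise or stage-wise lr decay, chosen via paramwise_cfg).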
def get_num_layer_layer_wise(var_name, num_max_layer=12):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.downsample_layers"):
stage_id = int(var_name.split('.')[2])
if stage_id == 0:
layer_id = 0
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3
elif stage_id == 3:
layer_id = num_max_layer
return layer_id
elif var_name.startswith("backbone.stages"):
stage_id = int(var_name.split('.')[2])
block_id = int(var_name.split('.')[3])
if stage_id == 0:
layer_id = 1
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = num_max_layer
return layer_id
else:
return num_max_layer + 1
def get_num_layer_stage_wise(var_name, num_max_layer):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.downsample_layers"):
return 0
elif var_name.startswith("backbone.stages"):
stage_id = int(var_name.split('.')[2])
return stage_id + 1
else:
return num_max_layer - 1
@OPTIMIZER_BUILDERS.register_module()
class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
def add_params(self, params, module, prefix='', is_dcn_module=None):
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module
is_dcn_module (int|float|None): If the current module is a
submodule of DCN, `is_dcn_module` will be passed to
control conv_offset layer's learning rate. Defaults to None.
"""
parameter_groups = {}
print(self.paramwise_cfg)
num_layers = self.paramwise_cfg.get('num_layers') + 2
decay_rate = self.paramwise_cfg.get('decay_rate')
decay_type = self.paramwise_cfg.get('decay_type', "layer_wise")
print("Build LearningRateDecayOptimizerConstructor %s %f - %d" % (decay_type, decay_rate, num_layers))
weight_decay = self.base_wd
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if decay_type == "layer_wise":
layer_id = get_num_layer_layer_wise(name, self.paramwise_cfg.get('num_layers'))
elif decay_type == "stage_wise":
layer_id = get_num_layer_stage_wise(name, num_layers)
group_name = "layer_%d_%s" % (layer_id, group_name)
if group_name not in parameter_groups:
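                # lr scale decays geometrically with depth: the shallowest group
                # gets decay_rate ** (num_layers - 1) of the base lr, the head gets 1.0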
scale = decay_rate ** (num_layers - layer_id - 1)
parameter_groups[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"param_names": [],
"lr_scale": scale,
"group_name": group_name,
"lr": scale * self.base_lr,
}
parameter_groups[group_name]["params"].append(param)
parameter_groups[group_name]["param_names"].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
"param_names": parameter_groups[key]["param_names"],
"lr_scale": parameter_groups[key]["lr_scale"],
"lr": parameter_groups[key]["lr"],
"weight_decay": parameter_groups[key]["weight_decay"],
}
print("Param groups = %s" % json.dumps(to_display, indent=2))
params.extend(parameter_groups.values())
|
ConvNeXt-main
|
semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
from .checkpoint import load_checkpoint
from .layer_decay_optimizer_constructor import LearningRateDecayOptimizerConstructor
from .resize_transform import SETR_Resize
from .apex_runner.optimizer import DistOptimizerHook
from .train_api import train_segmentor
from .customized_text import CustomizedTextLoggerHook
__all__ = ['load_checkpoint', 'LearningRateDecayOptimizerConstructor', 'SETR_Resize', 'DistOptimizerHook', 'train_segmentor', 'CustomizedTextLoggerHook']
|
ConvNeXt-main
|
semantic_segmentation/mmcv_custom/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
from collections import OrderedDict
import torch
import mmcv
from mmcv.runner import HOOKS
from mmcv.runner import TextLoggerHook
@HOOKS.register_module()
class CustomizedTextLoggerHook(TextLoggerHook):
"""Customized Text Logger hook.
This logger prints out both lr and layer_0_lr.
"""
def _log_info(self, log_dict, runner):
# print exp name for users to distinguish experiments
# at every ``interval_exp_name`` iterations and the end of each epoch
if runner.meta is not None and 'exp_name' in runner.meta:
if (self.every_n_iters(runner, self.interval_exp_name)) or (
self.by_epoch and self.end_of_epoch(runner)):
exp_info = f'Exp name: {runner.meta["exp_name"]}'
runner.logger.info(exp_info)
if log_dict['mode'] == 'train':
lr_str = {}
for lr_type in ['lr', 'layer_0_lr']:
if isinstance(log_dict[lr_type], dict):
lr_str[lr_type] = []
for k, val in log_dict[lr_type].items():
                        lr_str[lr_type].append(f'{lr_type}_{k}: {val:.3e}')
                    lr_str[lr_type] = ' '.join(lr_str[lr_type])
else:
lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}'
# by epoch: Epoch [4][100/1000]
# by iter: Iter [100/100000]
if self.by_epoch:
log_str = f'Epoch [{log_dict["epoch"]}]' \
f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
else:
log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
log_str += f'{lr_str["lr"]}, {lr_str["layer_0_lr"]}, '
if 'time' in log_dict.keys():
self.time_sec_tot += (log_dict['time'] * self.interval)
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
log_str += f'eta: {eta_str}, '
log_str += f'time: {log_dict["time"]:.3f}, ' \
f'data_time: {log_dict["data_time"]:.3f}, '
# statistic memory
if torch.cuda.is_available():
log_str += f'memory: {log_dict["memory"]}, '
else:
# val/test time
# here 1000 is the length of the val dataloader
# by epoch: Epoch[val] [4][1000]
# by iter: Iter[val] [1000]
if self.by_epoch:
log_str = f'Epoch({log_dict["mode"]}) ' \
f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
else:
log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'
log_items = []
for name, val in log_dict.items():
# TODO: resolve this hack
# these items have been in log_str
if name in [
'mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time', 'data_time',
'memory', 'epoch'
]:
continue
if isinstance(val, float):
val = f'{val:.4f}'
log_items.append(f'{name}: {val}')
log_str += ', '.join(log_items)
runner.logger.info(log_str)
def log(self, runner):
if 'eval_iter_num' in runner.log_buffer.output:
# this doesn't modify runner.iter and is regardless of by_epoch
cur_iter = runner.log_buffer.output.pop('eval_iter_num')
else:
cur_iter = self.get_iter(runner, inner_iter=True)
log_dict = OrderedDict(
mode=self.get_mode(runner),
epoch=self.get_epoch(runner),
iter=cur_iter)
# record lr and layer_0_lr
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
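            # under layer-wise lr decay the smallest group lr belongs to layer 0
            # and the largest to the head, so both ends are reported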
log_dict['layer_0_lr'] = min(cur_lr)
log_dict['lr'] = max(cur_lr)
else:
assert isinstance(cur_lr, dict)
log_dict['lr'], log_dict['layer_0_lr'] = {}, {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
log_dict['layer_0_lr'].update({k: min(lr_)})
log_dict['lr'].update({k: max(lr_)})
if 'time' in runner.log_buffer.output:
# statistic memory
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
log_dict = dict(log_dict, **runner.log_buffer.output)
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
return log_dict
|
ConvNeXt-main
|
semantic_segmentation/mmcv_custom/customized_text.py
|
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
try:
import apex
except ImportError:
print('apex is not installed')
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``; saving the apex ``amp`` state is currently disabled below.
    By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
# checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
|
ConvNeXt-main
|
semantic_segmentation/mmcv_custom/apex_runner/checkpoint.py
|
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='CustomizedTextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
|
ConvNeXt-main
|
semantic_segmentation/configs/_base_/default_runtime.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.2,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
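    # head channels below are placeholder defaults; the per-model configs
    # override in_channels to match the backbone dims they use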
decode_head=dict(
type='UPerHead',
in_channels=[128, 256, 512, 1024],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=384,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
|
ConvNeXt-main
|
semantic_segmentation/configs/_base_/models/upernet_convnext.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[192, 384, 768, 1536],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[192, 384, 768, 1536],
num_classes=150,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=False,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=150,
),
auxiliary_head=dict(
in_channels=384,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 6})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.3,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=150,
),
auxiliary_head=dict(
in_channels=384,
num_classes=150
),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[128, 256, 512, 1024],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[128, 256, 512, 1024],
num_classes=150,
),
auxiliary_head=dict(
in_channels=512,
num_classes=150
),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[256, 512, 1024, 2048],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[256, 512, 1024, 2048],
num_classes=150,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.00008, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=False,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[128, 256, 512, 1024],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[128, 256, 512, 1024],
num_classes=150,
),
auxiliary_head=dict(
in_channels=512,
num_classes=150
),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=150,
),
auxiliary_head=dict(
in_channels=384,
num_classes=150
),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 6})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
in_chans=3,
depths=[3, 3, 27, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.3,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=150,
),
auxiliary_head=dict(
in_channels=384,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[128, 256, 512, 1024],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[128, 256, 512, 1024],
num_classes=150,
),
auxiliary_head=dict(
in_channels=512,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[256, 512, 1024, 2048],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[256, 512, 1024, 2048],
num_classes=150,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.00008, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=False,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[128, 256, 512, 1024],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[128, 256, 512, 1024],
num_classes=150,
),
auxiliary_head=dict(
in_channels=512,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 27, 3],
dims=[192, 384, 768, 1536],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[192, 384, 768, 1536],
num_classes=150,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 12})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=False,
)
|
ConvNeXt-main
|
semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from mmcv_custom import load_checkpoint
from mmseg.utils import get_root_logger
from mmseg.models.builder import BACKBONES
class Block(nn.Module):
r""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
requires_grad=True) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
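# Illustrative usage (comment only, not part of the original file): a Block maps
# (N, C, H, W) to the same shape, so blocks can be stacked freely within a stage:
#   blk = Block(dim=96, drop_path=0.1)
#   y = blk(torch.randn(2, 96, 56, 56))   # y.shape == (2, 96, 56, 56)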
@BACKBONES.register_module()
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3],
):
super().__init__()
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.out_indices = out_indices
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def forward_features(self, x):
outs = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
if i in self.out_indices:
norm_layer = getattr(self, f'norm{i}')
x_out = norm_layer(x)
outs.append(x_out)
return tuple(outs)
def forward(self, x):
x = self.forward_features(x)
return x
class LayerNorm(nn.Module):
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
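# Minimal sanity sketch (not part of the original file; assumes only torch is
# installed): the "channels_first" branch above is numerically equivalent to
# applying F.layer_norm over the channel dimension after permuting the input to
# channels_last, which is what the check below verifies.
if __name__ == '__main__':
    x = torch.randn(2, 8, 4, 4)
    ln_first = LayerNorm(8, data_format="channels_first")
    ln_last = LayerNorm(8, data_format="channels_last")
    y_first = ln_first(x)
    y_last = ln_last(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    # Both modules start from weight=1, bias=0, so the outputs should agree.
    print('max abs diff:', (y_first - y_last).abs().max().item())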
|
ConvNeXt-main
|
semantic_segmentation/backbone/convnext.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
from util import str2bool
parser = argparse.ArgumentParser(description='RL')
# PPO & other optimization arguments.
parser.add_argument(
'--algo',
type=str,
default='ppo',
choices=['ppo', 'a2c', 'acktr', 'ucb', 'mixreg'],
help='Which RL algorithm to use.')
parser.add_argument(
'--lr',
type=float,
default=1e-4,
help='Learning rate')
parser.add_argument(
'--eps',
type=float,
default=1e-5,
help='RMSprop optimizer epsilon.')
parser.add_argument(
'--alpha',
type=float,
default=0.99,
help='RMSprop optimizer alpha.')
parser.add_argument(
'--gamma',
type=float,
default=0.995,
help='Discount factor for rewards.')
parser.add_argument(
'--use_gae',
type=str2bool, nargs='?', const=True, default=True,
help='Use generalized advantage estimator.')
parser.add_argument(
'--gae_lambda',
type=float,
default=0.95,
help='GAE lambda parameter.')
parser.add_argument(
'--entropy_coef',
type=float,
default=0.0,
help='Entropy bonus coefficient for student.')
parser.add_argument(
'--adv_entropy_coef',
type=float,
default=0.0,
help='Entropy bonus coefficient for teacher.')
parser.add_argument(
'--value_loss_coef',
type=float,
default=0.5,
help='Value loss coefficient.')
parser.add_argument(
'--max_grad_norm',
type=float,
default=0.5,
help='Max norm of student gradients.')
parser.add_argument(
'--adv_max_grad_norm',
type=float,
default=0.5,
help='Max norm of teacher gradients.')
parser.add_argument(
'--normalize_returns',
type=str2bool, nargs='?', const=True, default=False,
help='Whether to normalize student returns.')
parser.add_argument(
'--adv_normalize_returns',
type=str2bool, nargs='?', const=True, default=False,
help='Whether to normalize teacher returns.')
parser.add_argument(
'--use_popart',
type=str2bool, nargs='?', const=True, default=False,
help='Whether to normalize student values via PopArt.')
parser.add_argument(
'--adv_use_popart',
type=str2bool, nargs='?', const=True, default=False,
help='Whether to normalize teacher values using PopArt.')
parser.add_argument(
'--seed',
type=int,
default=1,
help='Experiment random seed.')
parser.add_argument(
'--num_processes',
type=int,
default=32,
help='How many training CPU processes to use for experience collection.')
parser.add_argument(
'--num_steps',
type=int,
default=256,
help='Rollout horizon for A2C-style algorithms.')
parser.add_argument(
'--ppo_epoch',
type=int,
default=5,
help='Number of PPO epochs.')
parser.add_argument(
'--adv_ppo_epoch',
type=int,
default=5,
help='Number of PPO epochs used by teacher.')
parser.add_argument(
'--num_mini_batch',
type=int,
default=1,
help='Number of batches for PPO for student.')
parser.add_argument(
'--adv_num_mini_batch',
type=int,
default=1,
help='Number of batches for PPO for teacher.')
parser.add_argument(
'--clip_param',
type=float,
default=0.2,
help='PPO advantage clipping.')
parser.add_argument(
'--clip_value_loss',
type=str2bool,
default=True,
help='PPO value loss clipping.')
parser.add_argument(
'--clip_reward',
type=float,
default=None,
help="Amount to clip student rewards. By default no clipping.")
parser.add_argument(
'--adv_clip_reward',
type=float,
default=None,
help="Amount to clip teacher rewards. By default no clipping.")
parser.add_argument(
'--num_env_steps',
type=int,
default=500000,
help='Number of environment steps for training.')
# Architecture arguments.
parser.add_argument(
'--recurrent_arch',
type=str,
default='lstm',
choices=['gru', 'lstm'],
help='RNN architecture for student and teacher.')
parser.add_argument(
'--recurrent_agent',
type=str2bool, nargs='?', const=True, default=True,
help='Use a RNN architecture for student.')
parser.add_argument(
'--recurrent_adversary_env',
type=str2bool, nargs='?', const=True, default=False,
help='Use a RNN architecture for teacher.')
parser.add_argument(
'--recurrent_hidden_size',
type=int,
default=256,
help='Recurrent hidden state size.')
# === UED arguments ===
parser.add_argument(
'--ued_algo',
type=str,
default='paired',
choices=['domain_randomization', 'minimax',
'paired', 'flexible_paired',
'alp_gmm'],
help='UED algorithm')
parser.add_argument(
'--protagonist_plr',
type=str2bool, nargs='?', const=True, default=False,
help="PLR via protagonist's trajectories.")
parser.add_argument(
'--antagonist_plr',
type=str2bool, nargs='?', const=True, default=False,
help="PLR via antagonist's lotrajectoriesss. If protagonist_plr is True, each agent trains using their own.")
parser.add_argument(
'--use_reset_random_dr',
type=str2bool, nargs='?', const=True, default=False,
help='''
Domain randomization (DR) resets using reset random.
If False, DR resets using a uniformly random adversary policy.
Defaults to False for legacy reasons.''')
# PLR arguments.
parser.add_argument(
"--use_plr",
type=str2bool, nargs='?', const=True, default=False,
help='Whether to use PLR.'
)
parser.add_argument(
"--level_replay_strategy",
type=str,
default='value_l1',
choices=['off', 'random', 'uniform', 'sequential',
'policy_entropy', 'least_confidence', 'min_margin',
'gae', 'value_l1', 'signed_value_loss', 'positive_value_loss',
'grounded_signed_value_loss', 'grounded_positive_value_loss',
'one_step_td_error', 'alt_advantage_abs',
'tscl_window'],
help="PLR score function.")
parser.add_argument(
"--level_replay_eps",
type=float,
default=0.05,
help="PLR epsilon for eps-greedy sampling. (Not typically used.)")
parser.add_argument(
"--level_replay_score_transform",
type=str,
default='rank',
choices=['constant', 'max', 'eps_greedy', 'rank', 'power', 'softmax', 'match', 'match_rank'],
help="PLR score transform.")
parser.add_argument(
"--level_replay_temperature",
type=float,
default=0.1,
help="PLR replay distribution temperature.")
parser.add_argument(
"--level_replay_schedule",
type=str,
default='proportionate',
help="PLR schedule for annealing the replay rate.")
parser.add_argument(
"--level_replay_rho",
type=float,
default=1.0,
help="Minimum fill ratio for PLR buffer before sampling replays.")
parser.add_argument(
"--level_replay_prob",
type=float,
default=0.,
help="Probability of sampling a replay level instead of a new level.")
parser.add_argument(
"--level_replay_alpha",
type=float,
default=1.0,
help="PLR level score EWA smoothing factor.")
parser.add_argument(
"--staleness_coef",
type=float,
default=0.3,
help="Staleness-sampling weighting.")
parser.add_argument(
"--staleness_transform",
type=str,
default='power',
choices=['max', 'eps_greedy', 'rank', 'power', 'softmax'],
help="Staleness score transform.")
parser.add_argument(
"--staleness_temperature",
type=float,
default=1.0,
help="Staleness distribution temperature.")
parser.add_argument(
"--train_full_distribution",
type=str2bool, nargs='?', const=True, default=True,
help='Train on the full distribution of levels.')
parser.add_argument(
"--level_replay_seed_buffer_size",
type=int,
default=4000,
help="Size of PLR level buffer.")
parser.add_argument(
"--level_replay_seed_buffer_priority",
type=str,
default='replay_support',
choices=['score', 'replay_support'],
help="How to prioritize level buffer members when capacity is reached.")
parser.add_argument(
"--reject_unsolvable_seeds",
type=str2bool, nargs='?', const=True, default=False,
help='Do not add unsolvable seeds to the PLR buffer.')
parser.add_argument(
"--no_exploratory_grad_updates",
type=str2bool, nargs='?', const=True, default=False,
help='Turns on Robust PLR: Only perform gradient updates for episodes on replay levels.'
)
# ACCEL arguments.
parser.add_argument(
"--use_editor",
type=str2bool, nargs='?', const=True, default=False,
help='Turns on ACCEL: Evaluate mutated replay levels for entry in PLR buffer.')
parser.add_argument(
"--level_editor_prob",
type=float,
default=0.,
help="Probability of mutating a replayed level under PLR.")
parser.add_argument(
"--level_editor_method",
type=str,
default='random',
choices=['random'],
help="Method for mutating levels. ACCEL simply uses random mutations.")
parser.add_argument(
"--base_levels",
type=str,
default='batch',
choices=['batch', 'easy'],
help="What kind of replayed level under PLR do we edit?")
parser.add_argument(
"--num_edits",
type=int,
default=0,
help="Number of edits to make each time a level is mutated.")
# Fine-tuning arguments.
parser.add_argument(
'--xpid_finetune',
default=None,
help='Checkpoint directory containing model for fine-tuning.')
parser.add_argument(
'--model_finetune',
default='model',
help='Name of .tar to load for fine-tuning.')
# Hardware arguments.
parser.add_argument(
'--no_cuda',
type=str2bool, nargs='?', const=True, default=False,
help='Disables CUDA training.')
# Logging arguments.
parser.add_argument(
'--xpid',
default='latest',
help='Name for the training run. Used for the name of the output results directory.')
parser.add_argument(
'--log_dir',
default='~/logs/dcd/',
help='Directory in which to save experimental outputs.')
parser.add_argument(
'--log_interval',
type=int,
default=1,
help='Log training stats every this many updates.')
parser.add_argument(
"--checkpoint_interval",
type=int,
default=100,
help="Save model every this many updates.")
parser.add_argument(
"--archive_interval",
type=int,
default=0,
help="Save an archived checkpoint every this many updates.")
parser.add_argument(
"--checkpoint_basis",
type=str,
default="num_updates",
choices=["num_updates", "student_grad_updates"],
help=f'''Archive interval basis.
num_updates: By # update cycles (full rollout cycle across all agents);
student_grad_updates: By # grad updates performed by the student agent.''')
parser.add_argument(
"--weight_log_interval",
type=int,
default=0,
help="Save level weights every this many updates. *Only for PLR with a fixed level buffer.*")
parser.add_argument(
"--screenshot_interval",
type=int,
default=5000,
help="Save screenshot of the training environment every this many updates.")
parser.add_argument(
"--screenshot_batch_size",
type=int,
default=1,
help="Number of training environments to screenshot each screenshot_interval.")
parser.add_argument(
'--render',
type=str2bool, nargs='?', const=True, default=False,
help='Render the environment to screen.')
parser.add_argument(
"--checkpoint",
type=str2bool, nargs='?', const=True, default=False,
help="Begin training from checkpoint. Needed for preemptible training on clusters.")
parser.add_argument(
"--disable_checkpoint",
type=str2bool, nargs='?', const=True, default=False,
help="Disable checkpointing.")
parser.add_argument(
'--log_grad_norm',
type=str2bool, nargs='?', const=True, default=False,
help="Log the gradient norm of the actor-critic.")
parser.add_argument(
'--log_action_complexity',
type=str2bool, nargs='?', const=True, default=False,
help="Log action-trajectory complexity metrics throughout training.")
parser.add_argument(
'--log_replay_complexity',
type=str2bool, nargs='?', const=True, default=False,
help="Log complexity metrics of replay levels.")
parser.add_argument(
'--log_plr_buffer_stats',
type=str2bool, nargs='?', const=True, default=False,
help="Log PLR buffer stats.")
parser.add_argument(
"--verbose",
type=str2bool, nargs='?', const=True, default=False,
help="Whether to print logs to stdout.")
# Evaluation arguments.
parser.add_argument(
'--test_interval',
type=int,
default=250,
help='Evaluate on test environments every this many updates.')
parser.add_argument(
'--test_num_episodes',
type=int,
default=10,
help='Number of test episodes per environment.')
parser.add_argument(
'--test_num_processes',
type=int,
default=2,
help='Number of test processes per environment.')
parser.add_argument(
'--test_env_names',
type=str,
default='MultiGrid-SixteenRooms-v0,MultiGrid-Labyrinth-v0,MultiGrid-Maze-v0',
help='CSV string of test environments for evaluation during training.')
# Environment arguments.
parser.add_argument(
'--env_name',
type=str,
default='MultiGrid-GoalLastAdversarial-v0',
help='Environment to train on.')
parser.add_argument(
'--handle_timelimits',
type=str2bool, nargs='?', const=True, default=False,
help="Bootstrap off of early termination states. Requires env to be wrapped by envs.wrappers.TimeLimit.")
parser.add_argument(
'--singleton_env',
type=str2bool, nargs='?', const=True, default=False,
help="When using a fixed env, whether the same environment should also be reused across workers.")
parser.add_argument(
'--use_global_critic',
type=str2bool, nargs='?', const=True, default=False,
help="Student's critic is fully observable. *Only for MultiGrid.*")
parser.add_argument(
'--use_global_policy',
type=str2bool, nargs='?', const=True, default=False,
help="Student's policy is fully observable. *Only for MultiGrid.*")
# CarRacing-specific arguments.
parser.add_argument(
'--grayscale',
type=str2bool, nargs='?', const=True, default=False,
help="Convert observations to grayscale for CarRacing.")
parser.add_argument(
'--crop_frame',
type=str2bool, nargs='?', const=True, default=False,
help="Convert observations to grayscale for CarRacing.")
parser.add_argument(
'--reward_shaping',
type=str2bool, nargs='?', const=True, default=False,
help="Use custom shaped rewards for CarRacing.")
parser.add_argument(
'--num_action_repeat',
type=int, default=1,
help="Repeat actions this many times for CarRacing.")
parser.add_argument(
'--frame_stack',
type=int, default=1,
help="Number of observation frames to stack for CarRacing.")
parser.add_argument(
'--num_control_points',
type=int, default=12,
help="Number of bezier control points for CarRacing-Bezier environments.")
parser.add_argument(
'--min_rad_ratio',
type=float, default=0.333333333,
help="Default minimum radius ratio for CarRacing-Classic (polar coordinates).")
parser.add_argument(
'--max_rad_ratio',
type=float, default=1.0,
help="Default minimum radius ratio for CarRacing-Classic (polar coordinates).")
parser.add_argument(
'--use_skip',
type=str2bool, nargs='?', const=True, default=False,
help="CarRacing teacher can use a skip action.")
parser.add_argument(
'--choose_start_pos',
type=str2bool, nargs='?', const=True, default=False,
help="CarRacing teacher also chooses the start position.")
parser.add_argument(
'--use_sketch',
type=str2bool, nargs='?', const=True, default=True,
help="CarRacing teacher designs tracks on a downsampled grid.")
parser.add_argument(
'--use_categorical_adv',
type=str2bool, nargs='?', const=True, default=False,
help="CarRacing teacher uses a categorical policy.")
parser.add_argument(
'--sparse_rewards',
type=str2bool, nargs='?', const=True, default=False,
help="Use sparse rewards + goal placement for CarRacing.")
parser.add_argument(
'--num_goal_bins',
type=int, default=1,
help="Number of goal bins when using sparse rewards for CarRacing.")
|
dcd-main
|
arguments.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import time
import timeit
import logging
from arguments import parser
import torch
import gym
import matplotlib as mpl
import matplotlib.pyplot as plt
from baselines.logger import HumanOutputFormat
display = None
if sys.platform.startswith('linux'):
print('Setting up virtual display')
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900), color_depth=24)
display.start()
from envs.multigrid import *
from envs.multigrid.adversarial import *
from envs.box2d import *
from envs.bipedalwalker import *
from envs.runners.adversarial_runner import AdversarialRunner
from util import make_agent, FileWriter, safe_checkpoint, create_parallel_env, make_plr_args, save_images
from eval import Evaluator
if __name__ == '__main__':
os.environ["OMP_NUM_THREADS"] = "1"
args = parser.parse_args()
# === Configure logging ==
if args.xpid is None:
args.xpid = "lr-%s" % time.strftime("%Y%m%d-%H%M%S")
log_dir = os.path.expandvars(os.path.expanduser(args.log_dir))
filewriter = FileWriter(
xpid=args.xpid, xp_args=args.__dict__, rootdir=log_dir
)
screenshot_dir = os.path.join(log_dir, args.xpid, 'screenshots')
if not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir, exist_ok=True)
def log_stats(stats):
filewriter.log(stats)
if args.verbose:
HumanOutputFormat(sys.stdout).writekvs(stats)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
else:
logging.disable(logging.CRITICAL)
# === Determine device ====
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if args.cuda else "cpu")
if 'cuda' in device.type:
torch.backends.cudnn.benchmark = True
print('Using CUDA\n')
# === Create parallel envs ===
venv, ued_venv = create_parallel_env(args)
is_training_env = args.ued_algo in ['paired', 'flexible_paired', 'minimax']
is_paired = args.ued_algo in ['paired', 'flexible_paired']
agent = make_agent(name='agent', env=venv, args=args, device=device)
adversary_agent, adversary_env = None, None
if is_paired:
adversary_agent = make_agent(name='adversary_agent', env=venv, args=args, device=device)
if is_training_env:
adversary_env = make_agent(name='adversary_env', env=venv, args=args, device=device)
if args.ued_algo == 'domain_randomization' and args.use_plr and not args.use_reset_random_dr:
adversary_env = make_agent(name='adversary_env', env=venv, args=args, device=device)
adversary_env.random()
# === Create runner ===
plr_args = None
if args.use_plr:
plr_args = make_plr_args(args, venv.observation_space, venv.action_space)
train_runner = AdversarialRunner(
args=args,
venv=venv,
agent=agent,
ued_venv=ued_venv,
adversary_agent=adversary_agent,
adversary_env=adversary_env,
flexible_protagonist=False,
train=True,
plr_args=plr_args,
device=device)
# === Configure checkpointing ===
timer = timeit.default_timer
initial_update_count = 0
last_logged_update_at_restart = -1
checkpoint_path = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (log_dir, args.xpid, "model.tar"))
)
## This is only used for the first iteration of finetuning
if args.xpid_finetune:
model_fname = f'{args.model_finetune}.tar'
base_checkpoint_path = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (log_dir, args.xpid_finetune, model_fname))
)
def checkpoint(index=None):
if args.disable_checkpoint:
return
safe_checkpoint({'runner_state_dict': train_runner.state_dict()},
checkpoint_path,
index=index,
archive_interval=args.archive_interval)
logging.info("Saved checkpoint to %s", checkpoint_path)
# === Load checkpoint ===
if args.checkpoint and os.path.exists(checkpoint_path):
checkpoint_states = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
last_logged_update_at_restart = filewriter.latest_tick() # ticks are 0-indexed updates
train_runner.load_state_dict(checkpoint_states['runner_state_dict'])
initial_update_count = train_runner.num_updates
logging.info(f"Resuming preempted job after {initial_update_count} updates\n") # 0-indexed next update
elif args.xpid_finetune and not os.path.exists(checkpoint_path):
checkpoint_states = torch.load(base_checkpoint_path)
state_dict = checkpoint_states['runner_state_dict']
agent_state_dict = state_dict.get('agent_state_dict')
optimizer_state_dict = state_dict.get('optimizer_state_dict')
train_runner.agents['agent'].algo.actor_critic.load_state_dict(agent_state_dict['agent'])
train_runner.agents['agent'].algo.optimizer.load_state_dict(optimizer_state_dict['agent'])
# === Set up Evaluator ===
evaluator = None
if args.test_env_names:
evaluator = Evaluator(
args.test_env_names.split(','),
num_processes=args.test_num_processes,
num_episodes=args.test_num_episodes,
frame_stack=args.frame_stack,
grayscale=args.grayscale,
num_action_repeat=args.num_action_repeat,
use_global_critic=args.use_global_critic,
use_global_policy=args.use_global_policy,
device=device)
# === Train ===
last_checkpoint_idx = getattr(train_runner, args.checkpoint_basis)
update_start_time = timer()
num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes
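# e.g. with the parser defaults above (num_env_steps=500000, num_steps=256,
# num_processes=32) this gives 500000 // 256 // 32 == 61 update cycles.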
for j in range(initial_update_count, num_updates):
stats = train_runner.run()
# === Perform logging ===
if train_runner.num_updates <= last_logged_update_at_restart:
continue
log = (j % args.log_interval == 0) or j == num_updates - 1
save_screenshot = \
args.screenshot_interval > 0 and \
(j % args.screenshot_interval == 0)
if log:
# Eval
test_stats = {}
if evaluator is not None and (j % args.test_interval == 0 or j == num_updates - 1):
test_stats = evaluator.evaluate(train_runner.agents['agent'])
stats.update(test_stats)
else:
stats.update({k:None for k in evaluator.get_stats_keys()})
update_end_time = timer()
num_incremental_updates = 1 if j == 0 else args.log_interval
sps = num_incremental_updates*(args.num_processes * args.num_steps) / (update_end_time - update_start_time)
update_start_time = update_end_time
stats.update({'sps': sps})
stats.update(test_stats) # Ensures sps column is always before test stats
log_stats(stats)
checkpoint_idx = getattr(train_runner, args.checkpoint_basis)
if checkpoint_idx != last_checkpoint_idx:
is_last_update = j == num_updates - 1
if is_last_update or \
(train_runner.num_updates > 0 and checkpoint_idx % args.checkpoint_interval == 0):
checkpoint(checkpoint_idx)
logging.info(f"\nSaved checkpoint after update {j}")
logging.info(f"\nLast update: {is_last_update}")
elif train_runner.num_updates > 0 and args.archive_interval > 0 \
and checkpoint_idx % args.archive_interval == 0:
checkpoint(checkpoint_idx)
logging.info(f"\nArchived checkpoint after update {j}")
if save_screenshot:
level_info = train_runner.sampled_level_info
if args.env_name.startswith('BipedalWalker'):
encodings = venv.get_level()
df = bipedalwalker_df_from_encodings(args.env_name, encodings)
if args.use_editor and level_info:
df.to_csv(os.path.join(
screenshot_dir,
f"update{j}-replay{level_info['level_replay']}-n_edits{level_info['num_edits'][0]}.csv"))
else:
df.to_csv(os.path.join(
screenshot_dir,
f'update{j}.csv'))
else:
venv.reset_agent()
images = venv.get_images()
if args.use_editor and level_info:
save_images(
images[:args.screenshot_batch_size],
os.path.join(
screenshot_dir,
f"update{j}-replay{level_info['level_replay']}-n_edits{level_info['num_edits'][0]}.png"),
normalize=True, channels_first=False)
else:
save_images(
images[:args.screenshot_batch_size],
os.path.join(screenshot_dir, f'update{j}.png'),
normalize=True, channels_first=False)
plt.close()
evaluator.close()
venv.close()
if display:
display.stop()
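# Illustrative launch command (comment only; flag values are examples, and all
# flags used here are defined in arguments.py above):
#   python train.py --xpid=paired-maze-example --env_name=MultiGrid-GoalLastAdversarial-v0 \
#       --ued_algo=paired --num_env_steps=500000 --verbose=True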
|
dcd-main
|
train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import csv
import json
import argparse
import fnmatch
import re
from collections import defaultdict
import numpy as np
import torch
from baselines.common.vec_env import DummyVecEnv
from baselines.logger import HumanOutputFormat
from tqdm import tqdm
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
from envs.registration import make as gym_make
from envs.multigrid.maze import *
from envs.multigrid.crossing import *
from envs.multigrid.fourrooms import *
from envs.multigrid.mst_maze import *
from envs.box2d import *
from envs.bipedalwalker import *
from envs.wrappers import VecMonitor, VecPreprocessImageWrapper, ParallelAdversarialVecEnv, \
MultiGridFullyObsWrapper, VecFrameStack, CarRacingWrapper
from util import DotDict, str2bool, make_agent, create_parallel_env, is_discrete_actions
from arguments import parser
"""
Example usage:
python -m eval \
--env_name=MultiGrid-SixteenRooms-v0 \
--xpid=<xpid> \
--base_path="~/logs/dcd" \
--result_path="eval_results/"
--verbose
"""
def parse_args():
parser = argparse.ArgumentParser(description='Eval')
parser.add_argument(
'--base_path',
type=str,
default='~/logs/dcd',
help='Base path to experiment results directories.')
parser.add_argument(
'--xpid',
type=str,
default='latest',
help='Experiment ID (result directory name) for evaluation.')
parser.add_argument(
'--prefix',
type=str,
default=None,
help='Experiment ID prefix for evaluation (evaluate all matches).'
)
parser.add_argument(
'--env_names',
type=str,
default='MultiGrid-Labyrinth-v0',
help='CSV string of evaluation environments.')
parser.add_argument(
'--result_path',
type=str,
default='eval_results/',
help='Relative path to evaluation results directory.')
parser.add_argument(
'--benchmark',
type=str,
default=None,
choices=['maze', 'f1', 'bipedal', 'poetrose'],
help="Name of benchmark for evaluation.")
parser.add_argument(
'--accumulator',
type=str,
default=None,
help="Function for accumulating across multiple evaluation runs.")
parser.add_argument(
'--singleton_env',
type=str2bool, nargs='?', const=True, default=False,
help="When using a fixed env, whether the same environment should also be reused across workers.")
parser.add_argument(
'--seed',
type=int,
default=1,
help='Random seed.')
parser.add_argument(
'--max_seeds',
type=int,
default=None,
help='Maximum number of matched experiment IDs to evaluate.')
parser.add_argument(
'--num_processes',
type=int,
default=2,
help='Number of CPU processes to use.')
parser.add_argument(
'--max_num_processes',
type=int,
default=10,
help='Maximum number of CPU processes to use.')
parser.add_argument(
'--num_episodes',
type=int,
default=100,
help='Number of evaluation episodes per xpid per environment.')
parser.add_argument(
'--model_tar',
type=str,
default='model',
help='Name of .tar to evaluate.')
parser.add_argument(
'--model_name',
type=str,
default='agent',
choices=['agent', 'adversary_agent'],
help='Which agent to evaluate.')
parser.add_argument(
'--deterministic',
type=str2bool, nargs='?', const=True, default=False,
help="Evaluate policy greedily.")
parser.add_argument(
'--verbose',
type=str2bool, nargs='?', const=True, default=False,
help="Show logging messages in stdout")
parser.add_argument(
'--render',
type=str2bool, nargs='?', const=True, default=False,
help="Render environment in first evaluation process to screen.")
parser.add_argument(
'--record_video',
type=str2bool, nargs='?', const=True, default=False,
help="Record video of first environment evaluation process.")
return parser.parse_args()
class Evaluator(object):
def __init__(self,
env_names,
num_processes,
num_episodes=10,
record_video=False,
device='cpu',
**kwargs):
self.kwargs = kwargs # kwargs for env wrappers
self._init_parallel_envs(
env_names, num_processes, device=device, record_video=record_video, **kwargs)
self.num_episodes = num_episodes
if 'Bipedal' in env_names[0]:
self.solved_threshold = 230
else:
self.solved_threshold = 0
def get_stats_keys(self):
keys = []
for env_name in self.env_names:
keys += [f'solved_rate:{env_name}', f'test_returns:{env_name}']
return keys
@staticmethod
def make_env(env_name, record_video=False, **kwargs):
if env_name in ['BipedalWalker-v3', 'BipedalWalkerHardcore-v3']:
env = gym.make(env_name)
else:
env = gym_make(env_name)
is_multigrid = env_name.startswith('MultiGrid')
is_car_racing = env_name.startswith('CarRacing')
if is_car_racing:
grayscale = kwargs.get('grayscale', False)
num_action_repeat = kwargs.get('num_action_repeat', 8)
nstack = kwargs.get('frame_stack', 4)
crop = kwargs.get('crop_frame', False)
env = CarRacingWrapper(
env=env,
grayscale=grayscale,
reward_shaping=False,
num_action_repeat=num_action_repeat,
nstack=nstack,
crop=crop,
eval_=True)
if record_video:
from gym.wrappers.monitor import Monitor
env = Monitor(env, "videos/", force=True)
print('Recording video!', flush=True)
if is_multigrid and kwargs.get('use_global_policy'):
env = MultiGridFullyObsWrapper(env, is_adversarial=False)
return env
@staticmethod
def wrap_venv(venv, env_name, device='cpu'):
is_multigrid = env_name.startswith('MultiGrid') or env_name.startswith('MiniGrid')
is_car_racing = env_name.startswith('CarRacing')
is_bipedal = env_name.startswith('BipedalWalker')
obs_key = None
scale = None
if is_multigrid:
obs_key = 'image'
scale = 10.0
# Channels first
transpose_order = [2,0,1]
if is_bipedal:
transpose_order = None
venv = VecMonitor(venv=venv, filename=None, keep_buf=100)
venv = VecPreprocessImageWrapper(venv=venv, obs_key=obs_key,
transpose_order=transpose_order, scale=scale, device=device)
return venv
def _init_parallel_envs(self, env_names, num_processes, device=None, record_video=False, **kwargs):
self.env_names = env_names
self.num_processes = num_processes
self.device = device
self.venv = {env_name:None for env_name in env_names}
make_fn = []
for env_name in env_names:
make_fn = [lambda: Evaluator.make_env(env_name, record_video, **kwargs)]*self.num_processes
venv = ParallelAdversarialVecEnv(make_fn, adversary=False, is_eval=True)
venv = Evaluator.wrap_venv(venv, env_name, device=device)
self.venv[env_name] = venv
self.is_discrete_actions = is_discrete_actions(self.venv[env_names[0]])
def close(self):
for _, venv in self.venv.items():
venv.close()
def evaluate(self,
agent,
deterministic=False,
show_progress=False,
render=False,
accumulator='mean'):
# Evaluate agent for N episodes
venv = self.venv
env_returns = {}
env_solved_episodes = {}
for env_name, venv in self.venv.items():
returns = []
solved_episodes = 0
obs = venv.reset()
recurrent_hidden_states = torch.zeros(
self.num_processes, agent.algo.actor_critic.recurrent_hidden_state_size, device=self.device)
if agent.algo.actor_critic.is_recurrent and agent.algo.actor_critic.rnn.arch == 'lstm':
recurrent_hidden_states = (recurrent_hidden_states, torch.zeros_like(recurrent_hidden_states))
masks = torch.ones(self.num_processes, 1, device=self.device)
pbar = None
if show_progress:
pbar = tqdm(total=self.num_episodes)
while len(returns) < self.num_episodes:
# Sample actions
with torch.no_grad():
_, action, _, recurrent_hidden_states = agent.act(
obs, recurrent_hidden_states, masks, deterministic=deterministic)
# Observe reward and next obs
action = action.cpu().numpy()
if not self.is_discrete_actions:
action = agent.process_action(action)
obs, reward, done, infos = venv.step(action)
masks = torch.tensor(
[[0.0] if done_ else [1.0] for done_ in done],
dtype=torch.float32,
device=self.device)
for i, info in enumerate(infos):
if 'episode' in info.keys():
returns.append(info['episode']['r'])
if returns[-1] > self.solved_threshold:
solved_episodes += 1
if pbar:
pbar.update(1)
# zero hidden states
if agent.is_recurrent:
recurrent_hidden_states[0][i].zero_()
recurrent_hidden_states[1][i].zero_()
if len(returns) >= self.num_episodes:
break
if render:
venv.render_to_screen()
if pbar:
pbar.close()
env_returns[env_name] = returns
env_solved_episodes[env_name] = solved_episodes
stats = {}
for env_name in self.env_names:
if accumulator == 'mean':
stats[f"solved_rate:{env_name}"] = env_solved_episodes[env_name]/self.num_episodes
if accumulator == 'mean':
stats[f"test_returns:{env_name}"] = np.mean(env_returns[env_name])
else:
stats[f"test_returns:{env_name}"] = env_returns[env_name]
return stats
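# With the default accumulator='mean', the returned dict contains one
# solved_rate and one test_returns entry per environment, matching
# get_stats_keys(), e.g. (values shown are illustrative only):
#   {'solved_rate:MultiGrid-Labyrinth-v0': 0.4,
#    'test_returns:MultiGrid-Labyrinth-v0': 0.31}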
def _get_f1_env_names():
env_names = [f'CarRacingF1-{name}-v0' for name, cls in formula1.__dict__.items() if isinstance(cls, RaceTrack)]
env_names.remove('CarRacingF1-LagunaSeca-v0')
return env_names
def _get_zs_minigrid_env_names():
env_names = [
'MultiGrid-SixteenRooms-v0',
'MultiGrid-SixteenRoomsFewerDoors-v0',
'MultiGrid-Labyrinth-v0',
'MultiGrid-Labyrinth2-v0',
'MultiGrid-Maze-v0',
'MultiGrid-Maze2-v0',
"MultiGrid-LargeCorridor-v0",
"MultiGrid-PerfectMazeMedium-v0",
"MultiGrid-PerfectMazeLarge-v0",
"MultiGrid-PerfectMazeXL-v0",
]
return env_names
def _get_bipedal_env_names():
env_names = [
"BipedalWalker-v3",
"BipedalWalkerHardcore-v3",
"BipedalWalker-Med-Stairs-v0",
"BipedalWalker-Med-PitGap-v0",
"BipedalWalker-Med-StumpHeight-v0",
"BipedalWalker-Med-Roughness-v0",
]
return env_names
def _get_poet_rose_env_names():
env_names = [f'BipedalWalker-POET-Rose-{id}-v0' for id in ['1a', '1b', '2a', '2b', '3a', '3b']]
return env_names
if __name__ == '__main__':
os.environ["OMP_NUM_THREADS"] = "1"
display = None
if sys.platform.startswith('linux'):
print('Setting up virtual display')
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900), color_depth=24)
display.start()
args = DotDict(vars(parse_args()))
args.num_processes = min(args.num_processes, args.num_episodes)
# === Determine device ====
device = 'cpu'
# === Load checkpoint ===
# Load meta.json into flags object
base_path = os.path.expandvars(os.path.expanduser(args.base_path))
xpids = [args.xpid]
if args.prefix is not None:
all_xpids = fnmatch.filter(os.listdir(base_path), f"{args.prefix}*")
filter_re = re.compile('.*_[0-9]*$')
xpids = [x for x in all_xpids if filter_re.match(x)]
# Set up results management
os.makedirs(args.result_path, exist_ok=True)
if args.prefix is not None:
result_fname = args.prefix
else:
result_fname = args.xpid
result_fname = f"{result_fname}-{args.model_tar}-{args.model_name}"
result_fpath = os.path.join(args.result_path, result_fname)
if os.path.exists(f'{result_fpath}.csv'):
result_fpath = os.path.join(args.result_path, f'{result_fname}_redo')
result_fpath = f'{result_fpath}.csv'
csvout = open(result_fpath, 'w', newline='')
csvwriter = csv.writer(csvout)
env_results = defaultdict(list)
# Get envs
if args.benchmark == 'maze':
env_names = _get_zs_minigrid_env_names()
elif args.benchmark == 'f1':
env_names = _get_f1_env_names()
elif args.benchmark == 'bipedal':
env_names = _get_bipedal_env_names()
elif args.benchmark == 'poetrose':
env_names = _get_poet_rose_env_names()
else:
env_names = args.env_names.split(',')
num_envs = len(env_names)
if num_envs*args.num_processes > args.max_num_processes:
chunk_size = args.max_num_processes//args.num_processes
else:
chunk_size = num_envs
num_chunks = int(np.ceil(num_envs/chunk_size))
if args.record_video:
num_chunks = 1
chunk_size = 1
args.num_processes = 1
num_seeds = 0
for xpid in xpids:
if args.max_seeds is not None and num_seeds >= args.max_seeds:
break
xpid_dir = os.path.join(base_path, xpid)
meta_json_path = os.path.join(xpid_dir, 'meta.json')
model_tar = f'{args.model_tar}.tar'
checkpoint_path = os.path.join(xpid_dir, model_tar)
if os.path.exists(checkpoint_path):
meta_json_file = open(meta_json_path)
xpid_flags = DotDict(json.load(meta_json_file)['args'])
make_fn = [lambda: Evaluator.make_env(env_names[0])]
dummy_venv = ParallelAdversarialVecEnv(make_fn, adversary=False, is_eval=True)
dummy_venv = Evaluator.wrap_venv(dummy_venv, env_name=env_names[0], device=device)
# Load the agent
agent = make_agent(name='agent', env=dummy_venv, args=xpid_flags, device=device)
try:
checkpoint = torch.load(checkpoint_path, map_location='cpu')
except:
continue
model_name = args.model_name
if 'runner_state_dict' in checkpoint:
agent.algo.actor_critic.load_state_dict(checkpoint['runner_state_dict']['agent_state_dict'][model_name])
else:
agent.algo.actor_critic.load_state_dict(checkpoint)
num_seeds += 1
# Evaluate environment batch in increments of chunk size
for i in range(num_chunks):
start_idx = i*chunk_size
env_names_ = env_names[start_idx:start_idx+chunk_size]
# Evaluate the model
xpid_flags.update(args)
xpid_flags.update({"use_skip": False})
evaluator = Evaluator(env_names_,
num_processes=args.num_processes,
num_episodes=args.num_episodes,
frame_stack=xpid_flags.frame_stack,
grayscale=xpid_flags.grayscale,
use_global_critic=xpid_flags.use_global_critic,
record_video=args.record_video)
stats = evaluator.evaluate(agent,
deterministic=args.deterministic,
show_progress=args.verbose,
render=args.render,
accumulator=args.accumulator)
for k,v in stats.items():
if args.accumulator:
env_results[k].append(v)
else:
env_results[k] += v
evaluator.close()
else:
print(f'No model path {checkpoint_path}')
output_results = {}
for k,_ in stats.items():
results = env_results[k]
output_results[k] = f'{np.mean(results):.2f} +/- {np.std(results):.2f}'
q1 = np.percentile(results, 25, interpolation='midpoint')
q3 = np.percentile(results, 75, interpolation='midpoint')
median = np.median(results)
output_results[f'iq_{k}'] = f'{q1:.2f}--{median:.2f}--{q3:.2f}'
print(f"{k}: {output_results[k]}")
HumanOutputFormat(sys.stdout).writekvs(output_results)
if args.accumulator:
csvwriter.writerow(['metric',] + [x for x in range(num_seeds)])
else:
csvwriter.writerow(['metric',] + [x for x in range(num_seeds*args.num_episodes)])
for k,v in env_results.items():
row = [k,] + v
csvwriter.writerow(row)
if display:
display.stop()
|
dcd-main
|
eval.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from algos import PPO, RolloutStorage, ACAgent
from models import \
MultigridNetwork, MultigridGlobalCriticNetwork, \
CarRacingNetwork, \
CarRacingBezierAdversaryEnvNetwork, \
BipedalWalkerStudentPolicy, \
BipedalWalkerAdversaryPolicy
def model_for_multigrid_agent(
env,
agent_type='agent',
recurrent_arch=None,
recurrent_hidden_size=256,
use_global_critic=False,
use_global_policy=False):
if agent_type == 'adversary_env':
adversary_observation_space = env.adversary_observation_space
adversary_action_space = env.adversary_action_space
adversary_max_timestep = adversary_observation_space['time_step'].high[0] + 1
adversary_random_z_dim = adversary_observation_space['random_z'].shape[0]
model = MultigridNetwork(
observation_space=adversary_observation_space,
action_space=adversary_action_space,
conv_filters=128,
scalar_fc=10,
scalar_dim=adversary_max_timestep,
random_z_dim=adversary_random_z_dim,
recurrent_arch=recurrent_arch,
recurrent_hidden_size=recurrent_hidden_size)
else:
observation_space = env.observation_space
action_space = env.action_space
num_directions = observation_space['direction'].high[0] + 1
model_kwargs = dict(
observation_space=observation_space,
action_space=action_space,
scalar_fc=5,
scalar_dim=num_directions,
recurrent_arch=recurrent_arch,
recurrent_hidden_size=recurrent_hidden_size)
model_constructor = MultigridNetwork
if use_global_critic:
model_constructor = MultigridGlobalCriticNetwork
if use_global_policy:
model_kwargs.update({'use_global_policy': True})
model = model_constructor(**model_kwargs)
return model
def model_for_car_racing_agent(
env,
agent_type='agent',
use_skip=False,
choose_start_pos=False,
use_popart=False,
adv_use_popart=False,
use_categorical_adv=False,
use_goal=False,
num_goal_bins=1):
if agent_type == 'adversary_env':
adversary_observation_space = env.adversary_observation_space
adversary_action_space = env.adversary_action_space
model = CarRacingBezierAdversaryEnvNetwork(
observation_space=adversary_observation_space,
action_space=adversary_action_space,
use_categorical=use_categorical_adv,
use_skip=use_skip,
choose_start_pos=choose_start_pos,
use_popart=adv_use_popart,
use_goal=use_goal,
num_goal_bins=num_goal_bins)
else:
action_space = env.action_space
obs_shape = env.observation_space.shape
model = CarRacingNetwork(
obs_shape=obs_shape,
action_space = action_space,
hidden_size=100,
use_popart=use_popart)
return model
def model_for_bipedalwalker_agent(
env,
agent_type='agent',
recurrent_arch=False):
if 'adversary_env' in agent_type:
adversary_observation_space = env.adversary_observation_space
adversary_action_space = env.adversary_action_space
model = BipedalWalkerAdversaryPolicy(
observation_space=adversary_observation_space,
action_space=adversary_action_space)
else:
model = BipedalWalkerStudentPolicy(
obs_shape=env.observation_space.shape,
action_space=env.action_space,
recurrent=recurrent_arch)
return model
def model_for_env_agent(
env_name,
env,
agent_type='agent',
recurrent_arch=None,
recurrent_hidden_size=256,
use_global_critic=False,
use_global_policy=False,
use_skip=False,
choose_start_pos=False,
use_popart=False,
adv_use_popart=False,
use_categorical_adv=False,
use_goal=False,
num_goal_bins=1):
assert agent_type in ['agent', 'adversary_agent', 'adversary_env']
if env_name.startswith('MultiGrid'):
model = model_for_multigrid_agent(
env=env,
agent_type=agent_type,
recurrent_arch=recurrent_arch,
recurrent_hidden_size=recurrent_hidden_size,
use_global_critic=use_global_critic,
use_global_policy=use_global_policy)
elif env_name.startswith('CarRacing'):
model = model_for_car_racing_agent(
env=env,
agent_type=agent_type,
use_skip=use_skip,
choose_start_pos=choose_start_pos,
use_popart=use_popart,
adv_use_popart=adv_use_popart,
use_categorical_adv=use_categorical_adv,
use_goal=use_goal,
num_goal_bins=num_goal_bins)
elif env_name.startswith('BipedalWalker'):
model = model_for_bipedalwalker_agent(
env=env,
agent_type=agent_type,
recurrent_arch=recurrent_arch)
else:
raise ValueError(f'Unsupported environment {env_name}.')
return model
def make_agent(name, env, args, device='cpu'):
# Create model instance
is_adversary_env = 'env' in name
if is_adversary_env:
observation_space = env.adversary_observation_space
action_space = env.adversary_action_space
num_steps = observation_space['time_step'].high[0]
recurrent_arch = args.recurrent_adversary_env and args.recurrent_arch
entropy_coef = args.adv_entropy_coef
ppo_epoch = args.adv_ppo_epoch
num_mini_batch = args.adv_num_mini_batch
max_grad_norm = args.adv_max_grad_norm
use_popart = vars(args).get('adv_use_popart', False)
else:
observation_space = env.observation_space
action_space = env.action_space
num_steps = args.num_steps
recurrent_arch = args.recurrent_agent and args.recurrent_arch
entropy_coef = args.entropy_coef
ppo_epoch = args.ppo_epoch
num_mini_batch = args.num_mini_batch
max_grad_norm = args.max_grad_norm
use_popart = vars(args).get('use_popart', False)
recurrent_hidden_size = args.recurrent_hidden_size
actor_critic = model_for_env_agent(
args.env_name, env, name,
recurrent_arch=recurrent_arch,
recurrent_hidden_size=recurrent_hidden_size,
use_global_critic=args.use_global_critic,
use_global_policy=vars(args).get('use_global_policy', False),
use_skip=vars(args).get('use_skip', False),
choose_start_pos=vars(args).get('choose_start_pos', False),
use_popart=vars(args).get('use_popart', False),
adv_use_popart=vars(args).get('adv_use_popart', False),
use_categorical_adv=vars(args).get('use_categorical_adv', False),
use_goal=vars(args).get('sparse_rewards', False),
num_goal_bins=vars(args).get('num_goal_bins', 1))
algo = None
storage = None
agent = None
use_proper_time_limits = \
hasattr(env, 'get_max_episode_steps') \
and env.get_max_episode_steps() is not None \
and vars(args).get('handle_timelimits', False)
if args.algo == 'ppo':
# Create PPO
algo = PPO(
actor_critic=actor_critic,
clip_param=args.clip_param,
ppo_epoch=ppo_epoch,
num_mini_batch=num_mini_batch,
value_loss_coef=args.value_loss_coef,
entropy_coef=entropy_coef,
lr=args.lr,
eps=args.eps,
max_grad_norm=max_grad_norm,
clip_value_loss=args.clip_value_loss,
log_grad_norm=args.log_grad_norm
)
# Create storage
storage = RolloutStorage(
model=actor_critic,
num_steps=num_steps,
num_processes=args.num_processes,
observation_space=observation_space,
action_space=action_space,
recurrent_hidden_state_size=args.recurrent_hidden_size,
recurrent_arch=args.recurrent_arch,
use_proper_time_limits=use_proper_time_limits,
use_popart=use_popart
)
agent = ACAgent(algo=algo, storage=storage).to(device)
else:
raise ValueError(f'Unsupported RL algorithm {args.algo}.')
return agent
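# Illustrative usage (comment only, mirroring how train.py above calls this):
#   venv, ued_venv = create_parallel_env(args)
#   student = make_agent(name='agent', env=venv, args=args, device=device)
#   teacher = make_agent(name='adversary_env', env=venv, args=args, device=device)
# Names containing 'env' are routed through the env.adversary_* spaces branch.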
|
dcd-main
|
util/make_agent.py
|
# Copyright (c) 2015 Peter Onrejka
#
# Licensed under the GNU General Public License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/gpl-license
#
# This file is a modified version of
# https://github.com/sical/polygons_complexity/blob/master/complexity.py
# which itself is based on
# https://github.com/pondrejk/PolygonComplexity/blob/master/PolygonComplexity.py
import math
import os
import pandas as pd
import geopandas as gpd
import shapely
def get_notches(poly):
"""
Determine the number of notches in a polygon object and calculate
normalized notches of polygon
Based on:
"Measuring the Complexity of Polygonal Objects"
(Thomas Brinkhoff, Hans-Peter Kriegel, Ralf Schneider, Alexander Braun)
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.73.1045&rep=rep1&type=pdf
https://github.com/pondrejk/PolygonComplexity/blob/master/PolygonComplexity.py
@poly (Shapely Polygon object)
Returns normalized notches
"""
notches = 0
coords = list(poly.exterior.coords)
for i, pt in enumerate(coords[:-1]):
x_diff = coords[i+1][0] - pt[0]
y_diff = coords[i+1][1] - pt[1]
angle = math.atan2(y_diff, x_diff)
if angle < 0:
angle += 2*math.pi
if angle > math.pi:
notches += 1
if notches != 0:
notches_norm = notches / (len(coords)-3)
else:
notches_norm = 0
return notches_norm
def get_stats(gdf, coeff_ampl, coeff_conv):
"""
Get polygon's amplitude of vibration:
ampl(pol) = (boundary(pol) - boundary(convexhull(pol))) / boundary(pol)
Get deviation from convex hull:
conv(pol) = (area(convexhull(pol)) - area(pol)) / area(convexhull(pol))
Measure complexity
Based on:
"Measuring the Complexity of Polygonal Objects"
(Thomas Brinkhoff, Hans-Peter Kriegel, Ralf Schneider, Alexander Braun)
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.73.1045&rep=rep1&type=pdf
https://github.com/pondrejk/PolygonComplexity/blob/master/PolygonComplexity.py
    Get area, centroid, distance from each other, boundary, convex hull,
perimeter, number of vertices.
Returns tuple with dict of stats values and GeoDataframe with stats
"""
nb = gdf['geometry'].count()
gdf['area'] = gdf['geometry'].area
tot_area = gdf['area'].sum()
gdf['centroid'] = gdf['geometry'].centroid
gdf['boundary'] = gdf['geometry'].boundary
gdf['convex_hull'] = gdf['geometry'].convex_hull
gdf['convex_boundary'] = gdf['geometry'].convex_hull.boundary
gdf['convex_area'] = gdf['geometry'].convex_hull.area
gdf['nbvertices'] = gdf['geometry'].apply(lambda x: len(list(x.exterior.coords)))
gdf['notches'] = gdf['geometry'].apply(lambda x: get_notches(x))
gdf['amplitude'] = gdf.apply(
lambda x:(
x['boundary'].length - x['convex_boundary'].length
) / (x['boundary'].length + 1e-3),
axis=1)
gdf['convex'] = gdf.apply(
lambda x: (
x['convex_area'] - x['area']
) / (x['convex_area'] + 1e-3),
axis=1)
gdf['complexity'] = gdf.apply(
lambda x: coeff_ampl*x['amplitude'] * x['notches'] + coeff_conv * x['convex'],
axis=1
)
mean_amplitude = gdf['amplitude'].mean()
mean_convex = gdf['convex'].mean()
mean_norm_notches = gdf['notches'].mean()
mean_complexity = gdf['complexity'].mean()
gdf['perimeter'] = gdf['geometry'].length
tot_perimeter = gdf['perimeter'].sum()
if ("lat" in gdf.columns) or ("lon" in gdf.columns):
columns_drop = ["boundary", "convex_hull", "convex_boundary", "convex_area", "centroid", "lat", "lon"]
else:
columns_drop = ["boundary", "convex_hull", "convex_boundary", "convex_area", "centroid"]
gdf = gdf.drop(columns_drop, axis=1)
gdf = gdf.reset_index()
if nb > 1:
gdf = gdf.sort_values(by='perimeter', ascending=False)
gdf = gdf.iloc[[0]]
return {
'area':tot_area,
'perimeter':tot_perimeter,
'amplitude': mean_amplitude,
'convex': mean_convex,
'notches': mean_norm_notches,
'complexity': mean_complexity
}, gdf
def complexity(points, coeff_ampl=0.8, coeff_conv=0.2):
polygon = shapely.geometry.Polygon(points)
gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries([polygon]))
dict_complexity, gdf = get_stats(gdf, coeff_ampl, coeff_conv)
return dict_complexity
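# Illustrative usage sketch (a convex unit square, so amplitude and convexity deviation
# are both ~0 and the complexity score is ~0):
#   stats = complexity([(0, 0), (1, 0), (1, 1), (0, 1)])
#   stats['area']        # -> 1.0
#   stats['complexity']  # -> ~0.0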
|
dcd-main
|
util/geo_complexity.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import os
import shutil
import collections
import timeit
import random
import numpy as np
import torch
from torchvision import utils as vutils
from envs.registration import make as gym_make
from .make_agent import make_agent
from .filewriter import FileWriter
from envs.wrappers import ParallelAdversarialVecEnv, VecMonitor, VecNormalize, \
VecPreprocessImageWrapper, VecFrameStack, MultiGridFullyObsWrapper, CarRacingWrapper, TimeLimit
class DotDict(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __init__(self, dct):
for key, value in dct.items():
if hasattr(value, 'keys'):
value = DotDict(value)
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, state):
self.update(state)
self.__dict__ = self
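# Illustrative usage: DotDict wraps (nested) dicts for attribute-style access.
#   cfg = DotDict({'algo': 'ppo', 'env': {'name': 'maze'}})
#   cfg.algo      # -> 'ppo'
#   cfg.env.name  # -> 'maze'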
def array_to_csv(a):
return ','.join([str(v) for v in a])
def cprint(condition, *args, **kwargs):
if condition:
print(*args, **kwargs)
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
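# safe_checkpoint writes the state dict to a temporary file and atomically swaps it into
# place with os.replace, so a crash mid-save cannot corrupt the existing checkpoint; it
# also archives a copy every `archive_interval` checkpoints when an index is provided.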
def safe_checkpoint(state_dict, path, index=None, archive_interval=None):
filename, ext = os.path.splitext(path)
path_tmp = f'{filename}_tmp{ext}'
torch.save(state_dict, path_tmp)
os.replace(path_tmp, path)
if index is not None and archive_interval is not None and archive_interval > 0:
if index % archive_interval == 0:
archive_path = f'{filename}_{index}{ext}'
shutil.copy(path, archive_path)
def cleanup_log_dir(log_dir, pattern='*'):
try:
os.makedirs(log_dir)
except OSError:
files = glob.glob(os.path.join(log_dir, pattern))
for f in files:
os.remove(f)
def seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def save_images(images, path=None, normalize=False, channels_first=False):
if path is None:
return
if isinstance(images, (list, tuple)):
images = torch.tensor(np.stack(images), dtype=torch.float)
elif isinstance(images, np.ndarray):
images = torch.tensor(images, dtype=torch.float)
if normalize:
images = images/255
if not channels_first:
if len(images.shape) == 4:
images = images.permute(0,3,1,2)
else:
images = images.permute(2,0,1)
grid = vutils.make_grid(images)
vutils.save_image(grid, path)
def get_obs_at_index(obs, i):
if isinstance(obs, dict):
return {k: obs[k][i] for k in obs.keys()}
else:
return obs[i]
def set_obs_at_index(obs, obs_, i):
if isinstance(obs, dict):
for k in obs.keys():
obs[k][i] = obs_[k].squeeze(0)
else:
obs[i] = obs_[0].squeeze(0)
def is_discrete_actions(env, adversary=False):
if adversary:
return env.adversary_action_space.__class__.__name__ == 'Discrete'
else:
return env.action_space.__class__.__name__ == 'Discrete'
def _make_env(args):
env_kwargs = {'seed': args.seed}
if args.singleton_env:
env_kwargs.update({
'fixed_environment': True})
if args.env_name.startswith('CarRacing'):
env_kwargs.update({
'n_control_points': args.num_control_points,
'min_rad_ratio': args.min_rad_ratio,
'max_rad_ratio': args.max_rad_ratio,
'use_categorical': args.use_categorical_adv,
'use_sketch': args.use_sketch,
'clip_reward': args.clip_reward,
'sparse_rewards': args.sparse_rewards,
'num_goal_bins': args.num_goal_bins,
})
if args.env_name.startswith('CarRacing'):
# Hack: This TimeLimit sandwich allows truncated obs to be passed
# up the hierarchy with all necessary preprocessing.
env = gym_make(args.env_name, **env_kwargs)
max_episode_steps = env._max_episode_steps
reward_shaping = args.reward_shaping and not args.sparse_rewards
assert max_episode_steps % args.num_action_repeat == 0
return TimeLimit(CarRacingWrapper(env,
grayscale=args.grayscale,
reward_shaping=reward_shaping,
num_action_repeat=args.num_action_repeat,
nstack=args.frame_stack,
crop=args.crop_frame),
max_episode_steps=max_episode_steps//args.num_action_repeat)
elif args.env_name.startswith('MultiGrid'):
env = gym_make(args.env_name, **env_kwargs)
if args.use_global_critic or args.use_global_policy:
max_episode_steps = env._max_episode_steps
env = TimeLimit(MultiGridFullyObsWrapper(env),
max_episode_steps=max_episode_steps)
return env
else:
return gym_make(args.env_name, **env_kwargs)
def create_parallel_env(args, adversary=True):
is_multigrid = args.env_name.startswith('MultiGrid')
is_car_racing = args.env_name.startswith('CarRacing')
is_bipedalwalker = args.env_name.startswith('BipedalWalker')
make_fn = lambda: _make_env(args)
venv = ParallelAdversarialVecEnv([make_fn]*args.num_processes, adversary=adversary)
venv = VecMonitor(venv=venv, filename=None, keep_buf=100)
venv = VecNormalize(venv=venv, ob=False, ret=args.normalize_returns)
obs_key = None
scale = None
transpose_order = [2,0,1] # Channels first
if is_multigrid:
obs_key = 'image'
scale = 10.0
if is_car_racing:
ued_venv = VecPreprocessImageWrapper(venv=venv) # move to tensor
if is_bipedalwalker:
transpose_order = None
venv = VecPreprocessImageWrapper(venv=venv, obs_key=obs_key,
transpose_order=transpose_order, scale=scale)
if is_multigrid or is_bipedalwalker:
ued_venv = venv
if args.singleton_env:
seeds = [args.seed]*args.num_processes
else:
seeds = [i for i in range(args.num_processes)]
venv.set_seed(seeds)
return venv, ued_venv
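# Illustrative usage sketch (assuming a populated `args` namespace from the training script
# and the usual VecEnv reset API):
#   venv, ued_venv = create_parallel_env(args, adversary=True)
#   obs = venv.reset()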
def is_dense_reward_env(env_name):
if env_name.startswith('CarRacing'):
return True
else:
return False
def make_plr_args(args, obs_space, action_space):
return dict(
seeds=[],
obs_space=obs_space,
action_space=action_space,
num_actors=args.num_processes,
strategy=args.level_replay_strategy,
replay_schedule=args.level_replay_schedule,
score_transform=args.level_replay_score_transform,
temperature=args.level_replay_temperature,
eps=args.level_replay_eps,
rho=args.level_replay_rho,
replay_prob=args.level_replay_prob,
alpha=args.level_replay_alpha,
staleness_coef=args.staleness_coef,
staleness_transform=args.staleness_transform,
staleness_temperature=args.staleness_temperature,
sample_full_distribution=args.train_full_distribution,
seed_buffer_size=args.level_replay_seed_buffer_size,
seed_buffer_priority=args.level_replay_seed_buffer_priority,
use_dense_rewards=is_dense_reward_env(args.env_name),
gamma=args.gamma
)
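# The returned kwargs mirror the LevelSampler constructor; an illustrative use is
#   level_sampler = LevelSampler(**make_plr_args(args, obs_space, action_space))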
|
dcd-main
|
util/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import datetime
import json
import logging
import os
import time
from typing import Dict
import numpy as np
def gather_metadata() -> Dict:
date_start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
# Gathering git metadata.
try:
import git
try:
repo = git.Repo(search_parent_directories=True)
git_sha = repo.commit().hexsha
git_data = dict(
commit=git_sha,
branch=None if repo.head.is_detached else repo.active_branch.name,
is_dirty=repo.is_dirty(),
path=repo.git_dir,
)
except git.InvalidGitRepositoryError:
git_data = None
except ImportError:
git_data = None
# Gathering slurm metadata.
if "SLURM_JOB_ID" in os.environ:
slurm_env_keys = [k for k in os.environ if k.startswith("SLURM")]
slurm_data = {}
for k in slurm_env_keys:
d_key = k.replace("SLURM_", "").replace("SLURMD_", "").lower()
slurm_data[d_key] = os.environ[k]
else:
slurm_data = None
return dict(
date_start=date_start,
date_end=None,
successful=False,
git=git_data,
slurm=slurm_data,
env=os.environ.copy(),
)
class FileWriter:
def __init__(
self,
xpid: str = None,
xp_args: dict = None,
rootdir: str = "~/logs",
symlink_to_latest: bool = True,
seeds=None,
):
if not xpid:
# Make unique id.
xpid = "{proc}_{unixtime}".format(
proc=os.getpid(), unixtime=int(time.time())
)
self.xpid = xpid
self._tick = 0
# Metadata gathering.
if xp_args is None:
xp_args = {}
self.metadata = gather_metadata()
# We need to copy the args, otherwise when we close the file writer
# (and rewrite the args) we might have non-serializable objects (or
# other unwanted side-effects).
self.metadata["args"] = copy.deepcopy(xp_args)
self.metadata["xpid"] = self.xpid
formatter = logging.Formatter("%(message)s")
self._logger = logging.getLogger("logs/out")
train_full_distribution = xp_args.get('train_full_distribution', False)
seed_buffer_size = xp_args.get('level_replay_seed_buffer_size', 0)
self.record_seed_diffs = \
train_full_distribution and seed_buffer_size > 0
self.seeds = None
if not self.record_seed_diffs and seeds:
self.seeds = [str(seed) for seed in seeds]
# To stdout handler.
shandle = logging.StreamHandler()
shandle.setFormatter(formatter)
self._logger.addHandler(shandle)
self._logger.setLevel(logging.INFO)
rootdir = os.path.expandvars(os.path.expanduser(rootdir))
# To file handler.
self.basepath = os.path.join(rootdir, self.xpid)
if not os.path.exists(self.basepath):
self._logger.info("Creating log directory: %s", self.basepath)
os.makedirs(self.basepath, exist_ok=True)
else:
self._logger.info("Found log directory: %s", self.basepath)
if symlink_to_latest:
# Add 'latest' as symlink unless it exists and is no symlink.
symlink = os.path.join(rootdir, "latest")
try:
if os.path.islink(symlink):
os.remove(symlink)
if not os.path.exists(symlink):
os.symlink(self.basepath, symlink)
self._logger.info("Symlinked log directory: %s", symlink)
except OSError:
# os.remove() or os.symlink() raced. Don't do anything.
pass
self.paths = dict(
msg="{base}/out.log".format(base=self.basepath),
logs="{base}/logs.csv".format(base=self.basepath),
fields="{base}/fields.csv".format(base=self.basepath),
meta="{base}/meta.json".format(base=self.basepath),
level_weights="{base}/level_weights.csv".format(base=self.basepath),
level_seeds="{base}/level_seeds.csv".format(base=self.basepath),
final_test_eval="{base}/final_test_eval.csv".format(base=self.basepath)
)
self._logger.info("Saving arguments to %s", self.paths["meta"])
if os.path.exists(self.paths["meta"]):
self._logger.warning(
"Path to meta file already exists. " "Not overriding meta."
)
else:
self._save_metadata()
self._logger.info("Saving messages to %s", self.paths["msg"])
if os.path.exists(self.paths["msg"]):
self._logger.warning(
"Path to message file already exists. " "New data will be appended."
)
fhandle = logging.FileHandler(self.paths["msg"])
fhandle.setFormatter(formatter)
self._logger.addHandler(fhandle)
self._logger.info("Saving logs data to %s", self.paths["logs"])
self._logger.info("Saving logs' fields to %s", self.paths["fields"])
self.fieldnames = ["_tick", "_time"]
self.final_test_eval_fieldnames = ['num_test_seeds', 'mean_episode_return', 'median_episode_return']
self.level_seeds_fieldnames = ['new_seeds', 'new_seed_indices']
if os.path.exists(self.paths["logs"]):
self._logger.warning(
"Path to log file already exists. " "New data will be appended."
)
# Override default fieldnames.
with open(self.paths["fields"], "r") as csvfile:
reader = csv.reader(csvfile)
lines = list(reader)
if len(lines) > 0:
self.fieldnames = lines[-1]
# Override default tick: use the last tick from the logs file plus 1.
with open(self.paths["logs"], "r") as csvfile:
reader = csv.reader(csvfile)
lines = list(reader)
# Need at least two lines in order to read the last tick:
# the first is the csv header and the second is the first line
# of data.
if len(lines) > 1:
self._tick = int(lines[-1][0]) + 1
self._fieldfile = open(self.paths["fields"], "a")
self._fieldwriter = csv.writer(self._fieldfile)
self._logfile = open(self.paths["logs"], "a")
self._logwriter = csv.DictWriter(self._logfile, fieldnames=self.fieldnames)
self._levelweightsfile = open(self.paths["level_weights"], "a")
self._levelweightswriter = csv.writer(self._levelweightsfile)
self._levelseedsfile = open(self.paths["level_seeds"], "a")
self._levelseedswriter = csv.DictWriter(self._levelseedsfile, fieldnames=self.level_seeds_fieldnames)
self._finaltestfile = open(self.paths["final_test_eval"], "a")
self._finaltestwriter = csv.DictWriter(self._finaltestfile, fieldnames=self.final_test_eval_fieldnames)
if self.seeds and not self.record_seed_diffs:
self._levelweightsfile.write("# %s\n" % ",".join(self.seeds))
self._levelweightsfile.flush()
self._finaltestwriter.writeheader()
self._finaltestfile.flush()
def log(self, to_log: Dict, tick: int = None, verbose: bool = False) -> None:
if tick is not None:
raise NotImplementedError
else:
to_log["_tick"] = self._tick
self._tick += 1
to_log["_time"] = time.time()
old_len = len(self.fieldnames)
for k in to_log:
if k not in self.fieldnames:
self.fieldnames.append(k)
if old_len != len(self.fieldnames):
self._fieldwriter.writerow(self.fieldnames)
self._logger.info("Updated log fields: %s", self.fieldnames)
if to_log["_tick"] == 0:
self._logfile.write("# %s\n" % ",".join(self.fieldnames))
if verbose:
self._logger.info(
"LOG | %s",
", ".join(["{}: {}".format(k, to_log[k]) for k in sorted(to_log)]),
)
self._logwriter.writerow(to_log)
self._logfile.flush()
def log_level_weights(self, weights, seeds=None):
if self.record_seed_diffs:
if self.seeds is None:
self.seeds = seeds.copy()
level_seed_log = {
'new_seeds': " ".join([str(s) for s in self.seeds]),
'new_seed_indices': " ".join([str(i) for i in range(len(self.seeds))]),
}
else:
new_seed_indices = np.nonzero(self.seeds - seeds)[0]
new_seeds = seeds[new_seed_indices]
self.seeds = seeds.copy()
level_seed_log = {
'new_seeds': " ".join([str(s) for s in new_seeds]),
'new_seed_indices': " ".join([str(i) for i in new_seed_indices]),
}
self._levelseedswriter.writerow(level_seed_log)
self._levelseedsfile.flush()
self._levelweightswriter.writerow(weights)
self._levelweightsfile.flush()
def log_final_test_eval(self, to_log):
self._finaltestwriter.writerow(to_log)
self._finaltestfile.flush()
def close(self, successful: bool = True) -> None:
self.metadata["date_end"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S.%f"
)
self.metadata["successful"] = successful
self._save_metadata()
for f in [self._logfile, self._fieldfile]:
f.close()
def _save_metadata(self) -> None:
with open(self.paths["meta"], "w") as jsonfile:
json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
def latest_tick(self):
with open(self.paths["logs"], "r") as logsfile:
csvreader = csv.reader(logsfile)
for row in csvreader:
pass
if row:
return int(row[0])
else:
return 0
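# Illustrative usage sketch (paths and metric names are placeholders):
#   fw = FileWriter(xpid='my_run', xp_args=vars(args), rootdir='~/logs')
#   fw.log({'steps': 1000, 'mean_episode_return': 0.5})
#   fw.log_final_test_eval({'num_test_seeds': 10,
#                           'mean_episode_return': 0.7,
#                           'median_episode_return': 0.8})
#   fw.close(successful=True)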
|
dcd-main
|
util/filewriter.py
|
# Copyright (c) 2017 Debajyoti Nandi
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is taken from
# https://github.com/deehzee/unionfind/blob/master/unionfind.py
"""
A union-find disjoint set data structure.
"""
# Third-party libraries
import numpy as np
class UnionFind(object):
"""Union-find disjoint sets datastructure.
Union-find is a data structure that maintains disjoint set
(called connected components or components in short) membership,
and makes it easier to merge (union) two components, and to find
if two elements are connected (i.e., belong to the same
component).
This implements the "weighted-quick-union-with-path-compression"
union-find algorithm. Only works if elements are immutable
objects.
Worst case for union and find: :math:`(N + M \log^* N)`, with
:math:`N` elements and :math:`M` unions. The function
:math:`\log^*` is the number of times needed to take :math:`\log`
of a number until reaching 1. In practice, the amortized cost of
each operation is nearly linear [1]_.
Terms
-----
Component
Elements belonging to the same disjoint set
Connected
Two elements are connected if they belong to the same component.
Union
The operation where two components are merged into one.
Root
An internal representative of a disjoint set.
Find
The operation to find the root of a disjoint set.
Parameters
----------
elements : NoneType or container, optional, default: None
The initial list of elements.
Attributes
----------
n_elts : int
Number of elements.
n_comps : int
        Number of disjoint sets or components.
Implements
----------
__len__
Calling ``len(uf)`` (where ``uf`` is an instance of ``UnionFind``)
returns the number of elements.
__contains__
For ``uf`` an instance of ``UnionFind`` and ``x`` an immutable object,
``x in uf`` returns ``True`` if ``x`` is an element in ``uf``.
__getitem__
For ``uf`` an instance of ``UnionFind`` and ``i`` an integer,
``res = uf[i]`` returns the element stored in the ``i``-th index.
If ``i`` is not a valid index an ``IndexError`` is raised.
__setitem__
        For ``uf`` an instance of ``UnionFind``, ``i`` an integer and ``x``
an immutable object, ``uf[i] = x`` changes the element stored at the
``i``-th index. If ``i`` is not a valid index an ``IndexError`` is
raised.
.. [1] http://algs4.cs.princeton.edu/lectures/
"""
def __init__(self, elements=None):
self.n_elts = 0 # current num of elements
self.n_comps = 0 # the number of disjoint sets or components
self._next = 0 # next available id
self._elts = [] # the elements
self._indx = {} # dict mapping elt -> index in _elts
self._par = [] # parent: for the internal tree structure
self._siz = [] # size of the component - correct only for roots
if elements is None:
elements = []
for elt in elements:
self.add(elt)
def __repr__(self):
return (
'<UnionFind:\n\telts={},\n\tsiz={},\n\tpar={},\nn_elts={},n_comps={}>'
.format(
self._elts,
self._siz,
self._par,
self.n_elts,
self.n_comps,
))
def __len__(self):
return self.n_elts
def __contains__(self, x):
return x in self._indx
def __getitem__(self, index):
if index < 0 or index >= self._next:
raise IndexError('index {} is out of bound'.format(index))
return self._elts[index]
def __setitem__(self, index, x):
if index < 0 or index >= self._next:
raise IndexError('index {} is out of bound'.format(index))
self._elts[index] = x
def add(self, x):
"""Add a single disjoint element.
Parameters
----------
x : immutable object
Returns
-------
None
"""
if x in self:
return
self._elts.append(x)
self._indx[x] = self._next
self._par.append(self._next)
self._siz.append(1)
self._next += 1
self.n_elts += 1
self.n_comps += 1
def find(self, x):
"""Find the root of the disjoint set containing the given element.
Parameters
----------
x : immutable object
Returns
-------
int
The (index of the) root.
Raises
------
ValueError
If the given element is not found.
"""
if x not in self._indx:
raise ValueError('{} is not an element'.format(x))
p = self._indx[x]
while p != self._par[p]:
# path compression
q = self._par[p]
self._par[p] = self._par[q]
p = q
return p
def connected(self, x, y):
"""Return whether the two given elements belong to the same component.
Parameters
----------
x : immutable object
y : immutable object
Returns
-------
bool
True if x and y are connected, false otherwise.
"""
return self.find(x) == self.find(y)
def union(self, x, y):
"""Merge the components of the two given elements into one.
Parameters
----------
x : immutable object
y : immutable object
Returns
-------
None
"""
# Initialize if they are not already in the collection
for elt in [x, y]:
if elt not in self:
self.add(elt)
xroot = self.find(x)
yroot = self.find(y)
if xroot == yroot:
return
if self._siz[xroot] < self._siz[yroot]:
self._par[xroot] = yroot
self._siz[yroot] += self._siz[xroot]
else:
self._par[yroot] = xroot
self._siz[xroot] += self._siz[yroot]
self.n_comps -= 1
def component(self, x):
"""Find the connected component containing the given element.
Parameters
----------
x : immutable object
Returns
-------
set
Raises
------
ValueError
If the given element is not found.
"""
if x not in self:
raise ValueError('{} is not an element'.format(x))
elts = np.array(self._elts)
vfind = np.vectorize(self.find)
roots = vfind(elts)
return set(elts[roots == self.find(x)])
def components(self):
"""Return the list of connected components.
Returns
-------
list
A list of sets.
"""
elts = np.array(self._elts)
vfind = np.vectorize(self.find)
roots = vfind(elts)
distinct_roots = set(roots)
return [set(elts[roots == root]) for root in distinct_roots]
# comps = []
# for root in distinct_roots:
# mask = (roots == root)
# comp = set(elts[mask])
# comps.append(comp)
# return comps
def component_mapping(self):
"""Return a dict mapping elements to their components.
The returned dict has the following semantics:
`elt -> component containing elt`
If x, y belong to the same component, the comp(x) and comp(y)
are the same objects (i.e., share the same reference). Changing
comp(x) will reflect in comp(y). This is done to reduce
memory.
But this behaviour should not be relied on. There may be
        inconsistencies arising from such assumptions or lack thereof.
If you want to do any operation on these sets, use caution.
For example, instead of
::
s = uf.component_mapping()[item]
s.add(stuff)
# This will have side effect in other sets
do
::
s = set(uf.component_mapping()[item]) # or
s = uf.component_mapping()[item].copy()
s.add(stuff)
or
::
s = uf.component_mapping()[item]
s = s | {stuff} # Now s is different
Returns
-------
dict
            A dict with the semantics: `elt -> component containing elt`.
"""
elts = np.array(self._elts)
vfind = np.vectorize(self.find)
roots = vfind(elts)
distinct_roots = set(roots)
comps = {}
for root in distinct_roots:
mask = (roots == root)
comp = set(elts[mask])
comps.update({x: comp for x in comp})
# Change ^this^, if you want a different behaviour:
# If you don't want to share the same set to different keys:
# comps.update({x: set(comp) for x in comp})
return comps
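# Illustrative usage sketch:
#   uf = UnionFind(['a', 'b', 'c', 'd'])
#   uf.union('a', 'b')
#   uf.union('c', 'd')
#   uf.connected('a', 'b')   # -> True
#   uf.connected('a', 'c')   # -> False
#   uf.components()          # -> [{'a', 'b'}, {'c', 'd'}] (order of sets may vary)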
|
dcd-main
|
util/unionfind.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .level_sampler import LevelSampler
from .level_store import LevelStore
|
dcd-main
|
level_replay/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple, defaultdict, deque
import numpy as np
import torch
INT32_MAX = 2147483647
class LevelStore(object):
"""
Manages a mapping between level index --> level, where the level
may be represented by any arbitrary data structure. Typically, we can
represent any given level as a string.
"""
def __init__(self, max_size=None, data_info={}):
self.max_size = max_size
self.seed2level = defaultdict()
self.level2seed = defaultdict()
self.seed2parent = defaultdict()
self.next_seed = 1
self.levels = set()
self.data_info = data_info
def __len__(self):
return len(self.levels)
def _insert(self, level, parent_seed=None):
if level is None:
return None
if level not in self.levels:
# FIFO if max size constraint
if self.max_size is not None:
while len(self.levels) >= self.max_size:
first_idx = list(self.seed2level)[0]
self._remove(first_idx)
seed = self.next_seed
self.seed2level[seed] = level
if parent_seed is not None:
self.seed2parent[seed] = \
self.seed2parent[parent_seed] + [self.seed2level[parent_seed]]
else:
self.seed2parent[seed] = []
self.level2seed[level] = seed
self.levels.add(level)
self.next_seed += 1
return seed
else:
return self.level2seed[level]
def insert(self, level, parent_seeds=None):
if hasattr(level, '__iter__'):
idx = []
for i, l in enumerate(level):
ps = None
if parent_seeds is not None:
ps = parent_seeds[i]
idx.append(self._insert(l, ps))
return idx
else:
return self._insert(level)
def _remove(self, level_seed):
if level_seed is None or level_seed < 0:
return
level = self.seed2level[level_seed]
self.levels.remove(level)
del self.seed2level[level_seed]
del self.level2seed[level]
del self.seed2parent[level_seed]
def remove(self, level_seed):
if hasattr(level_seed, '__iter__'):
for i in level_seed:
self._remove(i)
else:
self._remove(level_seed)
def reconcile_seeds(self, level_seeds):
old_seeds = set(self.seed2level)
new_seeds = set(level_seeds)
# Don't update if empty seeds
if len(new_seeds) == 1 and -1 in new_seeds:
return
ejected_seeds = old_seeds - new_seeds
for seed in ejected_seeds:
self._remove(seed)
def get_level(self, level_seed):
level = self.seed2level[level_seed]
if self.data_info:
if self.data_info.get('numpy', False):
dtype = self.data_info['dtype']
shape = self.data_info['shape']
level = np.frombuffer(level, dtype=dtype).reshape(*shape)
return level
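# Illustrative usage sketch: the store maps opaque level encodings to integer seeds.
#   store = LevelStore(max_size=4000)
#   seed = store.insert(level)            # new or existing seed for this level
#   same_level = store.get_level(seed)    # recover the original encoding
#   store.reconcile_seeds(active_seeds)   # evict seeds no longer tracked by the sampler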
|
dcd-main
|
level_replay/level_store.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple, defaultdict, deque
import queue
import numpy as np
import torch
INT32_MAX = 2147483647
np.seterr(all='raise')
class LevelSampler():
def __init__(
self,
seeds,
obs_space,
action_space,
num_actors=1,
strategy='random',
max_score_coef=0.0,
replay_schedule='fixed',
score_transform='power',
temperature=1.0,
eps=0.05,
rho=1.0,
replay_prob=0.95,
alpha=1.0,
staleness_coef=0,
staleness_transform='power',
staleness_temperature=1.0,
sample_full_distribution=False,
seed_buffer_size=0,
seed_buffer_priority='replay_support',
use_dense_rewards=False,
tscl_window_size=0,
gamma=0.999):
"""
Inputs:
seeds: List, Seeds that can be sampled.
rho: float, Minimum probability of sampling a replay level.
                Note that roughly round(rho * len(seeds)) unseen levels will be sampled before replay levels start being sampled.
alpha: Smoothing factor for updating scores using an exponential weighted average.
obs_space: Gym env observation space.
action_space: Gym env action space.
strategy: Sampling strategy (random, sequential, policy entropy).
"""
self.obs_space = obs_space
self.action_space = action_space
self.num_actors = num_actors
self.strategy = strategy
self.max_score_coef = max_score_coef
self.replay_schedule = replay_schedule
self.score_transform = score_transform
self.temperature = temperature
self.eps = eps
self.rho = rho
self.replay_prob = replay_prob # replay prob
self.alpha = alpha
self.staleness_coef = staleness_coef
self.staleness_transform = staleness_transform
self.staleness_temperature = staleness_temperature
self.gamma = gamma
self.use_dense_rewards = use_dense_rewards
# Track seeds and scores as in np arrays backed by shared memory
self.seed_buffer_size = seed_buffer_size if not seeds else len(seeds)
N = self.seed_buffer_size
self._init_seed_index(seeds)
self.unseen_seed_weights = np.array([1.]*N)
self.seed_scores = np.array([0.]*N, dtype=np.float)
self.partial_seed_scores = np.zeros((num_actors, N), dtype=np.float)
self.partial_seed_max_scores = np.ones((num_actors, N), dtype=np.float)*float('-inf')
self.partial_seed_steps = np.zeros((num_actors, N), dtype=np.int32)
self.seed_staleness = np.array([0.]*N, dtype=np.float)
self.running_sample_count = 0
self.next_seed_index = 0 # Only used for sequential strategy
self.track_solvable = False
# Handle grounded value losses
self.grounded_values = None
if self.strategy.startswith('grounded'):
self.grounded_values = np.array([np.NINF]*N, dtype=np.float)
# Only used for infinite seed setting
self.sample_full_distribution = sample_full_distribution
if self.sample_full_distribution:
self.seed2actor = defaultdict(set)
self.working_seed_buffer_size = 0
self.seed_buffer_priority = seed_buffer_priority
self.staging_seed_set = set()
self.working_seed_set = set()
self.seed2timestamp_buffer = {} # Buffer seeds are unique across actors
self.partial_seed_scores_buffer = [{} for _ in range(num_actors)]
self.partial_seed_max_scores_buffer = [{} for _ in range(num_actors)]
self.partial_seed_steps_buffer = [{} for _ in range(num_actors)]
# TSCL specific data structures
if self.strategy.startswith('tscl'):
self.tscl_window_size = tscl_window_size
self.tscl_return_window = [deque(maxlen=self.tscl_window_size) for _ in range(N)]
self.tscl_episode_window = [deque(maxlen=self.tscl_window_size) for _ in range(N)]
self.unseen_seed_weights = np.zeros(N) # Force uniform distribution over seeds
def seed_range(self):
if not self.sample_full_distribution:
return (int(min(self.seeds)), int(max(self.seeds)))
else:
return (0, INT32_MAX)
def _init_seed_index(self, seeds):
if seeds:
self.seeds = np.array(seeds, dtype=np.int64)
self.seed2index = {seed: i for i, seed in enumerate(seeds)}
else:
self.seeds = np.zeros(self.seed_buffer_size, dtype=np.int64) - 1
self.seed2index = {}
def _init_solvable_tracking(self):
"""
Prepare data structures for tracking seed solvability.
Currently only used with externally observed seeds.
"""
self.track_solvable = True
self.staging_seed2solvable = {}
self.seed_solvable = np.ones(self.seed_buffer_size, dtype=np.bool)
@property
def _proportion_filled(self):
if self.sample_full_distribution:
return self.working_seed_buffer_size/self.seed_buffer_size
else:
num_unseen = (self.unseen_seed_weights > 0).sum()
proportion_seen = (len(self.seeds) - num_unseen)/len(self.seeds)
return proportion_seen
def update_with_rollouts(self, rollouts):
if self.strategy in ['random', 'off']:
return
# Update with a RolloutStorage object
if self.strategy == 'uniform':
score_function = self._uniform
elif self.strategy == 'policy_entropy':
score_function = self._average_entropy
elif self.strategy == 'least_confidence':
score_function = self._average_least_confidence
elif self.strategy == 'min_margin':
score_function = self._average_min_margin
elif self.strategy == 'gae':
score_function = self._average_gae
elif self.strategy == 'value_l1':
score_function = self._average_value_l1
elif self.strategy == 'signed_value_loss':
score_function = self._average_signed_value_loss
elif self.strategy == 'positive_value_loss':
score_function = self._average_positive_value_loss
elif self.strategy == 'grounded_signed_value_loss':
score_function = self._average_grounded_signed_value_loss
elif self.strategy == 'grounded_positive_value_loss':
score_function = self._average_grounded_positive_value_loss
elif self.strategy == 'one_step_td_error':
score_function = self._one_step_td_error
elif self.strategy == 'alt_advantage_abs':
score_function = self._average_alt_advantage_abs
elif self.strategy == 'tscl_window':
score_function = self._tscl_window
else:
raise ValueError(f'Unsupported strategy, {self.strategy}')
self._update_with_rollouts(rollouts, score_function)
def update_seed_score(self, actor_index, seed, score, max_score, num_steps):
if self.sample_full_distribution and seed in self.staging_seed_set:
score, seed_idx = self._partial_update_seed_score_buffer(actor_index, seed, score, num_steps, done=True)
else:
score, seed_idx = self._partial_update_seed_score(actor_index, seed, score, max_score, num_steps, done=True)
return score, seed_idx
def _partial_update_seed_score(self, actor_index, seed, score, max_score, num_steps, done=False):
seed_idx = self.seed2index.get(seed, -1)
if seed_idx < 0:
return 0, None
partial_score = self.partial_seed_scores[actor_index][seed_idx]
partial_max_score = self.partial_seed_max_scores[actor_index][seed_idx]
partial_num_steps = self.partial_seed_steps[actor_index][seed_idx]
running_num_steps = partial_num_steps + num_steps
merged_score = partial_score + (score - partial_score)*num_steps/float(running_num_steps)
merged_max_score = max(partial_max_score, max_score)
if done:
self.partial_seed_scores[actor_index][seed_idx] = 0. # zero partial score, partial num_steps
self.partial_seed_max_scores[actor_index][seed_idx] = float('-inf')
self.partial_seed_steps[actor_index][seed_idx] = 0
self.unseen_seed_weights[seed_idx] = 0. # No longer unseen
old_score = self.seed_scores[seed_idx]
total_score = self.max_score_coef*merged_max_score + (1 - self.max_score_coef)*merged_score
self.seed_scores[seed_idx] = (1 - self.alpha)*old_score + self.alpha*total_score
else:
self.partial_seed_scores[actor_index][seed_idx] = merged_score
self.partial_seed_max_scores[actor_index][seed_idx] = merged_max_score
self.partial_seed_steps[actor_index][seed_idx] = running_num_steps
return merged_score, seed_idx
@property
def _next_buffer_index(self):
if self._proportion_filled < 1.0:
return self.working_seed_buffer_size
else:
if self.seed_buffer_priority == 'replay_support':
return self.sample_weights().argmin()
else:
return self.seed_scores.argmin()
def _partial_update_seed_score_buffer(self, actor_index, seed, score, num_steps, done=False):
seed_idx = -1
self.seed2actor[seed].add(actor_index)
partial_score = self.partial_seed_scores_buffer[actor_index].get(seed, 0)
partial_num_steps = self.partial_seed_steps_buffer[actor_index].get(seed, 0)
running_num_steps = partial_num_steps + num_steps
merged_score = partial_score + (score - partial_score)*num_steps/float(running_num_steps)
if done:
# Move seed into working seed data structures
seed_idx = self._next_buffer_index
if self.seed_scores[seed_idx] <= merged_score or self.unseen_seed_weights[seed_idx] > 0:
self.unseen_seed_weights[seed_idx] = 0. # Unmask this index
self.working_seed_set.discard(self.seeds[seed_idx])
self.working_seed_set.add(seed)
self.seeds[seed_idx] = seed
self.seed2index[seed] = seed_idx
self.seed_scores[seed_idx] = merged_score
self.partial_seed_scores[:,seed_idx] = 0.
self.partial_seed_steps[:,seed_idx] = 0
self.seed_staleness[seed_idx] = self.running_sample_count - self.seed2timestamp_buffer[seed]
self.working_seed_buffer_size = min(self.working_seed_buffer_size + 1, self.seed_buffer_size)
if self.track_solvable:
self.seed_solvable[seed_idx] = self.staging_seed2solvable.get(seed, True)
else:
seed_idx = None
# Zero partial score, partial num_steps, remove seed from staging data structures
for a in self.seed2actor[seed]:
self.partial_seed_scores_buffer[a].pop(seed, None)
self.partial_seed_steps_buffer[a].pop(seed, None)
del self.seed2timestamp_buffer[seed]
del self.seed2actor[seed]
self.staging_seed_set.remove(seed)
if self.track_solvable:
del self.staging_seed2solvable[seed]
else:
self.partial_seed_scores_buffer[actor_index][seed] = merged_score
self.partial_seed_steps_buffer[actor_index][seed] = running_num_steps
return merged_score, seed_idx
def _uniform(self, **kwargs):
return 1.0,1.0
def _average_entropy(self, **kwargs):
episode_logits = kwargs['episode_logits']
num_actions = self.action_space.n
max_entropy = -(1./num_actions)*np.log(1./num_actions)*num_actions
        scores = (-torch.exp(episode_logits)*episode_logits).sum(-1)/max_entropy
mean_score = scores.mean().item()
max_score = scores.max().item()
return mean_score, max_score
def _average_least_confidence(self, **kwargs):
episode_logits = kwargs['episode_logits']
scores = 1 - torch.exp(episode_logits.max(-1, keepdim=True)[0])
mean_score = scores.mean().item()
max_score = scores.max().item()
return mean_score, max_score
def _average_min_margin(self, **kwargs):
episode_logits = kwargs['episode_logits']
top2_confidence = torch.exp(episode_logits.topk(2, dim=-1)[0])
scores = top2_confidence[:,0] - top2_confidence[:,1]
mean_score = 1 - scores.mean().item()
max_score = 1 - scores.min().item()
return mean_score, max_score
def _average_gae(self, **kwargs):
returns = kwargs['returns']
value_preds = kwargs['value_preds']
advantages = returns - value_preds
mean_score = advantages.mean().item()
max_score = advantages.max().item()
return mean_score, max_score
def _average_value_l1(self, **kwargs):
returns = kwargs['returns']
value_preds = kwargs['value_preds']
abs_advantages = (returns - value_preds).abs()
mean_score = abs_advantages.mean().item()
max_score = abs_advantages.max().item()
return mean_score, max_score
def _average_signed_value_loss(self, **kwargs):
returns = kwargs['returns']
value_preds = kwargs['value_preds']
advantages = returns - value_preds
mean_score = advantages.mean().item()
max_score = advantages.max().item()
return mean_score, max_score
def _average_positive_value_loss(self, **kwargs):
returns = kwargs['returns']
value_preds = kwargs['value_preds']
clipped_advantages = (returns - value_preds).clamp(0)
mean_score = clipped_advantages.mean().item()
max_score = clipped_advantages.max().item()
return mean_score, max_score
def _average_grounded_signed_value_loss(self, **kwargs):
"""
Currently assumes sparse reward s.t. reward is 0 everywhere except final step
"""
seed = kwargs['seed']
seed_idx = self.seed2index.get(seed, None)
actor_idx= kwargs['actor_index']
done = kwargs['done']
value_preds = kwargs['value_preds']
episode_logits = kwargs['episode_logits']
partial_steps = 0
if self.sample_full_distribution and seed in self.partial_seed_steps_buffer[actor_idx]:
partial_steps = self.partial_seed_steps_buffer[actor_idx][seed]
elif seed_idx is not None:
partial_steps = self.partial_seed_steps[actor_idx][seed_idx]
else:
partial_steps = 0
new_steps = len(episode_logits)
total_steps = partial_steps + new_steps
grounded_value = kwargs.get('grounded_value', None)
if done and grounded_value is not None:
if self.use_dense_rewards:
advantages = grounded_value - value_preds[0]
else:
advantages = grounded_value - value_preds
mean_score = (total_steps/new_steps)*advantages.mean().item()
max_score = advantages.max().item()
else:
mean_score, max_score = 0,0
return mean_score, max_score
def _average_grounded_positive_value_loss(self, **kwargs):
"""
Currently assumes sparse reward s.t. reward is 0 everywhere except final step
"""
seed = kwargs['seed']
seed_idx = self.seed2index.get(seed, None)
actor_idx= kwargs['actor_index']
done = kwargs['done']
value_preds = kwargs['value_preds']
episode_logits = kwargs['episode_logits']
partial_steps = 0
if self.sample_full_distribution and seed in self.partial_seed_steps_buffer[actor_idx]:
partial_steps = self.partial_seed_steps_buffer[actor_idx][seed]
elif seed_idx is not None:
partial_steps = self.partial_seed_steps[actor_idx][seed_idx]
else:
partial_steps = 0
new_steps = len(episode_logits)
total_steps = partial_steps + new_steps
grounded_value = kwargs.get('grounded_value', None)
if done and grounded_value is not None:
if self.use_dense_rewards:
advantages = grounded_value - value_preds[0]
else:
advantages = grounded_value - value_preds
advantages = advantages.clamp(0)
mean_score = (total_steps/new_steps)*advantages.mean().item()
max_score = advantages.max().item()
else:
mean_score, max_score = 0,0
return mean_score, max_score
def _one_step_td_error(self, **kwargs):
rewards = kwargs['rewards']
value_preds = kwargs['value_preds']
max_t = len(rewards)
if max_t > 1:
td_errors = (rewards[:-1] + self.gamma*value_preds[1:max_t] - value_preds[:max_t-1]).abs()
else:
td_errors = rewards[0] - value_preds[0]
mean_score = td_errors.mean().item()
max_score = td_errors.max().item()
return mean_score, max_score
def _average_alt_advantage_abs(self, **kwargs):
returns = kwargs['alt_returns']
value_preds = kwargs['value_preds']
abs_advantages = (returns - value_preds).abs()
mean_score = abs_advantages.mean().item()
max_score = abs_advantages.max().item()
return mean_score, max_score
def _tscl_window(self, **kwargs):
rewards = kwargs['rewards']
seed = kwargs['seed']
seed_idx = self.seed2index.get(seed, -1)
assert(seed_idx >= 0)
# add rewards to the seed window
episode_total_reward = rewards.sum().item()
self.tscl_return_window[seed_idx].append(episode_total_reward)
self.tscl_episode_window[seed_idx].append(self.running_sample_count)
        # compute linear regression coefficient in the window
        x = self.tscl_episode_window[seed_idx]
        y = self.tscl_return_window[seed_idx]
A = np.vstack([x, np.ones(len(x))]).T
c,_ = np.linalg.lstsq(A, y, rcond=None)[0]
c = abs(c)
return c, c
@property
def requires_value_buffers(self):
return self.strategy in [
'gae', 'value_l1',
'signed_value_loss', 'positive_value_loss',
'grounded_signed_value_loss', 'grounded_positive_value_loss',
'one_step_td_error', 'alt_advantage_abs',
'tscl_window']
@property
def _has_working_seed_buffer(self):
return not self.sample_full_distribution or (self.sample_full_distribution and self.seed_buffer_size > 0)
def _update_with_rollouts(self, rollouts, score_function):
if not self._has_working_seed_buffer:
return
level_seeds = rollouts.level_seeds
policy_logits = rollouts.action_log_dist
total_steps, num_actors = policy_logits.shape[:2]
done = ~(rollouts.masks > 0)
# early_done = ~(rollouts.bad_masks > 0)
cliffhanger = ~(rollouts.cliffhanger_masks > 0)
for actor_index in range(num_actors):
start_t = 0
done_steps = done[:,actor_index].nonzero()[:,0]
for t in done_steps:
if not start_t < total_steps: break
if t == 0: # if t is 0, then this done step caused a full update of previous seed last cycle
continue
seed_t = level_seeds[start_t,actor_index].item()
score_function_kwargs = {}
score_function_kwargs['actor_index'] = actor_index
score_function_kwargs['done'] = True
episode_logits = policy_logits[start_t:t,actor_index]
score_function_kwargs['episode_logits'] = torch.log_softmax(episode_logits, -1)
score_function_kwargs['seed'] = seed_t
if self.requires_value_buffers:
score_function_kwargs['returns'] = rollouts.returns[start_t:t,actor_index]
if self.strategy == 'alt_advantage_abs':
score_function_kwargs['alt_returns'] = rollouts.alt_returns[start_t:t,actor_index]
score_function_kwargs['rewards'] = rollouts.rewards[start_t:t,actor_index]
if rollouts.use_popart:
score_function_kwargs['value_preds'] = rollouts.denorm_value_preds[start_t:t,actor_index]
else:
score_function_kwargs['value_preds'] = rollouts.value_preds[start_t:t,actor_index]
# Only perform score updates on non-cliffhanger episodes ending in 'done'
if not cliffhanger[t,actor_index]:
# Update grounded values (highest achieved return per seed)
grounded_value = None
if self.grounded_values is not None:
seed_idx = self.seed2index.get(seed_t, None)
score_function_kwargs['seed_idx'] = seed_idx
grounded_value_ = rollouts.rewards[start_t:t].sum(0)[actor_index]
if seed_idx is not None:
grounded_value = max(self.grounded_values[seed_idx], grounded_value_)
else:
grounded_value = grounded_value_ # Should this be discounted?
score_function_kwargs['grounded_value'] = grounded_value
score, max_score = score_function(**score_function_kwargs)
num_steps = len(episode_logits)
_, seed_idx = self.update_seed_score(actor_index, seed_t, score, max_score, num_steps)
# Track grounded value for future reference
if seed_idx is not None and self.grounded_values is not None and grounded_value is not None:
self.grounded_values[seed_idx] = grounded_value
start_t = t.item()
if start_t < total_steps:
seed_t = level_seeds[start_t,actor_index].item()
score_function_kwargs = {}
score_function_kwargs['actor_index'] = actor_index
score_function_kwargs['done'] = False
episode_logits = policy_logits[start_t:,actor_index]
score_function_kwargs['episode_logits'] = torch.log_softmax(episode_logits, -1)
score_function_kwargs['seed'] = seed_t
if self.requires_value_buffers:
score_function_kwargs['returns'] = rollouts.returns[start_t:,actor_index]
if self.strategy == 'alt_advantage_abs':
score_function_kwargs['alt_returns'] = rollouts.alt_returns[start_t:,actor_index]
score_function_kwargs['rewards'] = rollouts.rewards[start_t:,actor_index]
if rollouts.use_popart:
                        score_function_kwargs['value_preds'] = rollouts.denorm_value_preds[start_t:,actor_index]
else:
score_function_kwargs['value_preds'] = rollouts.value_preds[start_t:,actor_index]
score, max_score = score_function(**score_function_kwargs)
num_steps = len(episode_logits)
if self.sample_full_distribution and seed_t in self.staging_seed_set:
self._partial_update_seed_score_buffer(actor_index, seed_t, score, num_steps)
else:
self._partial_update_seed_score(actor_index, seed_t, score, max_score, num_steps)
def after_update(self):
if not self._has_working_seed_buffer:
return
# Reset partial updates, since weights have changed, and thus logits are now stale
for actor_index in range(self.partial_seed_scores.shape[0]):
for seed_idx in range(self.partial_seed_scores.shape[1]):
if self.partial_seed_scores[actor_index][seed_idx] != 0:
self.update_seed_score(actor_index, self.seeds[seed_idx], 0, float('-inf'), 0)
self.partial_seed_scores.fill(0)
self.partial_seed_steps.fill(0)
# Likewise, reset partial update buffers
if self.sample_full_distribution:
for actor_index in range(self.num_actors):
actor_staging_seeds = list(self.partial_seed_scores_buffer[actor_index].keys())
for seed in actor_staging_seeds:
if self.partial_seed_scores_buffer[actor_index][seed] > 0:
self.update_seed_score(actor_index, seed, 0, float('-inf'), 0)
def _update_staleness(self, selected_idx):
if self.staleness_coef > 0:
self.seed_staleness = self.seed_staleness + 1
self.seed_staleness[selected_idx] = 0
def sample_replay_decision(self):
if self.sample_full_distribution:
proportion_filled = self._proportion_filled
if self.seed_buffer_size > 0:
if self.replay_schedule == 'fixed':
if proportion_filled >= self.rho and np.random.rand() < self.replay_prob:
return True
else:
return False
else:
if proportion_filled >= self.rho and np.random.rand() < min(proportion_filled, self.replay_prob):
return True
else:
return False
else:
# If seed buffer has length 0, then just sample new random seed each time
return False
elif self.replay_schedule == 'fixed':
proportion_seen = self._proportion_filled
if proportion_seen >= self.rho:
# Sample replay level with fixed replay_prob OR if all levels seen
if np.random.rand() < self.replay_prob or not proportion_seen < 1.0:
return True
# Otherwise, sample a new level
return False
else: # Default to proportionate schedule
proportion_seen = self._proportion_filled
if proportion_seen >= self.rho and np.random.rand() < proportion_seen:
return True
else:
return False
@property
def is_warm(self):
return self._proportion_filled >= self.rho
def observe_external_unseen_sample(self, seeds, solvable=None):
for i, seed in enumerate(seeds):
self.running_sample_count += 1
if not (seed in self.staging_seed_set or seed in self.working_seed_set):
self.seed2timestamp_buffer[seed] = self.running_sample_count
self.staging_seed_set.add(seed)
if solvable is not None:
if not self.track_solvable: # lazy init of solvable tracking
self._init_solvable_tracking()
self.staging_seed2solvable[seed] = solvable[i]
else:
seed_idx = self.seed2index.get(seed, None)
if seed_idx is not None:
self._update_staleness(seed_idx)
def sample_replay_level(self, update_staleness=True):
return self._sample_replay_level(update_staleness=update_staleness)
def _sample_replay_level(self, update_staleness=True):
sample_weights = self.sample_weights()
if np.isclose(np.sum(sample_weights), 0):
sample_weights = np.ones_like(self.seeds, dtype=np.float)/len(self.seeds)
sample_weights = sample_weights*(1-self.unseen_seed_weights)
sample_weights /= np.sum(sample_weights)
elif np.sum(sample_weights, 0) != 1.0:
sample_weights = sample_weights/np.sum(sample_weights,0)
seed_idx = np.random.choice(range(len(self.seeds)), 1, p=sample_weights)[0]
seed = self.seeds[seed_idx]
if update_staleness:
self._update_staleness(seed_idx)
return int(seed)
def _sample_unseen_level(self):
if self.sample_full_distribution:
seed = int(np.random.randint(1,INT32_MAX))
# Ensure unique new seed outside of working and staging set
while seed in self.staging_seed_set or seed in self.working_seed_set:
seed = int(np.random.randint(1,INT32_MAX))
self.seed2timestamp_buffer[seed] = self.running_sample_count
self.staging_seed_set.add(seed)
else:
sample_weights = self.unseen_seed_weights/self.unseen_seed_weights.sum()
seed_idx = np.random.choice(range(len(self.seeds)), 1, p=sample_weights)[0]
seed = self.seeds[seed_idx]
self._update_staleness(seed_idx)
return int(seed)
def sample(self, strategy=None):
if strategy == 'full_distribution':
raise ValueError('One-off sampling via full_distribution strategy is not supported.')
self.running_sample_count += 1
if not strategy:
strategy = self.strategy
if not self.sample_full_distribution:
if strategy == 'random':
seed_idx = np.random.choice(range((len(self.seeds))))
seed = self.seeds[seed_idx]
return int(seed)
if strategy == 'sequential':
seed_idx = self.next_seed_index
self.next_seed_index = (self.next_seed_index + 1) % len(self.seeds)
seed = self.seeds[seed_idx]
return int(seed)
replay_decision = self.sample_replay_decision()
if replay_decision:
return self._sample_replay_level()
else:
return self._sample_unseen_level()
def sample_weights(self):
weights = self._score_transform(self.score_transform, self.temperature, self.seed_scores)
weights = weights * (1-self.unseen_seed_weights) # zero out unseen levels
z = np.sum(weights)
if z > 0:
weights /= z
else:
weights = np.ones_like(weights, dtype=np.float)/len(weights)
weights = weights * (1-self.unseen_seed_weights)
weights /= np.sum(weights)
staleness_weights = 0
if self.staleness_coef > 0:
staleness_weights = self._score_transform(self.staleness_transform, self.staleness_temperature, self.seed_staleness)
staleness_weights = staleness_weights * (1-self.unseen_seed_weights)
z = np.sum(staleness_weights)
if z > 0:
staleness_weights /= z
else:
staleness_weights = 1./len(staleness_weights)*(1-self.unseen_seed_weights)
weights = (1 - self.staleness_coef)*weights + self.staleness_coef*staleness_weights
return weights
def _score_transform(self, transform, temperature, scores):
if transform == 'constant':
weights = np.ones_like(scores)
if transform == 'max':
weights = np.zeros_like(scores)
scores = scores[:]
scores[self.unseen_seed_weights > 0] = -float('inf') # only argmax over seen levels
argmax = np.random.choice(np.flatnonzero(np.isclose(scores, scores.max())))
weights[argmax] = 1.
elif transform == 'eps_greedy':
weights = np.zeros_like(scores)
weights[scores.argmax()] = 1. - self.eps
weights += self.eps/len(self.seeds)
elif transform == 'rank':
temp = np.flip(scores.argsort())
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(temp)) + 1
weights = 1/ranks ** (1./temperature)
elif transform == 'power':
eps = 0 if self.staleness_coef > 0 else 1e-3
weights = (np.array(scores).clip(0) + eps) ** (1./temperature)
elif transform == 'softmax':
weights = np.exp(np.array(scores)/temperature)
elif transform == 'match':
weights = np.array([(1-score)*score for score in scores])
weights = weights ** (1./temperature)
elif transform == 'match_rank':
weights = np.array([(1-score)*score for score in scores])
temp = np.flip(weights.argsort())
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(temp)) + 1
weights = 1/ranks ** (1./temperature)
return weights
@property
def solvable_mass(self):
if self.track_solvable:
sample_weights = self.sample_weights()
return np.sum(sample_weights[self.seed_solvable])
else:
return 1.
@property
def max_score(self):
return max(self.seed_scores)
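# Illustrative usage sketch (assuming Gym spaces and a finite seed set):
#   sampler = LevelSampler(
#       seeds=list(range(200)), obs_space=obs_space, action_space=action_space,
#       num_actors=args.num_processes, strategy='value_l1', staleness_coef=0.1)
#   seed = sampler.sample()                 # next level seed to play
#   sampler.update_with_rollouts(rollouts)  # re-score levels from collected rollouts
#   sampler.after_update()                  # reset partial scores after a policy update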
|
dcd-main
|
level_replay/level_sampler.py
|
# Copyright (c) 2020 Tianshou contributors
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a modified version of
# https://github.com/marlbenchmark/on-policy/blob/0fc8a9355bb7ce2589716eeb543f498edcc91dc6/onpolicy/algorithms/utils/popart.py
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import DeviceAwareModule
class PopArt(DeviceAwareModule):
def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5):
super(PopArt, self).__init__()
self.beta = beta
self.epsilon = epsilon
self.norm_axes = norm_axes
self.input_shape = input_shape
self.output_shape = output_shape
self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape))
self.bias = nn.Parameter(torch.Tensor(output_shape))
self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False)
self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False)
self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
self.mean.zero_()
self.mean_sq.zero_()
self.debiasing_term.zero_()
def forward(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(self.device)
return F.linear(input_vector, self.weight, self.bias)
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(self.device)
old_mean, old_stddev = self.mean, self.stddev
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
self.stddev.data = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)
self.weight.data = self.weight * old_stddev / self.stddev
self.bias.data = (old_stddev * self.bias + old_mean - self.mean) / self.stddev
def debiased_mean_var(self):
debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
def normalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(self.device)
mean, var = self.debiased_mean_var()
out = (input_vector - mean) / torch.sqrt(var)
return out
def denormalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(self.device)
mean, var = self.debiased_mean_var()
out = input_vector * torch.sqrt(var) + mean
return out
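# Illustrative usage sketch (assuming value targets shaped [batch, 1]; the exact call
# order follows the training loop):
#   popart = PopArt(input_shape=hidden_size, output_shape=1)
#   normalized_returns = popart.normalize(returns)   # targets for the value head
#   popart.update(returns)                           # refresh running mean/stddev
#   values = popart.denormalize(popart(features))    # unnormalized value predictions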
|
dcd-main
|
models/popart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .distributions import Categorical
from .common import *
class MultigridNetwork(DeviceAwareModule):
"""
Actor-Critic module
"""
def __init__(self,
observation_space,
action_space,
actor_fc_layers=(32, 32),
value_fc_layers=(32, 32),
conv_filters=16,
conv_kernel_size=3,
scalar_fc=5,
scalar_dim=4,
random_z_dim=0,
xy_dim=0,
recurrent_arch='lstm',
recurrent_hidden_size=256,
random=False):
super(MultigridNetwork, self).__init__()
self.random = random
self.action_space = action_space
num_actions = action_space.n
# Image embeddings
obs_shape = observation_space['image'].shape
m = obs_shape[-2] # x input dim
n = obs_shape[-1] # y input dim
c = obs_shape[-3] # channel input dim
self.image_conv = nn.Sequential(
Conv2d_tf(3, conv_filters, kernel_size=conv_kernel_size, stride=1, padding='valid'),
nn.Flatten(),
nn.ReLU()
)
self.image_embedding_size = (n-conv_kernel_size+1)*(m-conv_kernel_size+1)*conv_filters
self.preprocessed_input_size = self.image_embedding_size
# x, y positional embeddings
self.xy_embed = None
self.xy_dim = xy_dim
if xy_dim:
self.preprocessed_input_size += 2*xy_dim
# Scalar embedding
self.scalar_embed = None
self.scalar_dim = scalar_dim
if scalar_dim:
self.scalar_embed = nn.Linear(scalar_dim, scalar_fc)
self.preprocessed_input_size += scalar_fc
self.preprocessed_input_size += random_z_dim
self.base_output_size = self.preprocessed_input_size
# RNN
self.rnn = None
if recurrent_arch:
self.rnn = RNN(
input_size=self.preprocessed_input_size,
hidden_size=recurrent_hidden_size,
arch=recurrent_arch)
self.base_output_size = recurrent_hidden_size
# Policy head
self.actor = nn.Sequential(
make_fc_layers_with_hidden_sizes(actor_fc_layers, input_size=self.base_output_size),
Categorical(actor_fc_layers[-1], num_actions)
)
# Value head
self.critic = nn.Sequential(
make_fc_layers_with_hidden_sizes(value_fc_layers, input_size=self.base_output_size),
init_(nn.Linear(value_fc_layers[-1], 1))
)
apply_init_(self.modules())
self.train()
@property
def is_recurrent(self):
return self.rnn is not None
@property
def recurrent_hidden_state_size(self):
# """Size of rnn_hx."""
if self.rnn is not None:
return self.rnn.recurrent_hidden_state_size
else:
return 0
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def _forward_base(self, inputs, rnn_hxs, masks):
# Unpack input key values
image = inputs.get('image')
scalar = inputs.get('direction')
if scalar is None:
scalar = inputs.get('time_step')
x = inputs.get('x')
y = inputs.get('y')
in_z = inputs.get('random_z', torch.tensor([], device=self.device))
in_image = self.image_conv(image)
if self.xy_embed:
x = one_hot(self.xy_dim, x, device=self.device)
y = one_hot(self.xy_dim, y, device=self.device)
in_x = self.xy_embed(x)
in_y = self.xy_embed(y)
else:
in_x = torch.tensor([], device=self.device)
in_y = torch.tensor([], device=self.device)
if self.scalar_embed:
in_scalar = one_hot(self.scalar_dim, scalar).to(self.device)
in_scalar = self.scalar_embed(in_scalar)
else:
in_scalar = torch.tensor([], device=self.device)
in_embedded = torch.cat((in_image, in_x, in_y, in_scalar, in_z), dim=-1)
if self.rnn is not None:
core_features, rnn_hxs = self.rnn(in_embedded, rnn_hxs, masks)
else:
core_features = in_embedded
return core_features, rnn_hxs
def act(self, inputs, rnn_hxs, masks, deterministic=False):
if self.random:
B = inputs['image'].shape[0]
action = torch.zeros((B,1), dtype=torch.int64, device=self.device)
values = torch.zeros((B,1), device=self.device)
action_log_dist = torch.ones(B, self.action_space.n, device=self.device)
for b in range(B):
action[b] = self.action_space.sample()
return values, action, action_log_dist, rnn_hxs
core_features, rnn_hxs = self._forward_base(inputs, rnn_hxs, masks)
dist = self.actor(core_features)
value = self.critic(core_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_dist = dist.logits
return value, action, action_log_dist, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
core_features, rnn_hxs = self._forward_base(inputs, rnn_hxs, masks)
return self.critic(core_features)
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
core_features, rnn_hxs = self._forward_base(inputs, rnn_hxs, masks)
dist = self.actor(core_features)
value = self.critic(core_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
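# Illustrative usage sketch for MultigridNetwork. The 5x5 grid, 4 directions,
# 7 actions, and batch of 2 below are illustrative assumptions rather than the
# observation and action spaces used by the multigrid environments in this repo.
def _multigrid_network_usage_sketch():
    import gym
    obs_space = gym.spaces.Dict({
        'image': gym.spaces.Box(low=0, high=255, shape=(3, 5, 5), dtype=np.uint8),
        'direction': gym.spaces.Discrete(4),
    })
    act_space = gym.spaces.Discrete(7)
    net = MultigridNetwork(obs_space, act_space, recurrent_arch=None)
    inputs = {
        'image': torch.zeros(2, 3, 5, 5),      # batch of partial observations
        'direction': torch.zeros(2, 1),        # scalar input, one-hot encoded internally
    }
    value, action, action_log_dist, _ = net.act(inputs, rnn_hxs=None, masks=None)
    return value.shape, action.shape, action_log_dist.shape   # (2, 1), (2, 1), (2, 7)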
|
dcd-main
|
models/multigrid_models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .multigrid_models import MultigridNetwork
from .multigrid_global_critic_models import MultigridGlobalCriticNetwork
from .car_racing_models import CarRacingNetwork, CarRacingBezierAdversaryEnvNetwork
from .walker_models import BipedalWalkerStudentPolicy, BipedalWalkerAdversaryPolicy
|
dcd-main
|
models/__init__.py
|
# Copyright (c) 2017 Roberta Raileanu
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a modified version of:
# https://github.com/rraileanu/auto-drac/blob/master/ucb_rl2_meta/model.py
import math
import torch
import torch.nn as nn
from .common import init
class FixedCategorical(torch.distributions.Categorical):
"""
Categorical distribution object
"""
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
class Categorical(nn.Module):
"""
Categorical distribution (NN module)
"""
def __init__(self, num_inputs, num_outputs):
super(Categorical, self).__init__()
init_ = lambda m: init(
m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
gain=0.01)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
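# Illustrative usage sketch: the Categorical head maps features to a
# FixedCategorical whose sample(), log_probs(), and mode() all keep a trailing
# dimension of 1, matching the (num_steps, num_processes, 1) action buffers
# used elsewhere in this repo. The 16-dim features and 6 actions are assumptions.
def _categorical_head_usage_sketch():
    head = Categorical(num_inputs=16, num_outputs=6)
    features = torch.randn(4, 16)
    dist = head(features)
    actions = dist.sample()              # shape (4, 1)
    log_probs = dist.log_probs(actions)  # shape (4, 1)
    greedy = dist.mode()                 # argmax action, shape (4, 1)
    return actions, log_probs, greedy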
|
dcd-main
|
models/distributions.py
|
# Copyright (c) 2017 Ilya Kostrikov
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a modified version of
# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/model.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
init_relu_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
init_tanh_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
def apply_init_(modules, gain=None):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
if gain:
nn.init.xavier_uniform_(m.weight, gain=gain)
else:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Flatten(nn.Module):
"""
Flatten a tensor
"""
def forward(self, x):
return x.reshape(x.size(0), -1)
class DeviceAwareModule(nn.Module):
@property
def device(self):
return next(self.parameters()).device
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get("padding", "same")
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(
0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == "valid":
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=0,
dilation=self.dilation,
groups=self.groups,
)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=self.dilation,
groups=self.groups,
)
class RNN(nn.Module):
"""
    Recurrent (GRU/LSTM) core that resets its hidden state at episode boundaries via masks
"""
def __init__(self, input_size, hidden_size=128, arch='lstm'):
super().__init__()
self.arch = arch
self.is_lstm = arch == 'lstm'
self._hidden_size = hidden_size
if arch == 'gru':
self.rnn = nn.GRU(input_size, hidden_size)
elif arch == 'lstm':
self.rnn = nn.LSTM(input_size, hidden_size)
else:
raise ValueError(f'Unsupported RNN architecture {arch}.')
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
nn.init.orthogonal_(param)
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
@property
def output_size(self):
return self._hidden_size
def forward(self, x, hxs, masks):
if self.is_lstm:
            # nn.LSTM defaults to all-zero states when given None, so fall back to x's batch size if hxs is None
hidden_batch_size = x.size(0) if hxs is None else hxs[0].size(0)
else:
hidden_batch_size = hxs.size(0)
if x.size(0) == hidden_batch_size:
masked_hxs = tuple((h*masks).unsqueeze(0) for h in hxs) if self.is_lstm \
else (hxs*masks).unsqueeze(0)
x, hxs = self.rnn(x.unsqueeze(0), masked_hxs)
x = x.squeeze(0)
hxs = tuple(h.squeeze(0) for h in hxs) if self.is_lstm else hxs.squeeze(0)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs[0].size(0) if self.is_lstm else hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0) \
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = (h.unsqueeze(0) for h in hxs) if self.is_lstm else hxs.unsqueeze(0)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
masked_hxs = tuple(h*masks[start_idx].view(1, -1, 1) for h in hxs) if self.is_lstm \
else hxs*masks[start_idx].view(1, -1, 1)
rnn_scores, hxs = self.rnn(
x[start_idx:end_idx],
masked_hxs)
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.view(T * N, -1)
hxs = tuple(h.squeeze(0) for h in hxs) if self.is_lstm else hxs.squeeze(0)
return x, hxs
def one_hot(dim, inputs, device='cpu'):
one_hot = torch.nn.functional.one_hot(inputs.long(), dim).squeeze(1).float()
return one_hot
def make_fc_layers_with_hidden_sizes(sizes, input_size):
fc_layers = []
for i, layer_size in enumerate(sizes[:-1]):
input_size = input_size if i == 0 else sizes[0]
output_size = sizes[i+1]
fc_layers.append(init_tanh_(nn.Linear(input_size, output_size)))
fc_layers.append(nn.Tanh())
return nn.Sequential(
*fc_layers
)
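# Illustrative usage sketches for two of the helpers above. The channel counts,
# kernel sizes, and batch sizes are illustrative assumptions only.
def _conv2d_tf_padding_sketch():
    # Default padding is TF-style 'same'; pass padding='valid' for no padding.
    conv_same = Conv2d_tf(3, 8, kernel_size=3, stride=2)
    conv_valid = Conv2d_tf(3, 8, kernel_size=3, stride=2, padding='valid')
    x = torch.zeros(1, 3, 15, 15)
    return conv_same(x).shape, conv_valid(x).shape   # (1, 8, 8, 8) vs (1, 8, 7, 7)
def _masked_rnn_sketch():
    # Single-step path: batch of 4 envs, LSTM state reset wherever masks == 0.
    core = RNN(input_size=8, hidden_size=16, arch='lstm')
    x = torch.zeros(4, 8)
    hxs = (torch.zeros(4, 16), torch.zeros(4, 16))   # (h, c) pair for the LSTM
    masks = torch.ones(4, 1)
    out, hxs = core(x, hxs, masks)
    return out.shape, hxs[0].shape                   # (4, 16), (4, 16)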
|
dcd-main
|
models/common.py
|
# Copyright (c) 2017 Ilya Kostrikov
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a modified version of code found in
# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Beta
from gym.spaces import MultiDiscrete
from collections import namedtuple
from .common import *
from torch.distributions.normal import Normal
from .popart import PopArt
# Necessary for my KFAC implementation.
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return self.mean
class DiagGaussian(DeviceAwareModule):
def __init__(self, num_inputs, num_outputs):
super(DiagGaussian, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Categorical, self).__init__()
init_ = lambda m: init(
m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
gain=0.01)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
## Policy from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/model.py
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class BipedalWalkerStudentPolicy(DeviceAwareModule):
def __init__(self, obs_shape, action_space, recurrent=False,base_kwargs=None):
super(BipedalWalkerStudentPolicy, self).__init__()
if base_kwargs is None:
base_kwargs = {}
self.base = MLPBase(obs_shape[0], recurrent=recurrent, **base_kwargs)
num_outputs = action_space.shape[0]
self.dist = DiagGaussian(self.base.output_size, num_outputs)
if recurrent:
RNN = namedtuple("RNN", 'arch')
self.rnn = RNN('gru')
@property
def is_recurrent(self):
return self.base.is_recurrent
@property
def recurrent_hidden_state_size(self):
"""Size of rnn_hx."""
return self.base.recurrent_hidden_state_size
def forward(self, inputs):
value, action, action_log_probs, rnn_hxs = self.act(inputs, rnn_hxs=None, masks=None, deterministic=False)
return action
def act(self, inputs, rnn_hxs, masks, deterministic=False):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action, action_log_probs, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
value, _, _ = self.base(inputs, rnn_hxs, masks)
return value
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
class BipedalWalkerAdversaryPolicy(DeviceAwareModule):
def __init__(self, observation_space, action_space, editor=False, random=False, base_kwargs=None):
super(BipedalWalkerAdversaryPolicy, self).__init__()
if base_kwargs is None:
base_kwargs = {}
self.random = random
self.design_dim = observation_space['image'].shape[0]
self.random_z_dim = observation_space['random_z'].shape[0]
obs_dim = self.design_dim + self.random_z_dim + 1
self.base = MLPBase(obs_dim, **base_kwargs)
self.editor = editor
self.action_dim = action_space.shape[0]
if self.editor:
self.dist = [Categorical(self.base.output_size, x) for x in action_space.nvec]
self.action_space = action_space
else:
self.dist = DiagGaussian(self.base.output_size, self.action_dim)
@property
def is_recurrent(self):
return self.base.is_recurrent
@property
def recurrent_hidden_state_size(self):
"""Size of rnn_hx."""
return self.base.recurrent_hidden_state_size
def preprocess(self, inputs):
obs = torch.cat([inputs['image'], inputs['random_z'], inputs['time_step']], axis=1)
return obs
def act(self, inputs, rnn_hxs, masks, deterministic=False):
inputs = self.preprocess(inputs)
if self.random:
if self.editor:
B = inputs.shape[0]
action = torch.zeros((B, 2), dtype=torch.int64, device=self.device)
action_log_dist = torch.ones(B, self.action_space.nvec[0] + self.action_space.nvec[1], device=self.device)
for b in range(B):
action[b] = torch.tensor(self.action_space.sample()).to(self.device)
else:
action = torch.tensor(np.random.uniform(-1,1, inputs.shape[0]), device=self.device).reshape(-1,1)
action_log_dist = torch.ones(inputs.shape[0], self.action_dim, device=self.device)
values = torch.zeros(inputs.shape[0], 1, device=self.device)
return values, action, action_log_dist, rnn_hxs
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
if self.editor:
dist = [fwd(actor_features) for fwd in self.dist]
else:
dist = self.dist(actor_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action = F.tanh(action)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action, action_log_probs, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
inputs = self.preprocess(inputs)
value, _, _ = self.base(inputs, rnn_hxs, masks)
return value
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
inputs = self.preprocess(inputs)
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
def __init__(self, recurrent, recurrent_input_size, hidden_size):
super(NNBase, self).__init__()
self._hidden_size = hidden_size
self._recurrent = recurrent
if recurrent:
self.rnn = nn.GRU(recurrent_input_size, hidden_size)
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
nn.init.orthogonal_(param)
@property
def is_recurrent(self):
return self._recurrent
@property
def recurrent_hidden_state_size(self):
if self._recurrent:
return self._hidden_size
return 1
@property
def output_size(self):
return self._hidden_size
def _forward_gru(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
x = x.squeeze(0)
hxs = hxs.squeeze(0)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0) \
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.unsqueeze(0)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
rnn_scores, hxs = self.rnn(
x[start_idx:end_idx],
hxs * masks[start_idx].view(1, -1, 1))
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.view(T * N, -1)
hxs = hxs.squeeze(0)
return x, hxs
class MLPBase(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=64):
super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)
if recurrent:
num_inputs = hidden_size
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.actor = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
self.critic = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
hidden_critic = self.critic(x)
hidden_actor = self.actor(x)
return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
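# Illustrative usage sketch for BipedalWalkerStudentPolicy. The 24-dim
# observation and 4-dim Box action space mirror BipedalWalker's interface but
# are written out here as assumptions rather than read from the environment.
def _walker_student_policy_sketch():
    from gym import spaces
    action_space = spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
    policy = BipedalWalkerStudentPolicy(obs_shape=(24,), action_space=action_space)
    obs = torch.zeros(8, 24)
    value, action, action_log_probs, _ = policy.act(obs, rnn_hxs=None, masks=None)
    return value.shape, action.shape, action_log_probs.shape   # (8, 1), (8, 4), (8, 1)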
|
dcd-main
|
models/walker_models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Beta
from .common import *
from .distributions import FixedCategorical, Categorical
from .popart import PopArt
class CarRacingNetwork(DeviceAwareModule):
def __init__(self,
obs_shape,
action_space,
hidden_size=100,
crop=False,
use_popart=False):
super(CarRacingNetwork, self).__init__()
m = obs_shape[-2] # x input dim
n = obs_shape[-1] # y input dim
c = obs_shape[-3] # channel input dim
self.action_low = np.array(action_space.low, dtype=np.float32)
self.action_high = np.array(action_space.high, dtype=np.float32)
self.action_dim = action_space.shape[0]
self.crop = crop
if self.crop:
self.image_embed = nn.Sequential( # input shape (4, 84, 84)
nn.Conv2d(c, 8, kernel_size=2, stride=2),
nn.ReLU(), # activation
nn.Conv2d(8, 16, kernel_size=2, stride=2), # (8, 42, 42)
nn.ReLU(), # activation
nn.Conv2d(16, 32, kernel_size=2, stride=2), # (16, 21, 21)
nn.ReLU(), # activation
nn.Conv2d(32, 64, kernel_size=2, stride=2), # (32, 10, 10)
nn.ReLU(), # activation
nn.Conv2d(64, 128, kernel_size=3, stride=1), # (64, 5, 5)
nn.ReLU(), # activation
nn.Conv2d(128, 256, kernel_size=3, stride=1), # (128, 3, 3)
nn.ReLU(), # activation
) # output shape (256, 1, 1)
else:
self.image_embed = nn.Sequential( # input shape (4, 96, 96)
nn.Conv2d(c, 8, kernel_size=4, stride=2),
nn.ReLU(), # activation
nn.Conv2d(8, 16, kernel_size=3, stride=2), # (8, 47, 47)
nn.ReLU(), # activation
nn.Conv2d(16, 32, kernel_size=3, stride=2), # (16, 23, 23)
nn.ReLU(), # activation
nn.Conv2d(32, 64, kernel_size=3, stride=2), # (32, 11, 11)
nn.ReLU(), # activation
nn.Conv2d(64, 128, kernel_size=3, stride=1), # (64, 5, 5)
nn.ReLU(), # activation
nn.Conv2d(128, 256, kernel_size=3, stride=1), # (128, 3, 3)
nn.ReLU(), # activation
) # output shape (256, 1, 1)
self.image_embedding_size = 256
        # Policy head
self.actor_fc = nn.Sequential(
init_relu_(nn.Linear(self.image_embedding_size, hidden_size)),
nn.ReLU(),
)
self.actor_alpha = nn.Sequential(
init_relu_(nn.Linear(hidden_size, self.action_dim)),
nn.Softplus(),
)
self.actor_beta = nn.Sequential(
init_relu_(nn.Linear(hidden_size, self.action_dim)),
nn.Softplus(),
)
# Value head
if use_popart:
value_out = init_(PopArt(hidden_size, 1))
self.popart = value_out
else:
value_out = init_(nn.Linear(hidden_size, 1))
self.popart = None
self.critic = nn.Sequential(
init_relu_(nn.Linear(self.image_embedding_size, hidden_size)),
nn.ReLU(),
value_out
)
# apply_init_(self.modules(), gain=nn.init.calculate_gain('relu'))
self.apply(self._weights_init)
self.train()
@staticmethod
def _weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
nn.init.constant_(m.bias, 0.1)
@property
def is_recurrent(self):
return False
@property
def recurrent_hidden_state_size(self):
# """Size of rnn_hx."""
return 1
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def process_action(self, action):
return action*(self.action_high - self.action_low) + self.action_low
def act(self, inputs, rnn_hxs, masks, deterministic=False):
image_embedding = self.image_embed(inputs).squeeze()
actor_fc_embed = self.actor_fc(image_embedding)
alpha = 1 + self.actor_alpha(actor_fc_embed)
beta = 1 + self.actor_beta(actor_fc_embed)
dist = Beta(alpha, beta)
# action = alpha/(alpha + beta)
action = dist.sample()
# For continuous action spaces, we just return dirac delta over
# sampled action tuple
action_log_dist = dist.log_prob(action).sum(dim=-1).unsqueeze(-1)
value = self.critic(image_embedding)
if inputs.shape[0] == 1:
action = action.unsqueeze(0)
return value, action, action_log_dist, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
image_embedding = self.image_embed(inputs).squeeze()
return self.critic(image_embedding)
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
image_embedding = self.image_embed(inputs).squeeze()
actor_fc_embed = self.actor_fc(image_embedding)
alpha = self.actor_alpha(actor_fc_embed) + 1
beta = self.actor_beta(actor_fc_embed) + 1
a_range = (torch.min(alpha).item(), torch.max(alpha).item())
b_range = (torch.min(beta).item(), torch.max(beta).item())
dist = Beta(alpha, beta)
action_log_probs = dist.log_prob(action).sum(dim=-1).unsqueeze(-1)
dist_entropy = dist.entropy().mean()
value = self.critic(image_embedding)
return value, action_log_probs, dist_entropy, rnn_hxs
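# Illustrative usage sketch for CarRacingNetwork. The 4x96x96 frame stack and
# the (steer, gas, brake) Box bounds below are assumptions for illustration;
# the real shapes come from the CarRacing environment wrappers in this repo.
def _car_racing_policy_sketch():
    from gym import spaces
    action_space = spaces.Box(low=np.array([-1.0, 0.0, 0.0], dtype=np.float32),
                              high=np.array([1.0, 1.0, 1.0], dtype=np.float32))
    net = CarRacingNetwork(obs_shape=(4, 96, 96), action_space=action_space)
    obs = torch.zeros(2, 4, 96, 96)
    value, action, _, _ = net.act(obs, rnn_hxs=None, masks=None)
    env_action = net.process_action(action)   # rescale Beta samples from [0, 1] to [low, high]
    return value.shape, action.shape, env_action.shape   # (2, 1), (2, 3), (2, 3)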
class CarRacingBezierAdversaryEnvNetwork(DeviceAwareModule):
def __init__(self,
observation_space,
action_space,
scalar_fc=8,
use_categorical=False,
use_skip=False,
choose_start_pos=False,
use_popart=False,
set_start_pos=False,
use_goal=False,
num_goal_bins=1):
super().__init__()
self.sketch_dim = observation_space['control_points'].shape
self.random_z_dim = observation_space['random_z'].shape[0]
self.total_time_steps = observation_space['time_step'].high[0]
self.time_step_dim = self.total_time_steps + 1 # Handles terminal time step
if use_goal:
self.goal_bin_dim = observation_space['goal_bin'].high[0]
self.scalar_fc = scalar_fc
self.set_start_pos = set_start_pos
self.use_categorical = use_categorical
self.use_skip = use_skip
self.use_goal = use_goal
self.num_goal_bins = num_goal_bins
self.random = False
self.action_space = action_space
self.n_control_points = self.time_step_dim
if self.use_goal:
self.n_control_points -= 1
if self.set_start_pos:
self.n_control_points -= 1
if use_categorical:
self.action_dim = np.prod(self.sketch_dim) + 1 # +1 for skip action
else:
self.action_dim = action_space.shape[0]
if self.use_goal:
                self.action_dim -= 1 # Since we don't learn a Beta head for the goal prefix
self.sketch_embedding = nn.Sequential(
Conv2d_tf(1, 8, kernel_size=2, stride=1, padding='valid'), # (8, 9, 9)
Conv2d_tf(8, 16, kernel_size=2, stride=1, padding='valid'), # (16, 8, 8)
nn.Flatten(),
nn.ReLU() # output is 1024 dimensions
)
self.sketch_embed_dim = 1024
# Time step embedding
self.ts_embedding = nn.Linear(self.time_step_dim, scalar_fc)
self.base_output_size = self.sketch_embed_dim + \
self.scalar_fc + self.random_z_dim
if use_goal:
self.goal_bin_embedding = nn.Linear(num_goal_bins + 1, self.scalar_fc)
self.base_output_size += self.scalar_fc
# Value head
self.critic = init_(nn.Linear(self.base_output_size, 1))
# Value head
if use_popart:
self.critic = init_(PopArt(self.base_output_size, 1))
self.popart = self.critic
else:
self.critic = init_(nn.Linear(self.base_output_size, 1))
self.popart = None
# Policy heads
if self.use_categorical:
self.actor = nn.Sequential(
init_(nn.Linear(self.base_output_size, 256)),
nn.ReLU(),
init_(nn.Linear(256, self.action_dim)),
)
else:
self.fc_alpha = nn.Sequential(
init_relu_(nn.Linear(self.base_output_size, self.action_dim)),
nn.Softplus()
)
self.fc_beta = nn.Sequential(
init_relu_(nn.Linear(self.base_output_size, self.action_dim)),
nn.Softplus()
)
# Create a policy head to select a goal bin
if use_goal:
self.goal_head = nn.Sequential(
init_(nn.Linear(self.base_output_size, 256)),
nn.ReLU(),
init_(nn.Linear(256, num_goal_bins)),
)
apply_init_(self.modules())
self.train()
@property
def is_recurrent(self):
return False
@property
def recurrent_hidden_state_size(self):
# """Size of rnn_hx."""
return 1
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def _forward_base(self, inputs, rnn_hxs, masks):
sketch = inputs['control_points']
time_step = inputs['time_step']
in_z = inputs['random_z']
in_sketch = self.sketch_embedding(sketch)
in_time_step = one_hot(self.time_step_dim, time_step).to(self.device)
in_time_step = self.ts_embedding(in_time_step)
if self.use_goal:
goal_bin = inputs['goal_bin']
in_goal_bin = one_hot(self.goal_bin_dim, goal_bin).to(self.device)
in_goal_bin = self.goal_bin_embedding(in_goal_bin)
in_embedded = torch.cat((in_sketch, in_time_step, in_z, in_goal_bin), dim=-1)
else:
in_embedded = torch.cat((in_sketch, in_time_step, in_z), dim=-1)
return in_embedded
def process_action(self, action):
if self.use_goal:
if action[0][0]: # Check if it's a goal step
action_ = action[:,1]
return action_
else:
action = action[:,1:]
if self.use_categorical:
x = ((action - 1.) % self.sketch_dim[-1])/self.sketch_dim[-1]
y = ((action - 1.) // self.sketch_dim[-2])/self.sketch_dim[-2]
skip_action = (action == 0).float()
action_ = torch.cat((x,y,skip_action), dim=-1)
else:
xy_action = action[:,:-1]
skip_logits = torch.log(action[:,-1] + 1e-5) # ensure > 0
skip_action = F.gumbel_softmax(skip_logits, tau=1, hard=True).unsqueeze(-1)
action_ = torch.cat((xy_action, skip_action), dim=-1)
return action_
def _sketch_to_mask(self, sketch):
mask = torch.cat((torch.zeros(sketch.shape[0],1, dtype=torch.bool, device=self.device),
(sketch.flatten(1).bool())),
dim=-1)
return mask
def _is_goal_step(self, t):
if self.use_goal:
return t == self.total_time_steps - 1 # since time_step is +1 total time steps
else:
return False
def _is_start_pos_step(self, t):
if self.set_start_pos:
return t == self.n_control_points
else:
return False
def act_random(self, inputs, rnn_hxs):
time_step = inputs['time_step'][0]
is_goal_step = self._is_goal_step(time_step)
B = inputs['time_step'].shape[0]
values = torch.zeros((B,1), device=self.device)
action_log_dist = torch.ones((B,1), device=self.device)
action_shape = self.action_space.shape
if self.use_goal:
action_shape = (action_shape[0] - 1,)
# import pdb; pdb.set_trace()
if is_goal_step:
# random goal bin
action = torch.zeros((B,1), dtype=torch.int64, device=self.device)
for b in range(B):
action[b] = np.random.randint(self.num_goal_bins)
else:
action = torch.zeros(B, *action_shape,
dtype=torch.int64, device=self.device)
if self.use_categorical:
action_high = self.action_space.high[1] if self.use_goal \
else self.action_space.high[0]
for b in range(B):
action[b] = np.random.randint(1, action_high) # avoid skip action 0
else:
for b in range(B):
                    action[b] = torch.from_numpy(np.random.rand(*action_shape))
if self.use_goal:
if is_goal_step:
prefix = torch.ones_like(action[:,0].unsqueeze(-1))
else:
prefix = torch.zeros_like(action[:,0].unsqueeze(-1))
action = torch.cat((prefix, action), dim=-1)
return values, action, action_log_dist, rnn_hxs
def act(self, inputs, rnn_hxs, masks, deterministic=False):
if self.random:
return self.act_random(inputs, rnn_hxs)
in_embedded = self._forward_base(inputs, rnn_hxs, masks)
value = self.critic(in_embedded)
time_step = inputs['time_step'][0]
is_goal_step = self._is_goal_step(time_step)
if is_goal_step:
# generate goal bin action
logits = self.goal_head(in_embedded)
dist = FixedCategorical(logits=logits)
action = dist.sample()
action_log_probs = dist.log_probs(action)
else:
if self.use_categorical:
logits = self.actor(in_embedded)
mask = self._sketch_to_mask(inputs['control_points'])
# Conditionally mask out skip action
if self.use_skip:
if self._is_start_pos_step(time_step):
mask[:,0] = True # Can't skip setting start pos if necessary
else:
mask[mask.sum(-1) < 3,0] = True
else:
mask[:,0] = True
logits[mask] = torch.finfo(logits.dtype).min
dist = FixedCategorical(logits=logits)
action = dist.sample()
action_log_probs = dist.log_probs(action)
else:
# All B x 3
alpha = 1 + self.fc_alpha(in_embedded)
beta = 1 + self.fc_beta(in_embedded)
dist = Beta(alpha, beta)
action = dist.sample()
action_log_probs = dist.log_prob(action).sum(dim=1).unsqueeze(1)
# Hack: Just set action log dist to action log probs, since it's not used.
action_log_dist = action_log_probs
# Append [0] or [1] prefix to actions to signal goal step
if self.use_goal:
if is_goal_step:
prefix = torch.ones_like(action[:,0].unsqueeze(-1))
else:
prefix = torch.zeros_like(action[:,0].unsqueeze(-1))
action = torch.cat((prefix, action), dim=-1)
return value, action, action_log_dist, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
in_embedded = self._forward_base(inputs, rnn_hxs, masks)
return self.critic(in_embedded)
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
B = len(inputs['time_step'])
in_embedded = self._forward_base(inputs, rnn_hxs, masks)
value = self.critic(in_embedded)
time_steps = inputs['time_step']
mask = self._sketch_to_mask(inputs['control_points'])
if self.use_goal:
action = action[:,1:]
# Need to mask out both selectively
goal_steps = self._is_goal_step(time_steps)
if self.use_goal:
goal_steps = goal_steps.flatten()
has_goal_steps = goal_steps.any()
has_nongoal_steps = (~goal_steps).any()
else:
has_goal_steps = False
has_nongoal_steps = True
if has_goal_steps:
# Get logits for goal actions
goal_in_embed = in_embedded[goal_steps]
action_in_embed = in_embedded[~goal_steps]
mask = mask[~goal_steps]
goal_actions = action[goal_steps][0]
goal_logits = self.goal_head(goal_in_embed)
action = action[~goal_steps]
goal_dist = FixedCategorical(logits=goal_logits)
goal_action_log_probs = goal_dist.log_probs(goal_actions)
else:
action_in_embed = in_embedded
if has_nongoal_steps:
if self.use_categorical:
logits = self.actor(action_in_embed)
if self.use_skip:
start_pos_steps = self._is_start_pos_step(time_steps[~goal_steps])
mask[mask.sum(-1) < 3,0] = True
logits[start_pos_steps] = torch.finfo(logits.dtype).min
else:
mask[:,0] = True
logits[mask] = torch.finfo(logits.dtype).min
dist = FixedCategorical(logits=logits)
action_log_probs = dist.log_probs(action)
else:
# All B x 3
alpha = 1 + self.fc_alpha(action_in_embed)
beta = 1 + self.fc_beta(action_in_embed)
dist = Beta(alpha, beta)
action_log_probs = dist.log_prob(action).sum(dim=1).unsqueeze(1)
if self.use_goal:
combined_log_probs = torch.zeros((B,1), dtype=torch.float, device=self.device)
mean_entropy = 0
if goal_steps.any():
combined_log_probs[goal_steps] = goal_action_log_probs
mean_entropy += goal_dist.entropy().sum()
if (~goal_steps).any():
combined_log_probs[~goal_steps] = action_log_probs
mean_entropy += dist.entropy().sum()
action_log_probs = combined_log_probs
dist_entropy = mean_entropy/B
else:
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
|
dcd-main
|
models/car_racing_models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .distributions import Categorical
from .common import *
class MultigridGlobalCriticNetwork(DeviceAwareModule):
"""
Actor-Critic module
"""
def __init__(self,
observation_space,
action_space,
actor_fc_layers=(32, 32),
value_fc_layers=(32, 32),
conv_filters=16,
conv_kernel_size=3,
scalar_fc=5,
scalar_dim=4,
random_z_dim=0,
xy_dim=0,
recurrent_arch='lstm',
recurrent_hidden_size=256,
use_global_policy=False):
super(MultigridGlobalCriticNetwork, self).__init__()
num_actions = action_space.n
self.use_global_policy = use_global_policy
# Image embedding
obs_shape = observation_space['image'].shape
m = obs_shape[-2] # x input dim
n = obs_shape[-1] # y input dim
c = obs_shape[-3] # channel input dim
# Full obs embedding
full_obs_shape = observation_space['full_obs'].shape
global_m = full_obs_shape[-2]
global_n = full_obs_shape[-1]
global_c = full_obs_shape[-3]
self.global_image_conv = nn.Sequential(
            Conv2d_tf(3, 8, kernel_size=2, stride=2, padding='valid'),
            nn.ReLU(),
            Conv2d_tf(8, 16, kernel_size=3, stride=1, padding='valid'),
nn.Flatten(),
)
        self.global_image_embedding_size = (((((global_m-2)//2)+1)-3)+1)*(((((global_n-2)//2)+1)-3)+1)*16
if self.use_global_policy:
self.image_conv = self.global_image_conv
self.image_embedding_size = self.global_image_embedding_size
self.preprocessed_input_size = self.image_embedding_size
else:
self.image_conv = nn.Sequential(
                Conv2d_tf(3, conv_filters, kernel_size=conv_kernel_size, stride=1, padding='valid'),
nn.Flatten(),
nn.ReLU()
)
self.image_embedding_size = (n-conv_kernel_size+1)*(m-conv_kernel_size+1)*conv_filters
self.preprocessed_input_size = self.image_embedding_size
# x, y positional embeddings
self.xy_embed = None
self.xy_dim = xy_dim
if xy_dim:
self.preprocessed_input_size += 2*xy_dim
# Scalar embedding
self.scalar_embed = None
self.scalar_dim = scalar_dim
if scalar_dim:
self.scalar_embed = nn.Linear(scalar_dim, scalar_fc)
self.preprocessed_input_size += scalar_fc
self.preprocessed_input_size += random_z_dim
self.base_output_size = self.preprocessed_input_size
# RNN (only for policy)
self.rnn = None
if recurrent_arch:
self.rnn = RNN(
input_size=self.preprocessed_input_size,
hidden_size=recurrent_hidden_size,
arch=recurrent_arch)
self.base_output_size = recurrent_hidden_size
# Policy head
self.actor = nn.Sequential(
make_fc_layers_with_hidden_sizes(actor_fc_layers, input_size=self.base_output_size),
Categorical(actor_fc_layers[-1], num_actions)
)
# Value head
if self.use_global_policy:
self.global_base_output_size = self.base_output_size
else:
self.global_base_output_size = self.global_image_embedding_size + self.base_output_size
self.critic = nn.Sequential(
make_fc_layers_with_hidden_sizes(value_fc_layers, input_size=self.global_base_output_size),
init_(nn.Linear(value_fc_layers[-1], 1))
)
apply_init_(self.modules())
self.train()
@property
def is_recurrent(self):
return self.rnn is not None
@property
def recurrent_hidden_state_size(self):
# """Size of rnn_hx."""
if self.rnn is not None:
return self.rnn.recurrent_hidden_state_size
else:
return 0
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def _forward_base(self, inputs, rnn_hxs, masks):
# Unpack input key values
if self.use_global_policy:
image = inputs.get('full_obs', None)
else:
image = inputs.get('image')
scalar = inputs.get('direction')
if scalar is None:
scalar = inputs.get('time_step')
x = inputs.get('x')
y = inputs.get('y')
in_z = inputs.get('random_z', torch.tensor([], device=self.device))
in_image = self.image_conv(image)
if self.xy_embed:
x = one_hot(self.xy_dim, x, device=self.device)
y = one_hot(self.xy_dim, y, device=self.device)
in_x = self.xy_embed(x)
in_y = self.xy_embed(y)
else:
in_x = torch.tensor([], device=self.device)
in_y = torch.tensor([], device=self.device)
if self.scalar_embed:
in_scalar = one_hot(self.scalar_dim, scalar).to(self.device)
in_scalar = self.scalar_embed(in_scalar)
else:
in_scalar = torch.tensor([], device=self.device)
in_embedded = torch.cat((in_image, in_x, in_y, in_scalar, in_z), dim=-1)
if self.rnn is not None:
core_features, rnn_hxs = self.rnn(in_embedded, rnn_hxs, masks)
else:
core_features = in_embedded
global_image = inputs.get('full_obs', None)
if global_image is not None:
if self.use_global_policy:
global_core_features = core_features
else:
in_global_image = self.global_image_conv(global_image)
global_core_features = torch.cat((core_features, in_global_image), dim=-1)
else:
global_core_features = None
return core_features, rnn_hxs, global_core_features
def act(self, inputs, rnn_hxs, masks, deterministic=False):
core_features, rnn_hxs, global_core_features = self._forward_base(inputs, rnn_hxs, masks)
dist = self.actor(core_features)
if global_core_features is not None:
value = self.critic(global_core_features)
else:
value = 0
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_dist = dist.logits
return value, action, action_log_dist, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
core_features, rnn_hxs, global_core_features = self._forward_base(inputs, rnn_hxs, masks)
if global_core_features is not None:
value = self.critic(global_core_features)
else:
value = 0
return value
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
core_features, rnn_hxs, global_core_features = self._forward_base(inputs, rnn_hxs, masks)
dist = self.actor(core_features)
if global_core_features is not None:
value = self.critic(global_core_features)
else:
value = 0
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
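# Illustrative usage sketch for MultigridGlobalCriticNetwork: the actor sees the
# partial 'image' observation while the critic additionally consumes the global
# 'full_obs' view. The 5x5 partial view, 15x15 full grid, and 7 actions are
# illustrative assumptions, not this repo's environment dimensions.
def _multigrid_global_critic_sketch():
    import gym
    obs_space = gym.spaces.Dict({
        'image': gym.spaces.Box(low=0, high=255, shape=(3, 5, 5), dtype=np.uint8),
        'direction': gym.spaces.Discrete(4),
        'full_obs': gym.spaces.Box(low=0, high=255, shape=(3, 15, 15), dtype=np.uint8),
    })
    act_space = gym.spaces.Discrete(7)
    net = MultigridGlobalCriticNetwork(obs_space, act_space, recurrent_arch=None)
    inputs = {
        'image': torch.zeros(2, 3, 5, 5),
        'direction': torch.zeros(2, 1),
        'full_obs': torch.zeros(2, 3, 15, 15),
    }
    value, action, action_log_dist, _ = net.act(inputs, rnn_hxs=None, masks=None)
    return value.shape, action.shape   # (2, 1), (2, 1)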
|
dcd-main
|
models/multigrid_global_critic_models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .ppo import PPO
from .storage import RolloutStorage
from .agent import ACAgent
|
dcd-main
|
algos/__init__.py
|
# Copyright (c) 2017 Ilya Kostrikov
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a heavily modified version of:
# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/storage.py
from collections import defaultdict
import numpy as np
import torch
import gym
from torch.utils.data.sampler import \
BatchSampler, SubsetRandomSampler, SequentialSampler
from lempel_ziv_complexity import lempel_ziv_complexity
def to_tensor(a):
if isinstance(a, dict):
for k in a.keys():
if isinstance(a[k], np.ndarray):
a[k] = torch.from_numpy(a[k]).float()
elif isinstance(a, np.ndarray):
a = torch.from_numpy(a).float()
elif isinstance(a, list):
a = torch.tensor(a, dtype=torch.float)
return a
def _flatten_helper(T, N, _tensor):
if isinstance(_tensor, dict):
return {k: _tensor[k].view(T * N, *_tensor[k].size()[2:]) for k in _tensor.keys()}
else:
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
def __init__(self,
model,
num_steps, num_processes, observation_space, action_space,
recurrent_hidden_state_size, recurrent_arch='rnn',
use_proper_time_limits=False,
use_popart=False,
device='cpu'):
self.device = device
self.model = model
self.num_processes = num_processes
self.recurrent_arch = recurrent_arch
self.recurrent_hidden_state_size = recurrent_hidden_state_size
self.is_lstm = recurrent_arch == 'lstm'
recurrent_hidden_state_buffer_size = 2*recurrent_hidden_state_size if self.is_lstm \
else recurrent_hidden_state_size
self.use_proper_time_limits = use_proper_time_limits
self.use_popart = use_popart
self.truncated_obs = None
if isinstance(observation_space, dict):
self.is_dict_obs = True
self.obs = {k:torch.zeros(num_steps + 1, num_processes, *(observation_space[k]).shape) \
for k,obs in observation_space.items()}
if self.use_proper_time_limits:
self.truncated_obs = {k:torch.zeros(num_steps + 1, num_processes, *(observation_space[k]).shape) \
for k,obs in observation_space.items()}
else:
self.is_dict_obs = False
self.obs = torch.zeros(num_steps + 1, num_processes, *observation_space.shape)
if self.use_proper_time_limits:
self.truncated_obs = torch.zeros_like(self.obs)
self.recurrent_hidden_states = torch.zeros(
num_steps + 1, num_processes, recurrent_hidden_state_buffer_size)
self.rewards = torch.zeros(num_steps, num_processes, 1)
self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)
self.returns = torch.zeros(num_steps + 1, num_processes, 1)
self.action_log_probs = torch.zeros(num_steps, num_processes, 1)
if action_space.__class__.__name__ == 'Discrete':
action_shape = 1
self.action_log_dist = torch.zeros(num_steps, num_processes, action_space.n)
else: # Hack it to just store action prob for sampled action if continuous
action_shape = action_space.shape[0]
self.action_log_dist = torch.zeros(num_steps, num_processes, 1)
self.actions = torch.zeros(num_steps, num_processes, action_shape)
if action_space.__class__.__name__ == 'Discrete':
self.actions = self.actions.long()
self.masks = torch.ones(num_steps + 1, num_processes, 1)
# Masks that indicate whether it's a true terminal state
# or time limit end state
self.bad_masks = torch.ones(num_steps + 1, num_processes, 1)
# Keep track of cliffhanger timesteps
self.cliffhanger_masks = torch.ones(num_steps + 1, num_processes, 1)
self.truncated_value_preds = None
if self.use_proper_time_limits:
self.truncated_value_preds = torch.zeros_like(self.value_preds)
self.denorm_value_preds = None
self.level_seeds = torch.zeros(num_steps, num_processes, 1, dtype=torch.int)
self.num_steps = num_steps
self.step = 0
def to(self, device):
self.device = device
if self.is_dict_obs:
for k, obs in self.obs.items():
self.obs[k] = obs.to(device)
else:
self.obs = self.obs.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.action_log_dist = self.action_log_dist.to(device)
self.actions = self.actions.to(device)
self.masks = self.masks.to(device)
self.bad_masks = self.bad_masks.to(device)
self.cliffhanger_masks = self.cliffhanger_masks.to(device)
self.level_seeds = self.level_seeds.to(device)
if self.use_proper_time_limits:
if self.is_dict_obs:
for k, obs in self.truncated_obs.items():
self.truncated_obs[k] = obs.to(device)
else:
self.truncated_obs = self.truncated_obs.to(device)
self.truncated_value_preds = self.truncated_value_preds.to(device)
def get_obs(self, idx):
if self.is_dict_obs:
return {k: self.obs[k][idx] for k in self.obs.keys()}
else:
return self.obs[idx]
def copy_obs_to_index(self, obs, index):
if self.is_dict_obs:
[self.obs[k][index].copy_(obs[k]) for k in self.obs.keys()]
else:
self.obs[index].copy_(obs)
def insert(self, obs, recurrent_hidden_states, actions, action_log_probs, action_log_dist,
value_preds, rewards, masks, bad_masks, level_seeds=None, cliffhanger_masks=None):
if len(rewards.shape) == 3: rewards = rewards.squeeze(2)
if self.is_dict_obs:
[self.obs[k][self.step + 1].copy_(obs[k]) for k in self.obs.keys()]
else:
self.obs[self.step + 1].copy_(obs)
if self.is_lstm:
self.recurrent_hidden_states[self.step +1,:,
:self.recurrent_hidden_state_size].copy_(recurrent_hidden_states[0])
self.recurrent_hidden_states[self.step +1,:,
self.recurrent_hidden_state_size:].copy_(recurrent_hidden_states[1])
else:
self.recurrent_hidden_states[self.step + 1].copy_(recurrent_hidden_states)
self.actions[self.step].copy_(actions)
self.action_log_probs[self.step].copy_(action_log_probs)
self.action_log_dist[self.step].copy_(action_log_dist)
self.value_preds[self.step].copy_(value_preds)
self.rewards[self.step].copy_(rewards)
self.masks[self.step + 1].copy_(masks)
self.bad_masks[self.step + 1].copy_(bad_masks)
if cliffhanger_masks is not None:
self.cliffhanger_masks[self.step + 1].copy_(cliffhanger_masks)
if level_seeds is not None:
self.level_seeds[self.step].copy_(level_seeds)
self.step = (self.step + 1) % self.num_steps
def insert_truncated_obs(self, obs, index):
if self.is_dict_obs:
[self.truncated_obs[k][self.step + 1][index].copy_(
to_tensor(obs[k])) for k in self.truncated_obs.keys()]
else:
self.truncated_obs[self.step + 1][index].copy_(to_tensor(obs))
def after_update(self):
if self.is_dict_obs:
[self.obs[k][0].copy_(self.obs[k][-1]) for k in self.obs.keys()]
else:
self.obs[0].copy_(self.obs[-1])
self.recurrent_hidden_states[0].copy_(self.recurrent_hidden_states[-1])
self.masks[0].copy_(self.masks[-1])
self.bad_masks[0].copy_(self.bad_masks[-1])
self.cliffhanger_masks[0].copy_(self.cliffhanger_masks[-1])
def replace_final_return(self, returns):
self.rewards[-1] = returns
def _compute_truncated_value_preds(self):
self.truncated_value_preds.copy_(self.value_preds)
with torch.no_grad():
# For each process, forward truncated obs
for i in range(self.num_processes):
steps = (self.bad_masks[:,i,0] == 0).nonzero().squeeze()
if len(steps.shape) == 0 or steps.shape[0] == 0:
continue
if self.is_dict_obs:
obs = {k:self.truncated_obs[k][steps.squeeze(), i, :]
for k in self.truncated_obs.keys()}
else:
obs = self.truncated_obs[steps.squeeze(),i,:]
rnn_hxs = self.recurrent_hidden_states[steps,i,:]
if self.is_lstm:
rnn_hxs = self._split_batched_lstm_recurrent_hidden_states(rnn_hxs)
masks = torch.ones((len(steps), 1), device=self.device)
value_preds = self.model.get_value(obs, rnn_hxs, masks)
self.truncated_value_preds[steps,i,:] = value_preds
return self.truncated_value_preds
def compute_gae_returns(self,
returns_buffer,
next_value,
gamma,
gae_lambda):
self.value_preds[-1] = next_value
gae = 0
value_preds = self.value_preds
if self.use_proper_time_limits:
# Get truncated value preds
self._compute_truncated_value_preds()
value_preds = self.truncated_value_preds
if self.use_popart:
self.denorm_value_preds = self.model.popart.denormalize(value_preds) # denormalize all value predictions
value_preds = self.denorm_value_preds
for step in reversed(range(self.rewards.size(0))):
delta = self.rewards[step] + \
gamma*value_preds[step + 1]*self.masks[step + 1] - value_preds[step]
gae = delta + gamma * gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + value_preds[step]
def compute_discounted_returns(self,
returns_buffer,
next_value,
gamma):
self.value_preds[-1] = next_value
value_preds = self.value_preds
if self.use_proper_time_limits:
self._compute_truncated_value_preds()
value_preds = self.truncated_value_preds
        if self.use_popart:
            self.denorm_value_preds = self.model.popart.denormalize(value_preds) # denormalize all value predictions
            value_preds = self.denorm_value_preds
        self.returns[-1] = value_preds[-1]
for step in reversed(range(self.rewards.size(0))):
returns_buffer[step] = returns_buffer[step + 1] * \
gamma * self.masks[step + 1] + self.rewards[step]
def compute_returns(self,
next_value,
use_gae,
gamma,
gae_lambda):
if use_gae:
self.compute_gae_returns(
self.returns, next_value, gamma, gae_lambda)
else:
self.compute_discounted_returns(
self.returns, next_value, gamma)
def get_batched_value_loss(self,
signed=False,
positive_only=False,
power=1,
clipped=True,
batched=True):
"""
Assumes buffer contains pre-computed returns via compute_returns.
Computes the mean episodic value loss per batch.
"""
# If agent uses popart, then value_preds are normalized, while
# returns are not.
if self.use_popart:
value_preds = self.denorm_value_preds[:-1]
else:
value_preds = self.value_preds[:-1]
returns = self.returns[:-1]
if signed:
td = returns - value_preds
elif positive_only:
td = (returns - value_preds).clamp(0)
else:
td = (returns - value_preds).abs()
if power > 1:
td = td**power
batch_td = td.mean(0) # B x 1
if clipped:
batch_td = torch.clamp(batch_td, -1, 1)
if batched:
return batch_td
else:
return batch_td.mean().item()
def get_batched_action_complexity(self):
"""
Returns per-batch LZ complexity scores of the action trajectories in the buffer
"""
num_processes = self.actions.shape[1]
batched_complexity = torch.zeros(num_processes, 1, dtype=torch.float)
for b in range(num_processes):
num_traj = 0
avg_complexity = 0
done_steps = [0] + (self.masks[:,b,0] == 0).nonzero().flatten().tolist()
for i, t in enumerate(done_steps[:-1]):
if len(done_steps) > 1:
next_done = done_steps[i+1]
else:
next_done = self.actions.shape[0]
action_str = ' '.join([str(a.item()) for a in self.actions[t:next_done,b,0]])
avg_complexity += lempel_ziv_complexity(action_str)
num_traj += 1
batched_complexity[b] = avg_complexity/num_traj
return batched_complexity
def get_action_complexity(self):
"""
Returns mean LZ complexity scores of the action trajectories in the buffer
"""
num_processes = self.actions.shape[1]
avg_complexity = 0
num_traj = 0
for b in range(num_processes):
done_steps = [0] + (self.masks[:,b,0] == 0).nonzero().flatten().tolist()
for i, t in enumerate(done_steps[:-1]):
if len(done_steps) > 1:
next_done = done_steps[i+1]
else:
next_done = self.actions.shape[0]
action_str = ' '.join([str(a.item()) for a in self.actions[t:next_done,b,0]])
avg_complexity += lempel_ziv_complexity(action_str)
num_traj += 1
return avg_complexity/num_traj
def get_action_traj(self, as_string=False):
if as_string:
num_processes = self.actions.shape[1]
traj = []
for b in range(num_processes):
action_str = ' '.join([str(a.item()) for a in self.actions[:,b,0]])
traj.append(action_str)
return traj
else:
return self.actions.squeeze(-1)
def _split_batched_lstm_recurrent_hidden_states(self, hxs):
return (hxs[:, :self.recurrent_hidden_state_size],
hxs[:, self.recurrent_hidden_state_size:])
def get_recurrent_hidden_state(self, step):
if self.is_lstm:
return self._split_batched_lstm_recurrent_hidden_states(
self.recurrent_hidden_states[step,:].squeeze(0))
return self.recurrent_hidden_states[step]
def feed_forward_generator(self,
advantages,
num_mini_batch=None,
mini_batch_size=None):
num_steps, num_processes = self.rewards.size()[0:2]
batch_size = num_processes * num_steps
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(num_processes, num_steps, num_processes * num_steps,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
sampler = BatchSampler(
SubsetRandomSampler(range(batch_size)),
mini_batch_size,
drop_last=False)
for indices in sampler:
if self.is_dict_obs:
obs_batch = {k: self.obs[k][:-1].view(-1, *self.obs[k].size()[2:])[indices] for k in self.obs.keys()}
else:
obs_batch = self.obs[:-1].view(-1, *self.obs.size()[2:])[indices]
recurrent_hidden_states_batch = self.recurrent_hidden_states[:-1].view(
-1, self.recurrent_hidden_states.size(-1))[indices]
actions_batch = self.actions.view(-1,
self.actions.size(-1))[indices]
value_preds_batch = self.value_preds[:-1].view(-1, 1)[indices]
return_batch = self.returns[:-1].view(-1, 1)[indices]
masks_batch = self.masks[:-1].view(-1, 1)[indices]
old_action_log_probs_batch = self.action_log_probs.view(-1,
1)[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages.view(-1, 1)[indices]
if self.is_lstm:
# Split into (hxs, cxs) for LSTM
recurrent_hidden_states_batch = \
self._split_batched_lstm_recurrent_hidden_states(recurrent_hidden_states_batch)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ
def recurrent_generator(self, advantages, num_mini_batch):
num_processes = self.rewards.size(1)
assert num_processes >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(num_processes, num_mini_batch))
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
if self.is_dict_obs:
obs_batch = defaultdict(list)
else:
obs_batch = []
recurrent_hidden_states_batch = []
actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
if self.is_dict_obs:
[obs_batch[k].append(self.obs[k][:-1,ind]) for k in self.obs.keys()]
else:
obs_batch.append(self.obs[:-1, ind])
recurrent_hidden_states_batch.append(
self.recurrent_hidden_states[0:1, ind])
actions_batch.append(self.actions[:, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
old_action_log_probs_batch.append(
self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
T, N = self.num_steps, num_envs_per_batch
# These are all tensors of size (T, N, -1)
if self.is_dict_obs:
for k in obs_batch.keys():
obs_batch[k] = torch.stack(obs_batch[k],1)
else:
obs_batch = torch.stack(obs_batch, 1)
actions_batch = torch.stack(actions_batch, 1)
value_preds_batch = torch.stack(value_preds_batch, 1)
return_batch = torch.stack(return_batch, 1)
masks_batch = torch.stack(masks_batch, 1)
old_action_log_probs_batch = torch.stack(
old_action_log_probs_batch, 1)
adv_targ = torch.stack(adv_targ, 1)
# States is just a (N, -1) tensor
recurrent_hidden_states_batch = torch.stack(
recurrent_hidden_states_batch, 1).view(N, -1)
# Flatten the (T, N, ...) tensors to (T * N, ...)
obs_batch = _flatten_helper(T, N, obs_batch)
actions_batch = _flatten_helper(T, N, actions_batch)
value_preds_batch = _flatten_helper(T, N, value_preds_batch)
return_batch = _flatten_helper(T, N, return_batch)
masks_batch = _flatten_helper(T, N, masks_batch)
old_action_log_probs_batch = _flatten_helper(T, N, \
old_action_log_probs_batch)
adv_targ = _flatten_helper(T, N, adv_targ)
if self.is_lstm:
# Split into (hxs, cxs) for LSTM
recurrent_hidden_states_batch = \
self._split_batched_lstm_recurrent_hidden_states(recurrent_hidden_states_batch)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ
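# Illustrative usage sketch (not part of the original file): assuming a populated
# rollout storage `rollouts` and an `advantages` tensor of shape
# (num_steps, num_processes, 1), PPO mini-batches can be drawn as
#
#   for sample in rollouts.feed_forward_generator(advantages, num_mini_batch=4):
#       obs_b, hxs_b, actions_b, values_b, returns_b, masks_b, old_logp_b, adv_b = sample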
|
dcd-main
|
algos/storage.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
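# ACAgent pairs an update algorithm (e.g. PPO) with a rollout storage, exposing
# act / insert / update / get_value as a single actor-critic agent interface.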
class ACAgent(object):
def __init__(self, algo, storage):
self.algo = algo
self.storage = storage
def update(self, discard_grad=False):
info = self.algo.update(self.storage, discard_grad=discard_grad)
self.storage.after_update()
return info
def to(self, device):
self.algo.actor_critic.to(device)
self.storage.to(device)
return self
def train(self):
self.algo.actor_critic.train()
def eval(self):
self.algo.actor_critic.eval()
def random(self):
self.algo.actor_critic.random = True
def process_action(self, action):
if hasattr(self.algo.actor_critic, 'process_action'):
return self.algo.actor_critic.process_action(action)
else:
return action
def act(self, *args, **kwargs):
return self.algo.actor_critic.act(*args, **kwargs)
def get_value(self, *args, **kwargs):
return self.algo.actor_critic.get_value(*args, **kwargs)
def insert(self, *args, **kwargs):
return self.storage.insert(*args, **kwargs)
@property
def is_recurrent(self):
return self.algo.actor_critic.is_recurrent
|
dcd-main
|
algos/agent.py
|
# Copyright (c) 2017 Ilya Kostrikov
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a modified version of:
# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/algo/ppo.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
class PPO():
"""
Vanilla PPO
"""
def __init__(self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
entropy_coef,
lr=None,
eps=None,
max_grad_norm=None,
clip_value_loss=True,
log_grad_norm=False):
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.num_mini_batch = num_mini_batch
self.clip_value_loss = clip_value_loss
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
self.log_grad_norm = log_grad_norm
def _grad_norm(self):
total_norm = 0
for p in self.actor_critic.parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
return total_norm
def update(self, rollouts, discard_grad=False):
if rollouts.use_popart:
value_preds = rollouts.denorm_value_preds
else:
value_preds = rollouts.value_preds
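# Advantages are returns minus value predictions, normalized to zero mean and
# unit standard deviation; the 1e-5 term avoids division by zero.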
advantages = rollouts.returns[:-1] - value_preds[:-1]
advantages = (advantages - advantages.mean()) / (
advantages.std() + 1e-5)
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
if self.log_grad_norm:
grad_norms = []
for e in range(self.ppo_epoch):
if self.actor_critic.is_recurrent:
data_generator = rollouts.recurrent_generator(
advantages, self.num_mini_batch)
else:
data_generator = rollouts.feed_forward_generator(
advantages, self.num_mini_batch)
for sample in data_generator:
obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
obs_batch, recurrent_hidden_states_batch, masks_batch,
actions_batch)
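# Clipped surrogate objective: the importance ratio exp(new_logp - old_logp) is
# clipped to [1 - clip_param, 1 + clip_param] and the pessimistic (minimum)
# surrogate is used for the policy loss.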
ratio = torch.exp(action_log_probs -
old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
if rollouts.use_popart:
self.actor_critic.popart.update(return_batch)
return_batch = self.actor_critic.popart.normalize(return_batch)
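# Value loss: optionally clip the new value predictions around the old ones
# (PPO-style value clipping); otherwise fall back to a smooth L1 loss.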
if self.clip_value_loss:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (
value_pred_clipped - return_batch).pow(2)
value_loss = 0.5 * torch.max(value_losses,
value_losses_clipped).mean()
else:
value_loss = F.smooth_l1_loss(values, return_batch)
self.optimizer.zero_grad()
loss = (value_loss*self.value_loss_coef + action_loss - dist_entropy*self.entropy_coef)
loss.backward()
if self.log_grad_norm:
grad_norms.append(self._grad_norm())
if self.max_grad_norm is not None and self.max_grad_norm > 0:
nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.max_grad_norm)
if not discard_grad:
self.optimizer.step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
dist_entropy_epoch += dist_entropy.item()
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
info = {}
if self.log_grad_norm:
info = {'grad_norms': grad_norms}
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch, info
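# Illustrative usage sketch (not part of the original file): names such as
# `actor_critic` and `rollouts` are assumptions standing in for objects built
# elsewhere in the codebase.
#
#   algo = PPO(actor_critic, clip_param=0.2, ppo_epoch=20, num_mini_batch=1,
#              value_loss_coef=0.5, entropy_coef=0.0, lr=1e-4, eps=1e-5,
#              max_grad_norm=0.5)
#   value_loss, action_loss, entropy, info = algo.update(rollouts)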
|
dcd-main
|
algos/ppo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import csv
import argparse
from collections import defaultdict
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
sns.set_palette("bright")
"""
Usage:
Plot MiniGrid results for 25 blocks
(Robust PLR and REPAIRED use half as many gradient updates as the baselines):
python results/plot_eval_bars.py \
-r results/minigrid_ood \
-f \
mg_25_blocks-dr-250M_steps.csv \
mg_25_blocks-minimax-250M_steps.csv \
mg_25_blocks-paired-250M_steps.csv \
mg_25_blocks-repaired-250M_steps.csv \
mg_25_blocks-plr-250M_steps.csv \
mg_25_blocks-robust_plr-250M_steps.csv \
-l "DR" Minimax PAIRED REPAIRED PLR "Robust PLR" \
--savename minigrid_25_blocks_eval
------------------------------------------------------------------------
Plot MiniGrid results for uniform block count in [0,60]:
python results/plot_eval_bars.py \
-r results/minigrid_ood \
-f \
mg_60_blocks_uni-dr_20k_updates.csv \
mg_60_blocks_uni-robust_plr_20k_updates.csv \
mg_60_blocks-accel_20k_updates.csv \
-l "DR" "Robust PLR" ACCEL \
--figsize 24,2 \
--savename minigrid_60_blocks_uni_eval
Plot BipedalWalker results:
python results/plot_eval_bars.py \
-r results/bipedal \
-f \
bipedal8d-dr_20k-updates.csv \
bipedal8d-robust_plr-20k_updates.csv \
bipedal8d-accel_20k-updates.csv \
-l "DR" "Robust PLR" ACCEL \
--savename bipedal_eval
"""
def parse_args():
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
'-r', '--result_path',
type=str,
default='results/',
help='Relative path to results directory.'
)
parser.add_argument(
'-f', '--files',
type=str,
nargs='+',
default=['test.csv', 'test2.csv'],
help='Name of results .csv file, output by eval.py.'
)
parser.add_argument(
'-l', '--labels',
type=str,
nargs='+',
default=[],
help='Name of condition corresponding to each results file.'
)
parser.add_argument(
'--row_prefix',
type=str,
default='solved_rate',
help='Plot rows in results .csv whose metric column matches this prefix.'
)
parser.add_argument(
'-m', '--metrics',
type=str,
nargs='+',
default=[],
help='List of metric names to plot, without the --row_prefix.',
)
parser.add_argument(
'--include',
type=str,
nargs='+',
default=None,
help='Further filter matched metric rows with a list of substrings.'
)
parser.add_argument(
'--ylabel',
type=str,
default='Solved rate',
help='Y-axis label.'
)
parser.add_argument(
'--savename',
type=str,
default='latest',
help='Filename of saved .pdf of plot, saved to figures/.'
)
parser.add_argument(
'--figsize',
type=str,
default='(14,2)',
help='Dimensions of output figure.'
)
return parser.parse_args()
LABEL_COLORS = {
'DR': 'gray',
'Minimax': 'red',
'PAIRED': (0.8859561388376407,0.5226505841897354,0.195714831410001),
'REPAIRED': (0.2038148518479279,0.6871367484391159,0.5309645021239799),
'PLR': (0.9637256003082545,0.40964669235271706,0.7430230442501574),
'PLR Robust': (0.3711152842731098,0.6174124752499043,0.9586047646790773),
'Robust PLR': (0.3711152842731098,0.6174124752499043,0.9586047646790773),
'ACCEL': (0.30588235,0.83921569,0.27843137)
}
ENV_ALIASES = {
'SixteenRooms': '16Rooms',
'SixteenRoomsFewerDoors': '16Rooms2',
'SimpleCrossingS11N5': 'SimpleCrossing',
'PerfectMazeMedium': 'PerfectMazeMed',
'BipedalWalkerHardcore': 'HardCore'
}
if __name__ == '__main__':
args = parse_args()
assert(len(args.files) == len(args.labels))
num_labels = len(args.labels)
colors = sns.husl_palette(num_labels, h=.1)
df = pd.DataFrame()
for i, f in enumerate(args.files):
fpath = os.path.join(args.result_path, f)
df_ = pd.read_csv(fpath)
df_ = df_[df_['metric'].str.startswith(args.row_prefix)]
if args.include is not None:
df_ = df_[df_['metric'].str.contains('|'.join(args.include))]
df_['label'] = args.labels[i]
out_cols = ['metric', 'label']
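# Aggregate each row's per-seed columns into a median and interquartile range;
# half the IQR, (q3 - q1) / 2, is used below as the error bar.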
df_['median'] = df_.median(axis=1)
df_['q1'] = df_.quantile(0.25, axis=1)
df_['q3'] = df_.quantile(0.75, axis=1)
out_cols += ['median', 'q1', 'q3']
out = df_[out_cols]
df = pd.concat([df, out])
df_metrics = df['metric'].unique()
num_subplots = len(df_metrics)
nrows,ncols = 1,num_subplots
f, axes = plt.subplots(nrows=nrows, ncols=ncols,
sharey=True, sharex=True,
figsize=eval(args.figsize))
x=np.arange(len(args.labels))
width=0.35
for i, ax in enumerate(axes.flatten()):
metric = df_metrics[i]
for j, label in enumerate(args.labels):
idx = (df['metric'] == metric) & (df['label'] == label)
if label in LABEL_COLORS:
color = LABEL_COLORS[label]
else:
color = colors[j]
if label == 'PLR Robust' or label == 'Robust PLR':
label = r'$\mathregular{PLR^{\perp}}$'
value = df[idx]['median']
error = (df[idx]['q3'] - df[idx]['q1'])/2.
x1 = ax.bar(x[j]*width, value, width, label=label, color=color, yerr=error)
title = metric.split(':')[-1].split('-')
if len(title) > 2:
title = ''.join(title[1:-1])
elif len(title) > 1:
title = title[0]
if title in ENV_ALIASES:
title = ENV_ALIASES[title]
ax.set_title(title, fontsize=14)
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().set_visible(False)
ax.set_yticks([0, 0.5, 1])
ax.set_ylim(0,1)
if i == 0:
ax.set_ylabel(args.ylabel, fontsize=12)
handles, labels = ax.get_legend_handles_labels()
legend = f.legend(handles, labels,
ncol=len(args.labels),
loc='upper center', bbox_to_anchor=(0.5,1.25),
frameon=False,
fontsize=12)
plt.savefig(f"figures/{args.savename}.pdf", bbox_extra_artists=(legend,), bbox_inches='tight')
plt.show()
|
dcd-main
|
results/plot_eval_bars.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import csv
import argparse
from collections import defaultdict
import numpy as np
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
sns.set_palette("bright")
"""
Usage:
Plot CarRacingF1 Benchmark results:
python results/plot_f1.py \
-r results/car_racing_f1 \
-f \
f1-dr-5M_steps.csv \
f1-paired-5M_steps.csv \
f1-repaired-5M_steps.csv \
f1-plr-5M_steps.csv \
f1-robust_plr-5M_steps.csv \
-l DR PAIRED REPAIRED PLR "PLR Robust" \
--num_test_episodes 10 \
--threshold 477.71 \
--threshold_label 'Tang et al, 2020' \
--savename f1_eval
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-r', '--result_path',
type=str,
default='result/',
help='Relative path to results directory.'
)
parser.add_argument(
'-f', '--files',
type=str,
nargs='+',
default=[],
help='Name of results .csv file, output by eval.py.'
)
parser.add_argument(
'-l', '--labels',
type=str,
nargs='+',
default=[],
help='Name of condition corresponding to each results file.'
)
parser.add_argument(
'-p', '--row_prefix',
type=str,
default='test_returns',
help='Plot rows in results .csv whose metric column matches this prefix.'
)
parser.add_argument(
'-t', '--num_test_episodes',
type=int,
default=10
)
parser.add_argument(
'--savename',
type=str,
default="latest",
help='Filename of saved .pdf of plot, saved to figures/.'
)
parser.add_argument(
'--figsize',
type=str,
default="2,2",
help='Dimensions of output figure.'
)
parser.add_argument(
'--threshold',
type=float,
default=None
)
parser.add_argument(
'--threshold_label',
type=str,
default=None
)
return parser.parse_args()
def agg_test_episodes_by_seed(row, num_test_episodes, stat='mean'):
assert(len(row) % num_test_episodes == 0)
total_steps = len(row) // num_test_episodes
row = [float(x) for x in row]
step = num_test_episodes
return [np.mean(row[i*step:i*step + step]) for i in range(total_steps)]
LABEL_COLORS = {
'DR': 'gray',
'PAIRED': (0.8859561388376407, 0.5226505841897354, 0.195714831410001),
'REPAIRED': (0.2038148518479279, 0.6871367484391159, 0.5309645021239799),
'PLR': (0.9637256003082545, 0.40964669235271706, 0.7430230442501574),
'PLR Robust': (0.3711152842731098, 0.6174124752499043, 0.9586047646790773),
'Robust PLR': (0.3711152842731098, 0.6174124752499043, 0.9586047646790773),
}
if __name__ == '__main__':
args = parse_args()
plt.rcParams["figure.figsize"] = eval(args.figsize)
result_path = os.path.expandvars(os.path.expanduser(args.result_path))
num_labels = len(args.labels)
colors = sns.husl_palette(num_labels, h=.1)
colors_ = []
for i, l in enumerate(args.labels):
if l in LABEL_COLORS:
colors_.append(LABEL_COLORS[l])
else:
colors_.append(colors[i])
colors = colors_
x = np.arange(len(args.files))
width = 0.35
all_stats = defaultdict(list)
for i, f in enumerate(args.files):
fpath = os.path.join(result_path, f)
with open(fpath, mode='r', newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
for row in csvreader:
if row[0].startswith(args.row_prefix):
agg_stats = agg_test_episodes_by_seed(row[1:], args.num_test_episodes)
all_stats[args.labels[i]] += agg_stats
label_stats = all_stats[args.labels[i]]
value = np.mean(label_stats)
err = stats.sem(label_stats)
label = args.labels[i]
if label == 'PLR Robust' or label == 'Robust PLR':
label = r'$\mathregular{PLR^{\perp}}$'
x1 = plt.bar(x[i]*width, value, width, label=label, color=colors[i], yerr=err)
x = np.arange(len(args.files))
plt.grid()
sns.despine(top=True, right=True, left=False, bottom=False)
plt.gca().get_xaxis().set_visible(False)
plt.ylabel('Test return', fontsize=12)
# Add threshold
if args.threshold:
plt.axhline(y=args.threshold, label=args.threshold_label, color='green', linestyle='dotted')
legend = plt.legend(
ncol=1,
loc='upper left', bbox_to_anchor=(1, 1),
frameon=False,
fontsize=12)
plt.savefig(f"figures/{args.savename}.pdf", bbox_extra_artists=(legend,), bbox_inches='tight')
plt.show()
|
dcd-main
|
results/plot_f1.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
def generate_train_cmds(
params, num_trials=1, start_index=0, newlines=False,
xpid_generator=None, xpid_prefix='', xvfb=False,
count_set=None):
separator = ' \\\n' if newlines else ' '
cmds = []
if xpid_generator:
params['xpid'] = xpid_generator(params, xpid_prefix)
start_seed = params['seed']
for t in range(num_trials):
params['seed'] = start_seed + t + start_index
if xvfb:
cmd = [f'xvfb-run -a -s "-screen 0 1400x900x24 +extension RANDR -noreset" -- python -m train']
else:
cmd = [f'python -m train']
trial_idx = t + start_index
for k,v in params.items():
if k == 'xpid':
v = f'{v}_{trial_idx}'
if count_set is not None:
count_set.add(v)
cmd.append(f'--{k}={v}')
cmd = separator.join(cmd)
cmds.append(cmd)
return cmds
def generate_all_params_for_grid(grid, defaults={}):
def update_params_with_choices(prev_params, param, choices):
updated_params = []
for v in choices:
for p in prev_params:
updated = p.copy()
updated[param] = v
updated_params.append(updated)
return updated_params
all_params = [{}]
for param, choices in grid.items():
all_params = update_params_with_choices(all_params, param, choices)
full_params = []
for p in all_params:
d = defaults.copy()
d.update(p)
full_params.append(d)
return full_params
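# Illustrative example (not from the original script): a grid of
#   {'lr': [1e-4, 1e-3], 'ppo_epoch': [5, 20]}
# with defaults {'seed': 88} expands to four parameter dicts, e.g.
#   {'seed': 88, 'lr': 1e-4, 'ppo_epoch': 5}, {'seed': 88, 'lr': 1e-4, 'ppo_epoch': 20}, ...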
def parse_args():
parser = argparse.ArgumentParser(description='Make commands')
parser.add_argument(
'--dir',
type=str,
default='train_scripts/grid_configs/',
help='Path to directory with .json configs')
parser.add_argument(
'--json',
type=str,
default=None,
help='Name of .json config for hyperparameter search-grid')
parser.add_argument(
'--num_trials',
type=int,
default=1,
help='Number of trials (seeds) to generate per configuration')
parser.add_argument(
'--start_index',
default=0,
type=int,
help='Starting trial index of xpid runs')
parser.add_argument(
'--count',
action='store_true',
help='Print number of generated commands at the end of output.')
parser.add_argument(
"--checkpoint",
action='store_true',
help='Whether to start from checkpoint'
)
parser.add_argument(
'--use_ucb',
action="store_true",
help='Whether to include ucb arguments.')
parser.add_argument(
'--xvfb',
action="store_true",
help='Whether to use xvfb.')
return parser.parse_args()
def xpid_from_params(p, prefix=''):
ued_algo = p['ued_algo']
is_train_env = ued_algo in ['paired', 'flexible_paired', 'minimax']
env_prefix = ''
if p['env_name'].startswith('MultiGrid') or p['env_name'].startswith('Bipedal'):
env_prefix = p['env_name']
elif p['env_name'].startswith('CarRacing'):
env_prefix = f"{p['env_name']}_{p['num_control_points']}pts"
if p.get('grayscale', False):
env_prefix = f"{env_prefix}_gray"
prefix_str = '' if prefix == '' else f'-{prefix}'
rnn_prefix = ''
rnn_agent = 'a' if p['recurrent_agent'] else ''
rnn_env = 'e' if p['recurrent_adversary_env'] and is_train_env else ''
if rnn_agent or rnn_env:
rnn_arch = p['recurrent_arch']
rnn_hidden = p['recurrent_hidden_size']
rnn_prefix = f'-{rnn_arch}{rnn_hidden}{rnn_agent}{rnn_env}'
ppo_prefix = f"-lr{p['lr']}-epoch{p['ppo_epoch']}-mb{p['num_mini_batch']}-v{p['value_loss_coef']}-gc{p['max_grad_norm']}"
if p['env_name'].startswith('CarRacing'):
clip_v_prefix = ''
if not p['clip_value_loss']:
clip_v_prefix = '-no_clipv'
ppo_prefix = f"{ppo_prefix}{clip_v_prefix}-gamma-{p['gamma']}-lambda{p['gae_lambda']}-gclip{p['clip_param']}"
entropy_prefix = f"-henv{p['adv_entropy_coef']}-ha{p['entropy_coef']}"
plr_prefix = ''
if p['use_plr']:
if 'level_replay_prob' in p and p['level_replay_prob'] > 0:
plr_prefix = f"-plr{p['level_replay_prob']}-rho{p['level_replay_rho']}-n{p['level_replay_seed_buffer_size']}-st{p['staleness_coef']}-{p['level_replay_strategy']}-{p['level_replay_score_transform']}-t{p['level_replay_temperature']}"
else:
plr_prefix = ''
editing_prefix = ''
if p['use_editor']:
editing_prefix = f"-editor{p['level_editor_prob']}-{p['level_editor_method']}-n{p['num_edits']}-base{p['base_levels']}"
timelimits = '-tl' if p['handle_timelimits'] else ''
global_critic = '-global' if p['use_global_critic'] else ''
noexpgrad = ''
if p['no_exploratory_grad_updates']:
noexpgrad = '-noexpgrad'
finetune = ''
if p.get('xpid_finetune', None):
finetune = f'-ft_{p["xpid_finetune"]}'
return f'ued{prefix_str}-{env_prefix}-{ued_algo}{finetune}{noexpgrad}{rnn_prefix}{ppo_prefix}{entropy_prefix}{plr_prefix}{editing_prefix}{global_critic}{timelimits}'
if __name__ == '__main__':
args = parse_args()
# Default parameters
params = {
'xpid': 'test',
# Env params
'env_name': 'MultiGrid-GoalLastAdversarial-v0',
'use_gae': True,
'gamma': 0.995,
'gae_lambda': 0.95,
'seed': 88,
# CarRacing specific
'num_control_points': 12,
# Model params
'recurrent_arch': 'lstm',
'recurrent_agent': True,
'recurrent_adversary_env': True,
'recurrent_hidden_size': 256,
'use_global_critic': False,
# Learning params
'lr': 1e-4,
'num_steps': 256, # unroll length
'num_processes': 32, # number of actor processes
'num_env_steps': 1000000000, # total training steps
'ppo_epoch': 20,
'num_mini_batch': 1,
'entropy_coef': 0.,
'value_loss_coef': 0.5,
'clip_param': 0.2,
'clip_value_loss': True,
'adv_entropy_coef': 0.,
'max_grad_norm': 0.5,
'algo': 'ppo',
'ued_algo': 'paired',
# PLR params
'use_plr': False,
'level_replay_prob': 0.0,
'level_replay_rho': 1.0,
'level_replay_seed_buffer_size': 5000,
'level_replay_score_transform': "rank",
'level_replay_temperature': 0.1,
'staleness_coef': 0.3,
'no_exploratory_grad_updates': False,
# Editor params
'use_editor': False,
'level_editor_prob': 0,
'level_editor_method': 'random',
'num_edits': 0,
'base_levels': 'batch',
# Logging params
'log_interval': 25,
'screenshot_interval':1000,
'log_grad_norm': False,
}
json_filename = args.json
if not json_filename.endswith('.json'):
json_filename += '.json'
grid_path = os.path.join(os.path.expandvars(os.path.expanduser(args.dir)), json_filename)
config = json.load(open(grid_path))
grid = config['grid']
xpid_prefix = '' if 'xpid_prefix' not in config else config['xpid_prefix']
if args.checkpoint:
params['checkpoint'] = True
# Generate all parameter combinations within grid, using defaults for fixed params
all_params = generate_all_params_for_grid(grid, defaults=params)
unique_xpids = None
if args.count:
unique_xpids = set()
# Print all commands
count = 0
for p in all_params:
cmds = generate_train_cmds(p,
num_trials=args.num_trials,
start_index=args.start_index,
newlines=True,
xpid_generator=xpid_from_params,
xpid_prefix=xpid_prefix,
xvfb=args.xvfb,
count_set=unique_xpids)
for c in cmds:
print(c + '\n')
count += 1
if args.count:
print(f'Generated {len(unique_xpids)} unique commands.')
|
dcd-main
|
train_scripts/make_cmd.py
|
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import importlib
import warnings
from gym import error, logger
# This format is true today, but it's *not* an official spec.
# [username/](env-name)-v(version) env-name is group 1, version is group 2
#
# 2016-10-31: We're experimentally expanding the environment ID format
# to include an optional username.
env_id_re = re.compile(r'^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$')
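# e.g. 'MultiGrid-GoalLastAdversarial-v0' matches with env-name group
# 'MultiGrid-GoalLastAdversarial' and version group '0'.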
def load(name):
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
fn = getattr(mod, attr_name)
return fn
class EnvSpec(object):
"""A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The official environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
reward_threshold (Optional[int]): The reward threshold before the task is considered solved
kwargs (dict): The kwargs to pass to the environment class
nondeterministic (bool): Whether this environment is non-deterministic even after seeding
tags (dict[str:any]): A set of arbitrary key-value tags on this environment, including simple property=True tags
max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of
Attributes:
id (str): The official environment ID
"""
def __init__(self, id, entry_point=None, reward_threshold=None, kwargs=None, nondeterministic=False, tags=None, max_episode_steps=None):
self.id = id
# Evaluation parameters
self.reward_threshold = reward_threshold
# Environment properties
self.nondeterministic = nondeterministic
self.entry_point = entry_point
if tags is None:
tags = {}
self.tags = tags
tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps
self.max_episode_steps = max_episode_steps
# We may make some of these other parameters public if they're
# useful.
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))
self._env_name = match.group(1)
self._kwargs = {} if kwargs is None else kwargs
def make(self, **kwargs):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self.entry_point is None:
raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
if callable(self.entry_point):
env = self.entry_point(**_kwargs)
else:
cls = load(self.entry_point)
env = cls(**_kwargs)
# Make the environment aware of which spec it came from.
env.unwrapped.spec = self
return env
def __repr__(self):
return "EnvSpec({})".format(self.id)
class EnvRegistry(object):
"""Register an env by ID. IDs remain stable over time and are
guaranteed to resolve to the same environment dynamics (or be
desupported). The goal is that results on a particular environment
should always be comparable, and not depend on the version of the
code that was running.
"""
def __init__(self):
self.env_specs = {}
def make(self, path, **kwargs):
if len(kwargs) > 0:
logger.info('Making new env: %s (%s)', path, kwargs)
else:
logger.info('Making new env: %s', path)
spec = self.spec(path)
env = spec.make(**kwargs)
# We used to have people override _reset/_step rather than
# reset/step. Set _gym_disable_underscore_compat = True on
# your environment if you use these methods and don't want
# compatibility code to be invoked.
if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
patch_deprecated_methods(env)
if (env.spec.max_episode_steps is not None) and not spec.tags.get('vnc'):
from envs.wrappers import TimeLimit
env = TimeLimit(env, max_episode_steps=env.spec.max_episode_steps)
return env
def all(self):
return self.env_specs.values()
def spec(self, path):
if ':' in path:
mod_name, _sep, id = path.partition(':')
try:
importlib.import_module(mod_name)
# catch ImportError for python2.7 compatibility
except ImportError:
raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))
else:
id = path
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))
try:
return self.env_specs[id]
except KeyError:
# Parse the env name and check to see if it matches the non-version
# part of a valid env (could also check the exact number here)
env_name = match.group(1)
matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()
if env_name == valid_env_spec._env_name]
if matching_envs:
raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))
else:
raise error.UnregisteredEnv('No registered env with id: {}'.format(id))
def register(self, id, **kwargs):
if id in self.env_specs:
raise error.Error('Cannot re-register id: {}'.format(id))
self.env_specs[id] = EnvSpec(id, **kwargs)
# Have a global registry
registry = EnvRegistry()
def register(id, **kwargs):
return registry.register(id, **kwargs)
def make(id, **kwargs):
return registry.make(id, **kwargs)
def spec(id):
return registry.spec(id)
warn_once = True
def patch_deprecated_methods(env):
"""
Methods were renamed from '_method' to 'method', render() no longer takes a 'close' parameter, and close() is a separate method.
For backward compatibility, this makes it possible to work with unmodified environments.
"""
global warn_once
if warn_once:
logger.warn("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env)))
warn_once = False
env.reset = env._reset
env.step = env._step
env.seed = env._seed
def render(mode):
return env._render(mode, close=False)
def close():
env._render("human", close=True)
env.render = render
env.close = close
|
dcd-main
|
envs/registration.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .multigrid.adversarial import *
|
dcd-main
|
envs/__init__.py
|
# Copyright (c) 2019 Antonin Raffin
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is an extended version of
# https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/vec_frame_stack.py
from .vec_env import VecEnvWrapper
import numpy as np
from gym import spaces
class VecFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack, obs_key=None):
self.venv = venv
self.n_frame_channels = venv.observation_space.shape[-1]
self.nstack = nstack
self.obs_key = obs_key
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.nstack, axis=-1)
high = np.repeat(wos.high, self.nstack, axis=-1)
self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
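# Frames are stacked along the channel (last) axis: step_wait shifts existing
# frames toward the front of the channel axis, zeroes the stack for any
# environment that just reset, and writes the newest observation into the
# trailing channels.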
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
if self.obs_key:
obs = obs[self.obs_key]
self.stackedobs = np.roll(self.stackedobs, shift=-self.n_frame_channels, axis=-1)
for (i, new) in enumerate(news):
if new:
self.stackedobs[i] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs, rews, news, infos
def reset(self, seed=None, index=None):
if seed is not None and index is not None:
obs = self.venv.seed(seed, index)
if self.obs_key:
obs = obs[self.obs_key]
self.stackedobs[index] = 0
self.stackedobs[index,...,-obs.shape[-1]:] = obs
return self.stackedobs[index,:]
else:
obs = self.venv.reset()
if self.obs_key:
obs = obs[self.obs_key]
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
def reset_agent(self):
obs = self.venv.reset_agent()
if self.obs_key:
obs = obs[self.obs_key]
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
def reset_random(self):
obs = self.venv.reset_random()
if self.obs_key:
obs = obs[self.obs_key]
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
|
dcd-main
|
envs/wrappers/vec_frame_stack.py
|
# Copyright (c) OpenAI
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is an extended version of
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_env.py
import contextlib
import os
from abc import ABC, abstractmethod
from baselines.common.tile_images import tile_images
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def step_env(self, actions, reset_random=False):
if reset_random:
self.step_env_reset_random_async(actions)
else:
self.step_env_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
class VecEnvWrapper(VecEnv):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
@abstractmethod
def process(self, obs):
pass
def reset(self):
obs = self.venv.reset()
return self.process(obs)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
@contextlib.contextmanager
def clear_mpi_env_vars():
"""
from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to temporarily clear those environment variables, for example when starting multiprocessing processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
|
dcd-main
|
envs/wrappers/vec_env.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import gym
from .vec_env import VecEnvWrapper
class AdversarialObservationWrapper(gym.core.Wrapper):
def step(self, action):
observation, reward, done, info = self.env.step(action)
return self.agent_observation(observation), reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def reset_agent(self, **kwargs):
observation = self.env.reset_agent(**kwargs)
return self.agent_observation(observation)
def reset_random(self, **kwargs):
observation = self.env.reset_random(**kwargs)
return self.agent_observation(observation)
def reset_to_level(self, level, **kwargs):
observation = self.env.reset_to_level(level, **kwargs)
return self.agent_observation(observation)
def agent_observation(self, observation):
raise NotImplementedError
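# VecPreprocessImageWrapper optionally rescales image observations, transposes
# them (e.g. HWC -> CHW), and converts numpy arrays to torch tensors on the
# target device; it handles both plain and dict observation spaces, and
# recomputes the (possibly transposed) observation spaces lazily via __getattr__.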
class VecPreprocessImageWrapper(VecEnvWrapper):
def __init__(self,
venv,
obs_key=None,
transpose_order=None,
scale=None,
channel_first=False,
to_tensor=True,
device=None):
super().__init__(venv)
self.is_dict_obs = isinstance(venv.observation_space, gym.spaces.Dict)
self.transpose_order = transpose_order
if self.transpose_order:
self.batch_transpose_order = [0,] + list([i + 1 for i in transpose_order])
else:
self.batch_transpose_order = None
self.obs_key = obs_key
self._obs_space = None
self._adversary_obs_space = None
self.to_tensor = to_tensor
self.device = device
# Colorspace parameters
self.scale = scale
self.channel_first = channel_first
self.channel_index = 1 if channel_first else -1
image_obs_space = self.venv.observation_space
if self.obs_key:
image_obs_space = image_obs_space[self.obs_key]
self.num_channels = image_obs_space.shape[self.channel_index]
delattr(self, 'observation_space')
def _obs_dict_to_tensor(self, obs):
for k in obs.keys():
if isinstance(obs[k], np.ndarray):
obs[k] = torch.from_numpy(obs[k]).float()
if self.device:
obs[k] = obs[k].to(self.device)
return obs
def _transpose(self, obs):
if len(obs.shape) == len(self.batch_transpose_order):
return obs.transpose(*self.batch_transpose_order)
else:
return obs.transpose(*self.transpose_order)
def _preprocess(self, obs, obs_key=None):
if obs_key is None:
if self.scale:
obs = obs/self.scale
if self.batch_transpose_order:
# obs = obs.transpose(*self.batch_transpose_order)
obs = self._transpose(obs)
if isinstance(obs, np.ndarray) and self.to_tensor:
obs = torch.from_numpy(obs).float()
if self.device:
obs = obs.to(self.device)
elif isinstance(obs, dict) and self.to_tensor:
obs = self._obs_dict_to_tensor(obs)
else:
if self.scale:
obs[self.obs_key] = obs[self.obs_key]/self.scale
if self.batch_transpose_order:
obs[self.obs_key] = self._transpose(obs[self.obs_key])
if 'full_obs' in obs:
obs['full_obs'] = self._transpose(obs['full_obs'])
if self.to_tensor:
obs = self._obs_dict_to_tensor(obs)
return obs
def _transpose_box_space(self, space):
if isinstance(space, gym.spaces.Box):
shape = np.array(space.shape)
shape = shape[self.transpose_order]
return gym.spaces.Box(
low=0,
high=255,
shape=shape,
dtype='uint8')
else:
raise ValueError('Expected gym.spaces.Box')
def _transpose_obs_space(self, obs_space):
if self.obs_key:
if isinstance(obs_space, gym.spaces.Dict):
keys = obs_space.spaces
else:
keys = obs_space.keys()
transposed_obs_space = {k:obs_space[k] for k in keys}
transposed_obs_space[self.obs_key] = \
self._transpose_box_space(transposed_obs_space[self.obs_key])
if 'full_obs' in transposed_obs_space:
transposed_obs_space['full_obs'] = \
self._transpose_box_space(transposed_obs_space['full_obs'])
else:
transposed_obs_space = self._transpose_box_space(obs_space)
return transposed_obs_space
# Public interface
def reset(self):
obs = self.venv.reset()
return self._preprocess(obs, obs_key=self.obs_key)
def reset_random(self):
obs = self.venv.reset_random()
return self._preprocess(obs, obs_key=self.obs_key)
def reset_agent(self):
obs = self.venv.reset_agent()
return self._preprocess(obs, obs_key=self.obs_key)
def reset_to_level(self, level, index):
obs = self.venv.reset_to_level(level, index)
return self._preprocess(obs, obs_key=self.obs_key)
def reset_to_level_batch(self, level):
obs = self.venv.reset_to_level_batch(level)
return self._preprocess(obs, obs_key=self.obs_key)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
obs = self._preprocess(obs, obs_key=self.obs_key)
for i, info in enumerate(infos):
if 'truncated_obs' in info:
truncated_obs = info['truncated_obs']
infos[i]['truncated_obs'] = \
self._preprocess(truncated_obs, obs_key=self.obs_key)
if self.to_tensor:
rews = torch.from_numpy(rews).unsqueeze(dim=1).float()
return obs, rews, dones, infos
def step_adversary(self, action):
obs, rews, dones, infos = self.venv.step_adversary(action)
obs = self._preprocess(obs, obs_key=self.obs_key)
if self.to_tensor:
rews = torch.from_numpy(rews).unsqueeze(dim=1).float()
return obs, rews, dones, infos
def get_observation_space(self):
if self._obs_space:
return self._obs_space
obs_space = self.venv.observation_space
if self.batch_transpose_order:
self._obs_space = self._transpose_obs_space(obs_space)
else:
self._obs_space = obs_space
return self._obs_space
def get_adversary_observation_space(self):
if self._adversary_obs_space:
return self._adversary_obs_space
adversary_obs_space = self.venv.adversary_observation_space
obs_space = self.venv.observation_space
same_shape = hasattr(adversary_obs_space, 'shape') and hasattr(obs_space, 'shape') and \
adversary_obs_space.shape == obs_space.shape
same_obs_key = self.obs_key and self.obs_key in adversary_obs_space
if self.batch_transpose_order and (same_shape or same_obs_key):
self._adversary_obs_space = self._transpose_obs_space(adversary_obs_space)
else:
self._adversary_obs_space = adversary_obs_space
return self._adversary_obs_space
def __getattr__(self, name):
if name == 'observation_space':
return self.get_observation_space()
elif name == 'adversary_observation_space':
return self.get_adversary_observation_space()
elif name == 'adversary_action_space':
return self.venv.get_adversary_action_space()
else:
return getattr(self.venv, name)
|
dcd-main
|
envs/wrappers/obs_wrappers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import gym
from gym import spaces
from gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX, STATE_TO_IDX
from .obs_wrappers import AdversarialObservationWrapper
class MultiGridFullyObsWrapper(AdversarialObservationWrapper):
"""
Fully observable gridworld using a compact grid encoding
"""
def __init__(self, env, is_adversarial=True):
super().__init__(env)
self.is_adversarial = is_adversarial
self.observation_space.spaces["full_obs"] = spaces.Box(
low=0,
high=255,
shape=(self.env.width, self.env.height, 3), # number of cells
dtype='uint8'
)
def agent_observation(self, obs):
env = self.unwrapped
full_grid = env.grid.encode()
# Note env.agent_pos is an array of length K, for K multigrid agents
if env.agent_pos[0] is not None:
full_grid[env.agent_pos[0][0]][env.agent_pos[0][1]] = np.array([
OBJECT_TO_IDX['agent'],
COLOR_TO_IDX['red'],
env.agent_dir[0]
])
obs['full_obs'] = full_grid
return obs
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
if self.is_adversarial:
return observation
else:
return self.agent_observation(observation)
|
dcd-main
|
envs/wrappers/multigrid_wrappers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .obs_wrappers import VecPreprocessImageWrapper, AdversarialObservationWrapper
from .parallel_wrappers import ParallelAdversarialVecEnv
from .time_limit import TimeLimit
from .vec_monitor import VecMonitor
from .vec_normalize import VecNormalize
from .vec_frame_stack import VecFrameStack
from .multigrid_wrappers import *
from .car_racing_wrappers import CarRacingWrapper
|
dcd-main
|
envs/wrappers/__init__.py
|
# Copyright (c) OpenAI
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is an extended version of
# https://github.com/openai/gym/blob/master/gym/wrappers/time_limit.py
import gym
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
if max_episode_steps is None and self.env.spec is not None:
max_episode_steps = env.spec.max_episode_steps
if self.env.spec is not None:
self.env.spec.max_episode_steps = max_episode_steps
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
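# When the step limit is reached the episode is terminated; info['truncated']
# records whether the underlying episode was still running and
# info['truncated_obs'] carries the final observation, so callers can
# distinguish timeouts from true terminal states when bootstrapping.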
def step(self, action):
assert self._elapsed_steps is not None, "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
info['truncated'] = not done
info['truncated_obs'] = observation
done = True
return observation, reward, done, info
def reset(self):
self._elapsed_steps = 0
return self.env.reset()
def reset_random(self):
self._elapsed_steps = 0
return self.env.reset_random()
def reset_to_level(self, level):
self._elapsed_steps = 0
return self.env.reset_to_level(level)
def reset_agent(self):
self._elapsed_steps = 0
return self.env.reset_agent()
|
dcd-main
|
envs/wrappers/time_limit.py
|
# Copyright (c) 2019 Antonin Raffin
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is an extended version of
# https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/vec_monitor.py
from .vec_env import VecEnvWrapper
from baselines.bench.monitor import ResultsWriter
import numpy as np
import time
from collections import deque
class VecMonitor(VecEnvWrapper):
def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()):
VecEnvWrapper.__init__(self, venv)
self.eprets = None
self.eplens = None
self.epcount = 0
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(filename, header={'t_start': self.tstart},
extra_keys=info_keywords)
else:
self.results_writer = None
self.info_keywords = info_keywords
self.keep_buf = keep_buf
if self.keep_buf:
self.epret_buf = deque([], maxlen=keep_buf)
self.eplen_buf = deque([], maxlen=keep_buf)
def reset(self):
obs = self.venv.reset()
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def reset_agent(self):
obs = self.venv.reset_agent()
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def reset_random(self):
obs = self.venv.reset_random()
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def reset_alp_gmm(self, level):
obs = self.venv.reset_alp_gmm(level)
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.eprets += rews
self.eplens += 1
newinfos = list(infos[:])
for i in range(len(dones)):
if dones[i]:
info = infos[i].copy()
ret = self.eprets[i]
eplen = self.eplens[i]
epinfo = {'r': ret, 'l': eplen, 't': round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
info['episode'] = epinfo
if self.keep_buf:
self.epret_buf.append(ret)
self.eplen_buf.append(eplen)
self.epcount += 1
self.eprets[i] = 0
self.eplens[i] = 0
if self.results_writer:
self.results_writer.write_row(epinfo)
newinfos[i] = info
return obs, rews, dones, newinfos
|
dcd-main
|
envs/wrappers/vec_monitor.py
|
# Copyright (c) 2019 Antonin Raffin
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is an extended version of
# https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/vec_normalize.py
from .vec_env import VecEnvWrapper
import numpy as np
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, use_tf=False):
VecEnvWrapper.__init__(self, venv)
if use_tf:
from baselines.common.running_mean_std import TfRunningMeanStd
self.ob_rms = TfRunningMeanStd(shape=self.observation_space.shape, scope='ob_rms') if ob else None
self.ret_rms = TfRunningMeanStd(shape=(), scope='ret_rms') if ret else None
else:
from baselines.common.running_mean_std import RunningMeanStd
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
self.ret[news] = 0.
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset()
return self._obfilt(obs)
def reset_agent(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset_agent()
return self._obfilt(obs)
def reset_random(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset_random()
return self._obfilt(obs)
def reset_alp_gmm(self, level):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset_alp_gmm(level)
return self._obfilt(obs)
|
dcd-main
|
envs/wrappers/vec_normalize.py
|
# Copyright (c) 2019 Antonin Raffin
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is a heavily modified version of
# https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/subproc_vec_env.py
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper
from baselines.common.vec_env.vec_env import clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
def step(env, action):
ob, reward, done, info = env.step(action)
if done:
ob = env.reset()
return ob, reward, done, info
def step_env(env, action, reset_random=False):
ob, reward, done, info = env.step(action)
if done:
if reset_random:
env.reset_random()
ob = env.reset_agent()
else:
ob = env.reset_agent()
return ob, reward, done, info
def get_env_attr(env, attr):
if hasattr(env, attr):
return getattr(env, attr)
while hasattr(env, 'env'):
env = env.env
if hasattr(env, attr):
return getattr(env, attr)
return None
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
remote.send([step(env, action) for env, action in zip(envs, data)])
elif cmd == 'step_env':
remote.send([step_env(env, action) for env, action in zip(envs, data)])
elif cmd == 'step_env_reset_random':
remote.send([step_env(env, action, reset_random=True) for env, action in zip(envs, data)])
elif cmd == 'observation_space':
remote.send(envs[0].observation_space)
elif cmd == 'adversary_observation_space':
remote.send(envs[0].adversary_observation_space)
elif cmd == 'adversary_action_space':
remote.send(envs[0].adversary_action_space)
elif cmd == 'max_steps':
remote.send(envs[0].max_steps)
elif cmd == 'render':
remote.send([env.render(mode='level') for env in envs])
elif cmd == 'render_to_screen':
remote.send([envs[0].render('human')])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces_spec':
remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
elif cmd == 'reset_to_level':
remote.send([envs[0].reset_to_level(data)])
elif cmd == 'reset_alp_gmm':
remote.send([envs[0].reset_alp_gmm(data)])
elif cmd == 'max_episode_steps':
max_episode_steps = get_env_attr(envs[0], '_max_episode_steps')
remote.send(max_episode_steps)
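# Generic fallback: treat any other command as an attribute or method of the
# wrapped envs. Callables are invoked with the payload (broadcast across envs
# when a single value is sent); plain attributes are returned as-is.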
elif hasattr(envs[0], cmd):
attrs = [getattr(env, cmd) for env in envs]
is_callable = hasattr(attrs[0], '__call__')
if is_callable:
if not hasattr(data, '__len__'):
data = [data]*len(attrs)
remote.send([attr(d) if d is not None else attr() for attr, d in zip(attrs, data)])
else:
remote.send([attr for attr in attrs])
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
for env in envs:
env.close()
class SubprocVecEnv(VecEnv):
"""
VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, context='spawn', in_series=1, is_eval=False):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
in_series: number of environments to run in series in a single process
(e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
"""
self.waiting = False
self.closed = False
self.in_series = in_series
nenvs = len(env_fns)
assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
self.nremotes = nenvs // in_series
env_fns = np.array_split(env_fns, self.nremotes)
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
observation_space, action_space, self.spec = self.remotes[0].recv().x
self.viewer = None
VecEnv.__init__(self, nenvs, observation_space, action_space)
# Get processed action dim
self.is_eval = is_eval
self.processed_action_dim = 1
if not is_eval:
self.remotes[0].send(('processed_action_dim', None))
self.processed_action_dim = self.remotes[0].recv()[0]
def step_async(self, action):
self._assert_not_closed()
action = np.array_split(action, self.nremotes)
for remote, action in zip(self.remotes, action):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
obs = _flatten_list(obs)
return _flatten_obs(obs)
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_complexity_info(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('get_complexity_info', None))
info = [remote.recv() for remote in self.remotes]
info = _flatten_list(info)
return info
def get_images(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('render', None))
imgs = [remote.recv() for remote in self.remotes]
imgs = _flatten_list(imgs)
return imgs
def render_to_screen(self):
self._assert_not_closed()
self.remotes[0].send(('render_to_screen', None))
return self.remotes[0].recv()
def max_episode_steps(self):
self._assert_not_closed()
self.remotes[0].send(('max_episode_steps', None))
return self.remotes[0].recv()
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def __del__(self):
if not self.closed:
self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
class ParallelAdversarialVecEnv(SubprocVecEnv):
def __init__(self, env_fns, adversary=True, is_eval=False):
super().__init__(env_fns, is_eval=is_eval)
action_space = self.action_space
if action_space.__class__.__name__ == 'Box':
self.action_dim = action_space.shape[0]
else:
self.action_dim = 1
self.adv_action_dim = 0
if adversary:
adv_action_space = self.adversary_action_space
if adv_action_space.__class__.__name__ == 'Box':
self.adv_action_dim = adv_action_space.shape[0]
else:
self.adv_action_dim = 1
def _should_expand_action(self, action, adversary=False):
if not adversary:
action_dim = self.action_dim
else:
action_dim = self.adv_action_dim
# print('expanding actions?', action_dim>1, flush=True)
return action_dim > 1 or self.processed_action_dim > 1
def seed_async(self, seed, index):
self._assert_not_closed()
self.remotes[index].send(('seed', seed))
self.waiting = True
def seed_wait(self, index):
self._assert_not_closed()
obs = self.remotes[index].recv()
self.waiting = False
return obs
def seed(self, seed, index):
self.seed_async(seed, index)
return self.seed_wait(index)
def level_seed_async(self, index):
self._assert_not_closed()
self.remotes[index].send(('level_seed', None))
self.waiting = True
def level_seed_wait(self, index):
self._assert_not_closed()
level_seed = self.remotes[index].recv()
self.waiting = False
return level_seed
def level_seed(self, index):
self.level_seed_async(index)
return self.level_seed_wait(index)
# step_adversary
def step_adversary(self, action):
if self._should_expand_action(action, adversary=True):
action = np.expand_dims(action, 1)
self.step_adversary_async(action)
return self.step_wait()
def step_adversary_async(self, action):
self._assert_not_closed()
[remote.send(('step_adversary', a)) for remote, a in zip(self.remotes, action)]
self.waiting = True
def step_env_async(self, action):
self._assert_not_closed()
if self._should_expand_action(action):
action = np.expand_dims(action, 1)
[remote.send(('step_env', a)) for remote, a in zip(self.remotes, action)]
self.waiting = True
def step_env_reset_random_async(self, action):
self._assert_not_closed()
if self._should_expand_action(action):
action = np.expand_dims(action, 1)
[remote.send(('step_env_reset_random', a)) for remote, a in zip(self.remotes, action)]
self.waiting = True
# reset_agent
def reset_agent(self):
self._assert_not_closed()
[remote.send(('reset_agent', None)) for remote in self.remotes]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# reset_random
def reset_random(self):
self._assert_not_closed()
[remote.send(('reset_random', None)) for remote in self.remotes]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# reset_to_level
def reset_to_level(self, level, index):
self._assert_not_closed()
self.remotes[index].send(('reset_to_level', level))
self.waiting = True
obs = self.remotes[index].recv()
self.waiting = False
return _flatten_obs(obs)
def reset_to_level_batch(self, level):
self._assert_not_closed()
[remote.send(('reset_to_level', level[i])) for i, remote in enumerate(self.remotes)]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# mutate level
def mutate_level(self, num_edits):
self._assert_not_closed()
[remote.send(('mutate_level', num_edits)) for _, remote in enumerate(self.remotes)]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# observation_space
def get_observation_space(self):
self._assert_not_closed()
self.remotes[0].send(('observation_space', None))
obs_space = self.remotes[0].recv()
if hasattr(obs_space, 'spaces'):
obs_space = obs_space.spaces
return obs_space
# adversary_observation_space
def get_adversary_observation_space(self):
self._assert_not_closed()
self.remotes[0].send(('adversary_observation_space', None))
obs_space = self.remotes[0].recv()
if hasattr(obs_space, 'spaces'):
obs_space = obs_space.spaces
return obs_space
def get_adversary_action_space(self):
self._assert_not_closed()
self.remotes[0].send(('adversary_action_space', None))
action_dim = self.remotes[0].recv()
return action_dim
def get_max_episode_steps(self):
self._assert_not_closed()
self.remotes[0].send(('max_episode_steps', None))
self.waiting = True
max_episode_steps = self.remotes[0].recv()
self.waiting = False
return max_episode_steps
# Generic getter
def remote_attr(self, name, data=None, flatten=False, index=None):
self._assert_not_closed()
if index is None or len(index) == 0:
remotes = self.remotes
else:
remotes = [self.remotes[i] for i in index]
if hasattr(data, '__len__'):
assert len(data) == len(remotes)
[remote.send((name, d)) for remote, d in zip(remotes, data)]
else:
[remote.send((name, data)) for remote in remotes]
self.waiting = True
result = [remote.recv() for remote in remotes]
self.waiting = False
return _flatten_list(result) if flatten else result
def get_seed(self):
return self.remote_attr('seed_value', flatten=True)
def set_seed(self, seeds):
return self.remote_attr('seed', data=seeds, flatten=True)
def get_level(self):
levels = self.remote_attr('level')
return [l[0] for l in levels] # flatten
def get_encodings(self, index=None):
return self.remote_attr('encoding', flatten=True, index=index)
# Navigation-specific
def get_distance_to_goal(self):
return self.remote_attr('distance_to_goal', flatten=True)
def get_passable(self):
return self.remote_attr('passable', flatten=True)
def get_shortest_path_length(self):
return self.remote_attr('shortest_path_length', flatten=True)
# ALP-GMM-specific
def reset_alp_gmm(self, levels):
self._assert_not_closed()
[remote.send(('reset_alp_gmm', levels[i])) for i, remote in enumerate(self.remotes)]
self.waiting = True
self._assert_not_closed()
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# === Multigrid-specific ===
def get_num_blocks(self):
return self.remote_attr('n_clutter_placed', flatten=True)
def __getattr__(self, name):
if name == 'observation_space':
return self.get_observation_space()
elif name == 'adversary_observation_space':
return self.get_adversary_observation_space()
elif name == 'adversary_action_space':
return self.get_adversary_action_space()
elif name == 'max_steps':
return self.get_max_steps()
else:
return self.__getattribute__(name)
|
dcd-main
|
envs/wrappers/parallel_wrappers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
import numpy as np
import torch
import gym
from .vec_env import VecEnvWrapper
class CarRacingWrapper(gym.Wrapper):
def __init__(self,
env,
grayscale=True,
reward_shaping=True,
sparse_rewards=False,
early_termination=True,
timelimit_bonus=True,
num_action_repeat=8,
nstack=1,
channel_first=False,
crop=True,
eval_=False):
super().__init__(env)
self.eval_ = eval_
self.grayscale = grayscale
self.reward_shaping = reward_shaping
self.sparse_rewards = sparse_rewards
self.num_action_repeat = num_action_repeat
self.early_termination = early_termination
self.timelimit_bonus = timelimit_bonus
self.crop = crop
self.nstack = nstack
self.channel_first = channel_first
self.reset_reward_history()
self.set_observation_space()
if self.sparse_rewards:
self.accumulated_rewards = 0.0
def reset_reward_history(self):
if self.early_termination:
self.reward_history = deque([0]*100,maxlen=100)
if self.sparse_rewards:
self.accumulated_rewards = 0.0
def _preprocess(self, obs):
# Crop
if self.crop:
obs = obs[:-12, 6:-6]
# Grayscale
if self.grayscale:
obs = np.expand_dims(np.dot(obs[..., :], [0.299, 0.587, 0.114]), -1)
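            # Scale pixel intensities from [0, 255] to roughly [-1, 1]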
obs = obs/128. - 1.
return obs
@property
def _average_reward(self):
return np.mean(self.reward_history)
def _reset_stack(self, obs):
if self.nstack > 1:
self.stack = [obs] * self.nstack # four frames for decision
obs = np.concatenate(self.stack, axis=-1)
return obs
def _transpose(self, obs):
if self.channel_first:
obs = np.swapaxes(obs, 0, 2)
obs = np.swapaxes(obs, 1, 2)
return obs
# Public interface
def reset(self):
self.reset_reward_history()
if self.eval_:
obs = self.env.reset()
obs = self._preprocess(obs)
obs = self._reset_stack(obs)
obs = self._transpose(obs)
return obs
else:
return self.env.reset()
def reset_random(self):
self.reset_reward_history()
obs = self.env.reset_random()
obs = self._preprocess(obs)
obs = self._reset_stack(obs)
obs = self._transpose(obs)
return obs
def reset_agent(self):
self.reset_reward_history()
obs = self.env.reset_agent()
obs = self._preprocess(obs)
obs = self._reset_stack(obs)
obs = self._transpose(obs)
return obs
def reset_to_level(self, level):
self.reset_reward_history()
obs = self.env.reset_to_level(level)
obs = self._preprocess(obs)
obs = self._reset_stack(obs)
obs = self._transpose(obs)
return obs
def step(self, action):
done = False
total_reward = 0
for i in range(self.num_action_repeat):
obs, reward, die, info = self.env.step(action)
if self.reward_shaping:
# Don't penalize "done state"
if die:
if self.timelimit_bonus:
reward += 100
# Green penalty
if np.mean(obs[:, :, 1]) > 185.0:
reward -= 0.05
if self.early_termination:
self.reward_history.append(reward)
                    done = self._average_reward <= -0.1
total_reward += reward
# If no reward recently, end the episode
if done or die:
break
obs = self._preprocess(obs)
if self.nstack > 1:
self.stack.pop(0)
self.stack.append(obs)
obs = np.concatenate(self.stack, axis=-1)
obs = self._transpose(obs)
if self.sparse_rewards:
if self.env.goal_reached:
revealed_reward = self.accumulated_rewards
self.accumulated_rewards = 0.0
else:
self.accumulated_rewards += total_reward
revealed_reward = 0.0
else:
revealed_reward = total_reward
# obs = np.expand_dims(obs, 0)
return obs, revealed_reward, done or die, info
def set_observation_space(self):
obs_space = self.env.observation_space
num_channels = 1 if self.grayscale else 3
if self.nstack > 1:
num_channels *= self.nstack
# Cropped and potentially grayscaled observation
if self.crop:
obs_shape = (obs_space.shape[0] - 12, obs_space.shape[1] - 12, num_channels)
else:
obs_shape = (obs_space.shape[0], obs_space.shape[1], num_channels)
if self.channel_first:
obs_shape = (obs_shape[2], obs_shape[0], obs_shape[1])
self.observation_space = gym.spaces.Box(
low=-1,
high=1,
shape=obs_shape,
dtype='float32')
return self.observation_space
|
dcd-main
|
envs/wrappers/car_racing_wrappers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gym
from envs.registration import register as gym_register
env_list = []
def register(env_id, entry_point, reward_threshold=0.95, max_episode_steps=None):
assert env_id.startswith("MultiGrid-")
    if env_id in env_list:
        del gym.envs.registry.env_specs[env_id]
    else:
        env_list.append(env_id)
kwargs = dict(
id=env_id,
entry_point=entry_point,
reward_threshold=reward_threshold
)
if max_episode_steps:
kwargs.update({'max_episode_steps':max_episode_steps})
gym_register(**kwargs)
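# A hedged usage sketch (not part of the original file; 'my_module:MyCustomEnv'
# is a hypothetical entry point). Registered ids must start with 'MultiGrid-':
#   register('MultiGrid-MyCustomEnv-v0',
#            entry_point='my_module:MyCustomEnv',
#            max_episode_steps=250)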
|
dcd-main
|
envs/bipedalwalker/register.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import gym
import time
import numpy as np
import torch
from gym.envs.box2d import BipedalWalker, BipedalWalkerHardcore
from .walker_env import EnvConfig, BipedalWalkerCustom
from envs.registration import register as gym_register
"""
actions
1. ground_roughness
2,3. pit_gap
stump_width (fixed)
4,5. stump_height
stump_float (fixed)
6,7 stair_height
stair_width (fixed)
8 stair_steps
"""
PARAM_RANGES_DEBUG = {
1: [0,0.01], # ground roughness
2: [0,0], # pit gap 1
3: [0.01,0.01], # pit gap 2
4: [0,0], # stump height 1
5: [0.01,0.01], # stump height 2
6: [0,0], # stair height 1
7: [0.01,0.01], # stair height 2
8: [1,1], # stair steps
}
PARAM_RANGES_EASY = {
1: [0,0.6], # ground roughness
2: [0,0], # pit gap 1
3: [0.8,0.8], # pit gap 2
4: [0,0], # stump height 1
5: [0.4,0.4], # stump height 2
6: [0,0], # stair height 1
7: [0.4,0.4], # stair height 2
8: [1,1], # stair steps
}
PARAM_RANGES_FULL = {
1: [0,10], # ground roughness
2: [0,10], # pit gap 1
3: [0,10], # pit gap 2
4: [0,5], # stump height 1
5: [0,5], # stump height 2
6: [0,5], # stair height 1
7: [0,5], # stair height 2
8: [1,9], # stair steps
}
PARAM_MUTATIONS = {
1: [0,0.6], # ground roughness
2: [0.4], # pit gap 1
3: [0.4], # pit gap 2
4: [0.2], # stump height 1
5: [0.2], # stump height 2
6: [0.2], # stair height 1
7: [0.2], # stair height 2
8: [1], # stair steps
}
DEFAULT_LEVEL_PARAMS_VEC = [0,0,10,0,5,0,5,9]
STUMP_WIDTH_RANGE = [1, 2]
STUMP_FLOAT_RANGE = [0, 1]
STAIR_WIDTH_RANGE = [4, 5]
def rand_int_seed():
return int.from_bytes(os.urandom(4), byteorder="little")
class BipedalWalkerAdversarialEnv(BipedalWalkerCustom):
def __init__(self, mode='full', poet=False, random_z_dim=10, seed=0):
self.mode = mode
self.level_seed = seed
self.poet = poet # POET didn't use the stairs, not clear why
default_config = EnvConfig(
name='default_conf',
ground_roughness=0,
pit_gap=[0,10],
stump_width=[4,5],
stump_height=[0,5],
stump_float=[0,1],
stair_height=[0,5],
stair_width=[4,5],
stair_steps=[1])
super().__init__(default_config, seed=seed)
if self.poet:
self.adversary_max_steps = 5
else:
self.adversary_max_steps = 8
self.random_z_dim = random_z_dim
self.passable = True
# Level vec is the *tunable* UED params
self.level_params_vec = DEFAULT_LEVEL_PARAMS_VEC
if self.poet:
self.level_params_vec = self.level_params_vec[:5]
self._update_params(self.level_params_vec)
if poet:
self.mutations = {k:v for k,v in list(PARAM_MUTATIONS.items())[:5]}
else:
self.mutations = PARAM_MUTATIONS
n_u_chars = max(12, len(str(rand_int_seed())))
self.encoding_u_chars = np.dtype(('U', n_u_chars))
# Fixed params
self.stump_width = STUMP_WIDTH_RANGE
self.stump_float = STUMP_FLOAT_RANGE
self.stair_width = STAIR_WIDTH_RANGE
# Create spaces for adversary agent's specs.
self.adversary_action_dim = 1
self.adversary_action_space = gym.spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)
self.adversary_ts_obs_space = \
gym.spaces.Box(
low=0,
high=self.adversary_max_steps,
shape=(1,),
dtype='uint8')
self.adversary_randomz_obs_space = \
gym.spaces.Box(
low=0,
high=1.0,
shape=(random_z_dim,),
dtype=np.float32)
self.adversary_image_obs_space = \
gym.spaces.Box(
low=0,
high=10.0,
shape=(len(self.level_params_vec),),
dtype=np.float32)
self.adversary_observation_space = \
gym.spaces.Dict({
'image': self.adversary_image_obs_space,
'time_step': self.adversary_ts_obs_space,
'random_z': self.adversary_randomz_obs_space})
def reset(self):
self.step_count = 0
self.adversary_step_count = 0
# Reset to default parameters
self.level_params_vec = DEFAULT_LEVEL_PARAMS_VEC
if self.poet:
self.level_params_vec = self.level_params_vec[:5]
self._update_params(self.level_params_vec)
self.level_seed = rand_int_seed()
obs = {
'image': self.get_obs(),
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
return obs
def get_obs(self):
## vector of *tunable* environment params
obs = []
obs += [self.ground_roughness]
obs += self.pit_gap
obs += self.stump_height
if not self.poet:
obs += self.stair_height
obs += self.stair_steps
return np.array(obs)
def reset_agent(self):
super().seed(self.level_seed)
obs = super()._reset_env()
return obs
def _update_params(self, level_params_vec):
self.ground_roughness = level_params_vec[0]
self.pit_gap = [level_params_vec[1],level_params_vec[2]]
self.pit_gap.sort()
self.stump_height = [level_params_vec[3],level_params_vec[4]]
self.stump_height.sort()
if self.poet:
self.stair_height = []
self.stair_steps = []
else:
self.stair_height = [level_params_vec[5],level_params_vec[6]]
self.stair_height.sort()
self.stair_steps = [int(round(level_params_vec[7]))]
def get_complexity_info(self):
complexity_info = {
'ground_roughness': self.ground_roughness,
'pit_gap_low': self.pit_gap[0],
'pit_gap_high': self.pit_gap[1],
'stump_height_low': self.stump_height[0],
'stump_height_high': self.stump_height[1]
}
if not self.poet:
complexity_info['stair_height_low'] = self.stair_height[0]
complexity_info['stair_height_high'] = self.stair_height[1]
complexity_info['stair_steps'] = self.stair_steps[0]
return complexity_info
def get_config(self):
"""
Gets the config to use to create the level.
        If a range is zero or below a minimum threshold, we put blank entries.
"""
if self.stump_height[1] < 0.2:
stump_height = []
stump_width = []
stump_float = []
else:
stump_height = self.stump_height
stump_width = self.stump_width
stump_float = self.stump_float
if self.pit_gap[1] < 0.8:
pit_gap = []
else:
pit_gap = self.pit_gap
if self.poet:
stair_height = []
stair_width = []
stair_steps = []
elif self.stair_height[1] < 0.2:
stair_height = []
stair_width = []
stair_steps = []
else:
stair_height = self.stair_height
stair_width = self.stair_width
stair_steps = self.stair_steps
# get the current config
config = EnvConfig(
name='config',
ground_roughness=self.ground_roughness,
pit_gap=pit_gap,
stump_width=stump_width,
stump_height=stump_height,
stump_float=stump_float,
stair_height=stair_height,
stair_width=stair_width,
stair_steps=stair_steps)
return config
def _reset_env_config(self):
"""
Resets the environment based on current level encoding.
"""
config = self.get_config()
try:
super().re_init(config, self.level_seed)
except AssertionError:
super().re_init(config, self.level_seed+1)
def reset_to_level(self, level, editing=False):
self.reset()
if isinstance(level, str):
encoding = list(np.fromstring(level))
else:
encoding = [float(x) for x in level[:-1]] + [int(level[-1])]
assert len(level) == len(self.level_params_vec) + 1, \
f'Level input is the wrong length.'
self.level_params_vec = encoding[:-1]
self._update_params(self.level_params_vec)
self._reset_env_config()
self.level_seed = int(level[-1])
return self.reset_agent()
@property
def param_ranges(self):
if self.mode == 'easy':
param_ranges = PARAM_RANGES_EASY
elif self.mode == 'full':
param_ranges = PARAM_RANGES_FULL
elif self.mode == 'debug':
param_ranges = PARAM_RANGES_DEBUG
else:
            raise ValueError("Mode must be 'easy', 'full', or 'debug'")
return param_ranges
@property
def encoding(self):
enc = self.level_params_vec + [self.level_seed]
enc = [str(x) for x in enc]
return np.array(enc, dtype=self.encoding_u_chars)
@property
def level(self):
return self.encoding
def reset_random(self):
"""
        Resets the level parameters randomly, performing in one call what step_adversary would otherwise do over multiple steps.
"""
# action will be between [-1,1]
# this maps to a range, depending on the index
param_ranges = self.param_ranges
rand_norm_params = np.random.rand(len(param_ranges))
self.level_params_vec = \
[rand_norm_params[i]*(param_range[1]-param_range[0]) + param_range[0]
for i,param_range in enumerate(param_ranges.values())]
self._update_params(self.level_params_vec)
self.level_seed = rand_int_seed()
self._reset_env_config()
return self.reset_agent()
def reset_alp_gmm(self, level):
self.reset()
level = list(level)
param_ranges = self.param_ranges
for idx, action in enumerate(level):
val_range = param_ranges[idx + 1]
action -= 1
value = ((action + 1)/2) * (val_range[1]-val_range[0]) + val_range[0]
# update the level vec
self.level_params_vec[idx] = value
self.level_seed = rand_int_seed()
self._update_params(self.level_params_vec)
self._reset_env_config()
obs = {
'image': self.level_params_vec,
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
return obs
@property
def processed_action_dim(self):
return 1
def generate_random_z(self):
return np.random.uniform(size=(self.random_z_dim,)).astype(np.float32)
def mutate_level(self, num_edits=1):
if num_edits > 0:
# Perform mutations on current level vector
param_ranges = self.param_ranges
edit_actions = np.random.randint(1, len(self.mutations) + 1, num_edits)
edit_dirs = np.random.randint(0, 3, num_edits) - 1
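            # edit_dirs take values in {-1, 0, +1}: decrease, keep, or increase
            # the selected parameter by its mutation magnitude.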
# Update level_params_vec
for a,d in zip(edit_actions, edit_dirs):
mutation_range = self.mutations[a]
if len(mutation_range) == 1:
mutation = d*mutation_range[0]
elif len(mutation_range) == 2:
mutation = d*np.random.uniform(*mutation_range)
self.level_params_vec[a-1] = \
np.clip(self.level_params_vec[a-1]+mutation,
*PARAM_RANGES_FULL[a])
self.level_seed = rand_int_seed()
self._update_params(self.level_params_vec)
self._reset_env_config()
return self.reset_agent()
def step_adversary(self, action):
# action will be between [-1,1]
# this maps to a range, depending on the index
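        # e.g. in 'full' mode, action 0.0 on the ground-roughness step
        # (range [0, 10]) maps to ((0.0 + 1) / 2) * (10 - 0) + 0 = 5.0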
param_ranges = self.param_ranges
val_range = param_ranges[self.adversary_step_count+1]
if torch.is_tensor(action):
action = action.item()
# get unnormalized value from the action
value = ((action + 1)/2) * (val_range[1]-val_range[0]) + val_range[0]
# update the level vec
self.level_params_vec[self.adversary_step_count] = value
self.adversary_step_count += 1
if self.adversary_step_count >= self.adversary_max_steps:
self.level_seed = rand_int_seed()
self._update_params(self.level_params_vec)
self._reset_env_config()
done=True
else:
done=False
obs = {
'image': self.level_params_vec,
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
return obs, 0, done, {}
class BipedalWalkerDev(BipedalWalker):
def __init__(self, random_z_dim=5):
super().__init__()
self.adversary_action_space = gym.spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)
self.adversary_max_steps = 5
self.level_params_vec = [0]
self.adversary_ts_obs_space = \
gym.spaces.Box(
low=0,
high=self.adversary_max_steps,
shape=(1,),
dtype='uint8')
self.adversary_randomz_obs_space = \
gym.spaces.Box(
low=0,
high=1.0,
shape=(random_z_dim,),
dtype=np.float32)
self.adversary_image_obs_space = \
gym.spaces.Box(
low=0,
high=10.0,
shape=(len(self.level_params_vec),),
dtype=np.float32)
self.adversary_observation_space = \
gym.spaces.Dict({
'image': self.adversary_image_obs_space,
'time_step': self.adversary_ts_obs_space,
'random_z': self.adversary_randomz_obs_space})
def reset_random(self):
seed = rand_int_seed()
super().seed(seed)
return super().reset()
def reset_agent(self):
return super().reset()
def step_adversary(self):
pass
@property
def processed_action_dim(self):
return 1
def get_complexity_info(self):
complexity_info = {
'ground_roughness': 0,
}
return complexity_info
class BipedalWalkerHC(BipedalWalkerHardcore):
def __init__(self, random_z_dim=5, seed=0):
super().__init__()
self.adversary_action_space = gym.spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)
self.adversary_max_steps = 5
self.level_params_vec = [0]
self.adversary_ts_obs_space = \
gym.spaces.Box(
low=0,
high=self.adversary_max_steps,
shape=(1,),
dtype='uint8')
self.adversary_randomz_obs_space = \
gym.spaces.Box(
low=0,
high=1.0,
shape=(random_z_dim,),
dtype=np.float32)
self.adversary_image_obs_space = \
gym.spaces.Box(
low=0,
high=10.0,
shape=(len(self.level_params_vec),),
dtype=np.float32)
self.adversary_observation_space = \
gym.spaces.Dict({
'image': self.adversary_image_obs_space,
'time_step': self.adversary_ts_obs_space,
'random_z': self.adversary_randomz_obs_space})
self.adversary_editor_action_space = gym.spaces.MultiDiscrete([3, 3])
def reset_random(self):
seed = rand_int_seed()
super().seed(seed)
return super().reset()
def reset_agent(self):
return super().reset()
def step_adversary(self):
pass
@property
def processed_action_dim(self):
return 1
def get_complexity_info(self):
complexity_info = {
'ground_roughness': 0,
}
return complexity_info
class BipedalWalkerFull(BipedalWalkerAdversarialEnv):
def __init__(self, seed=0):
super().__init__(mode='full', seed=seed)
class BipedalWalkerEasy(BipedalWalkerAdversarialEnv):
def __init__(self, seed=0):
super().__init__(mode='easy', seed=seed)
class BipedalWalkerDebug(BipedalWalkerDev):
def __init__(self, seed=0):
super().__init__()
class BipedalWalkerPOET(BipedalWalkerAdversarialEnv):
def __init__(self, seed=0):
super().__init__(mode='full', poet=True, seed=seed)
class BipedalWalkerEasyPOET(BipedalWalkerAdversarialEnv):
def __init__(self, seed=0):
super().__init__(mode='easy', poet=True, seed=seed)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
gym_register(id='BipedalWalker-Adversarial-v0',
entry_point=module_path + ':BipedalWalkerFull',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Adversarial-Easy-v0',
entry_point=module_path + ':BipedalWalkerEasy',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Vanilla-v0',
entry_point=module_path + ':BipedalWalkerDebug',
max_episode_steps=2000)
gym_register(id='BipedalWalker-HC-v0',
entry_point=module_path + ':BipedalWalkerHC',
max_episode_steps=2000)
gym_register(id='BipedalWalker-POET-v0',
entry_point=module_path + ':BipedalWalkerPOET',
max_episode_steps=2000)
gym_register(id='BipedalWalker-POET-Easy-v0',
entry_point=module_path + ':BipedalWalkerEasyPOET',
max_episode_steps=2000)
|
dcd-main
|
envs/bipedalwalker/adversarial.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .adversarial import BipedalWalkerAdversarialEnv
from .walker_test_envs import BipedalWalkerDefault
import pandas as pd
BIPEDALWALKER_POET_DF_COLUMNS = \
['roughness',
'pitgap_low',
'pitgap_high',
'stumpheight_low',
'stumpheight_high',
'seed']
BIPEDALWALKER_DF_COLUMNS = \
['roughness',
'pitgap_low',
'pitgap_high',
'stumpheight_low',
'stumpheight_high',
'stairheight_low',
'stairheight_high',
'stair_steps',
'seed']
def bipedalwalker_df_from_encodings(env_name, encodings):
df = pd.DataFrame(encodings)
if 'POET' in env_name:
df.columns = BIPEDALWALKER_POET_DF_COLUMNS
else:
df.columns = BIPEDALWALKER_DF_COLUMNS
return df
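# A hedged usage sketch (assumes `venv` is a ParallelAdversarialVecEnv over
# BipedalWalker adversarial envs, whose get_encodings() returns level encodings):
#   encodings = venv.get_encodings()
#   df = bipedalwalker_df_from_encodings('BipedalWalker-Adversarial-v0', encodings)
#   print(df.describe())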
|
dcd-main
|
envs/bipedalwalker/__init__.py
|
# Copyright (c) OpenAI
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# This file is an extended version of
# https://github.com/openai/gym/blob/master/gym/envs/box2d/bipedal_walker.py
import sys
import math
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef,
polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import colorize, seeding
from collections import namedtuple
EnvConfig = namedtuple('EnvConfig', [
'name',
'ground_roughness',
'pit_gap',
'stump_width', 'stump_height', 'stump_float',
'stair_height', 'stair_width', 'stair_steps'
])
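# Example config (values are illustrative only): a flat level with medium
# stumps and no pits or stairs:
#   EnvConfig(name='demo', ground_roughness=0, pit_gap=[],
#             stump_width=[1, 2], stump_height=[2, 2], stump_float=[0, 1],
#             stair_height=[], stair_width=[], stair_steps=[])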
FPS = 50
SCALE = 30.0 # Affects how fast-paced the game is, forces should be adjusted as well
MOTORS_TORQUE = 80
SPEED_HIP = 4
SPEED_KNEE = 6
LIDAR_RANGE = 160 / SCALE
INITIAL_RANDOM = 5
HULL_POLY = [
(-30, +9), (+6, +9), (+34, +1),
(+34, -8), (-30, -8)
]
LEG_DOWN = -8 / SCALE
LEG_W, LEG_H = 8 / SCALE, 34 / SCALE
VIEWPORT_W = 600
VIEWPORT_H = 400
TERRAIN_STEP = 14 / SCALE
TERRAIN_LENGTH = 200 # in steps
TERRAIN_HEIGHT = VIEWPORT_H / SCALE / 4
TERRAIN_GRASS = 10 # how long grass spots are, in steps
TERRAIN_STARTPAD = 20 # in steps
FRICTION = 2.5
HULL_FD = fixtureDef(
shape=polygonShape(vertices=[(x / SCALE, y / SCALE)
for x, y in HULL_POLY]),
density=5.0,
friction=0.1,
categoryBits=0x0020,
maskBits=0x001, # collide only with ground
restitution=0.0) # 0.99 bouncy
LEG_FD = fixtureDef(
shape=polygonShape(box=(LEG_W / 2, LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
LOWER_FD = fixtureDef(
shape=polygonShape(box=(0.8 * LEG_W / 2, LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
STAIR_HEIGHT_EPS = 1e-2
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if self.env.hull == contact.fixtureA.body or self.env.hull == contact.fixtureB.body:
self.env.game_over = True
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = True
def EndContact(self, contact):
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = False
class BipedalWalkerCustom(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': FPS
}
def __repr__(self):
return "{}\nenv\n{}".format(self.__dict__, self.__dict__["np_random"].get_state())
def __init__(self, env_config, seed=None):
self.spec = None
self.set_env_config(env_config)
self.env_params = None
self.env_seed = seed
self._seed(seed)
self.viewer = None
self.world = Box2D.b2World()
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction=FRICTION)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=[(0, 0),
(1, 1)]),
friction=FRICTION,
categoryBits=0x0001,
)
self._reset_env()
high = np.array([np.inf] * 24)
self.action_space = spaces.Box(
np.array([-1, -1, -1, -1]), np.array([+1, +1, +1, +1]))
self.observation_space = spaces.Box(-high, high)
def re_init(self, env_config, seed):
self.spec = None
self.set_env_config(env_config)
self._seed(seed)
self.env_params = None
self.world = Box2D.b2World()
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction=FRICTION)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=[(0, 0),
(1, 1)]),
friction=FRICTION,
categoryBits=0x0001,
)
self._reset_env()
def set_env_config(self, env_config):
self.config = env_config
def augment(self, params):
self.env_params = params
def _set_terrain_number(self):
self.hardcore = False
self.GRASS = 0
self.STUMP, self.STAIRS, self.PIT = -1, -1, -1
self._STATES_ = 1
if self.config.stump_width and self.config.stump_height and self.config.stump_float:
# STUMP exist
self.STUMP = self._STATES_
self._STATES_ += 1
if self.config.stair_height and self.config.stair_width and self.config.stair_steps:
# STAIRS exist
self.STAIRS = self._STATES_
self._STATES_ += 1
if self.config.pit_gap:
# PIT exist
self.PIT = self._STATES_
self._STATES_ += 1
if self._STATES_ > 1:
self.hardcore = True
def save_env_def(self, filename):
import json
a = {'config': self.config._asdict(), 'seed': self.env_seed}
with open(filename, 'w') as f:
json.dump(a, f)
def seed(self, seed=None):
return self._seed(seed)
def _seed(self, seed=None):
self.env_seed = seed
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.terrain:
return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.world.DestroyBody(self.hull)
self.hull = None
for leg in self.legs:
self.world.DestroyBody(leg)
self.legs = []
self.joints = []
self.world = None
def _get_poly_stump(self, x, y, terrain_step):
stump_width = self.np_random.randint(*self.config.stump_width)
stump_height = self.np_random.uniform(*self.config.stump_height)
stump_float = self.np_random.randint(*self.config.stump_float)
# counter = np.ceil(stump_width)
counter = stump_width
countery = stump_height
poly = [(x, y + stump_float * terrain_step),
(x + stump_width * terrain_step, y + stump_float * terrain_step),
(x + stump_width * terrain_step, y + countery * terrain_step + stump_float * terrain_step),
(x, y + countery * terrain_step + stump_float * terrain_step), ]
return poly
def _generate_terrain(self, hardcore):
#GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)
state = self.GRASS
velocity = 0.0
y = TERRAIN_HEIGHT
counter = TERRAIN_STARTPAD
oneshot = False
self.terrain = []
self.terrain_x = []
self.terrain_y = []
pit_diff = 0
for i in range(TERRAIN_LENGTH):
x = i * TERRAIN_STEP
self.terrain_x.append(x)
if state == self.GRASS and not oneshot:
velocity = 0.8 * velocity + 0.01 * np.sign(TERRAIN_HEIGHT - y)
if self.env_params is not None and self.env_params.altitude_fn is not None:
y += velocity
if i > TERRAIN_STARTPAD:
mid = TERRAIN_LENGTH * TERRAIN_STEP / 2.
x_ = (x - mid) * np.pi / mid
y = TERRAIN_HEIGHT + self.env_params.altitude_fn((x_, ))[0]
if i == TERRAIN_STARTPAD+1:
y_norm = self.env_params.altitude_fn((x_, ))[0]
y -= y_norm
else:
if i > TERRAIN_STARTPAD:
velocity += self.np_random.uniform(-1, 1) / SCALE # 1
y += self.config.ground_roughness * velocity
elif state == self.PIT and oneshot:
pit_gap = 1.0 + self.np_random.uniform(*self.config.pit_gap)
counter = np.ceil(pit_gap)
pit_diff = counter - pit_gap
poly = [
(x, y),
(x + TERRAIN_STEP, y),
(x + TERRAIN_STEP, y - 4 * TERRAIN_STEP),
(x, y - 4 * TERRAIN_STEP),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
self.fd_polygon.shape.vertices = [
(p[0] + TERRAIN_STEP * pit_gap, p[1]) for p in poly]
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
counter += 2
original_y = y
elif state == self.PIT and not oneshot:
y = original_y
if counter > 1:
y -= 4 * TERRAIN_STEP
if counter == 1:
self.terrain_x[-1] = self.terrain_x[-1] - pit_diff * TERRAIN_STEP
pit_diff = 0
elif state == self.STUMP and oneshot:
                # Sometimes this doesn't work due to randomness,
# so iterate until it does
attempts = 0
done = False
while not done:
try:
poly = self._get_poly_stump(x, y, TERRAIN_STEP)
self.fd_polygon.shape.vertices = poly
done = True
self.env_seed -= int(attempts)
except:
self.seed(self.env_seed + int(1))
attempts += 1
if attempts > 10:
print("Stump issues: num attempts: ", attempts)
done = True
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
elif state == self.STAIRS and oneshot:
stair_height = self.np_random.uniform(
*self.config.stair_height)
stair_slope = 1 if self.np_random.rand() > 0.5 else -1
stair_width = self.np_random.randint(*self.config.stair_width)
stair_steps = self.np_random.randint(*self.config.stair_steps)
original_y = y
if stair_height > STAIR_HEIGHT_EPS:
for s in range(stair_steps):
poly = [(x + (s * stair_width) * TERRAIN_STEP, y + (s * stair_height * stair_slope) * TERRAIN_STEP),
(x + ((1 + s) * stair_width) * TERRAIN_STEP, y + (s * stair_height * stair_slope) * TERRAIN_STEP),
(x + ((1 + s) * stair_width) * TERRAIN_STEP, y + (-stair_height + s * stair_height * stair_slope) * TERRAIN_STEP),
(x + (s * stair_width) * TERRAIN_STEP, y + (-stair_height + s * stair_height * stair_slope) * TERRAIN_STEP), ]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
counter = stair_steps * stair_width + 1
elif state == self.STAIRS and not oneshot:
s = stair_steps * stair_width - counter
n = s // stair_width
y = original_y + (n * stair_height * stair_slope) * TERRAIN_STEP - \
(stair_height if stair_slope == -1 else 0) * TERRAIN_STEP
oneshot = False
self.terrain_y.append(y)
counter -= 1
if counter == 0:
counter = self.np_random.randint(
TERRAIN_GRASS / 2, TERRAIN_GRASS)
if state == self.GRASS and hardcore:
state = self.np_random.randint(1, self._STATES_)
oneshot = True
else:
state = self.GRASS
oneshot = True
self.terrain_poly = []
for i in range(TERRAIN_LENGTH - 1):
poly = [
(self.terrain_x[i], self.terrain_y[i]),
(self.terrain_x[i + 1], self.terrain_y[i + 1])
]
self.fd_edge.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_edge)
color = (0.3, 1.0 if i % 2 == 0 else 0.8, 0.3)
t.color1 = color
t.color2 = color
self.terrain.append(t)
color = (0.4, 0.6, 0.3)
poly += [(poly[1][0], 0), (poly[0][0], 0)]
self.terrain_poly.append((poly, color))
self.terrain.reverse()
def _generate_clouds(self):
# Sorry for the clouds, couldn't resist
self.cloud_poly = []
for i in range(TERRAIN_LENGTH // 20):
x = self.np_random.uniform(0, TERRAIN_LENGTH) * TERRAIN_STEP
y = VIEWPORT_H / SCALE * 3 / 4
poly = [
(x + 15 * TERRAIN_STEP * math.sin(3.14 * 2 * a / 5) + self.np_random.uniform(0, 5 * TERRAIN_STEP),
y + 5 * TERRAIN_STEP * math.cos(3.14 * 2 * a / 5) + self.np_random.uniform(0, 5 * TERRAIN_STEP))
for a in range(5)]
x1 = min([p[0] for p in poly])
x2 = max([p[0] for p in poly])
self.cloud_poly.append((poly, x1, x2))
def _reset_env(self):
self._destroy()
self.world = Box2D.b2World()
self.world.contactListener_bug_workaround = ContactDetector(self)
self.world.contactListener = self.world.contactListener_bug_workaround
self.game_over = False
self.prev_shaping = None
self.scroll = 0.0
self.lidar_render = 0
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
self._set_terrain_number()
self._generate_terrain(self.hardcore)
self._generate_clouds()
init_x = TERRAIN_STEP * TERRAIN_STARTPAD / 2
init_y = TERRAIN_HEIGHT + 2 * LEG_H
self.hull = self.world.CreateDynamicBody(
position=(init_x, init_y),
fixtures=HULL_FD
)
self.hull.color1 = (0.5, 0.4, 0.9)
self.hull.color2 = (0.3, 0.3, 0.5)
self.hull.ApplyForceToCenter(
(self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True)
self.legs = []
self.joints = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(init_x, init_y - LEG_H / 2 - LEG_DOWN),
angle=(i * 0.05),
fixtures=LEG_FD
)
leg.color1 = (0.6 - i / 10., 0.3 - i / 10., 0.5 - i / 10.)
leg.color2 = (0.4 - i / 10., 0.2 - i / 10., 0.3 - i / 10.)
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=leg,
localAnchorA=(0, LEG_DOWN),
localAnchorB=(0, LEG_H / 2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed=i,
lowerAngle=-0.8,
upperAngle=1.1,
)
self.legs.append(leg)
self.joints.append(self.world.CreateJoint(rjd))
lower = self.world.CreateDynamicBody(
position=(init_x, init_y - LEG_H * 3 / 2 - LEG_DOWN),
angle=(i * 0.05),
fixtures=LOWER_FD
)
lower.color1 = (0.6 - i / 10., 0.3 - i / 10., 0.5 - i / 10.)
lower.color2 = (0.4 - i / 10., 0.2 - i / 10., 0.3 - i / 10.)
rjd = revoluteJointDef(
bodyA=leg,
bodyB=lower,
localAnchorA=(0, -LEG_H / 2),
localAnchorB=(0, LEG_H / 2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed=1,
lowerAngle=-1.6,
upperAngle=-0.1,
)
lower.ground_contact = False
self.legs.append(lower)
self.joints.append(self.world.CreateJoint(rjd))
self.drawlist = self.terrain + self.legs + [self.hull]
class LidarCallback(Box2D.b2.rayCastCallback):
def ReportFixture(self, fixture, point, normal, fraction):
if (fixture.filterData.categoryBits & 1) == 0:
return -1
self.p2 = point
self.fraction = fraction
return fraction
self.lidar = [LidarCallback() for _ in range(10)]
return self._step(np.array([0, 0, 0, 0]))[0]
def step(self, action):
return self._step(action)
def _step(self, action):
# self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help
control_speed = False # Should be easier as well
if control_speed:
self.joints[0].motorSpeed = float(
SPEED_HIP * np.clip(action[0], -1, 1))
self.joints[1].motorSpeed = float(
SPEED_KNEE * np.clip(action[1], -1, 1))
self.joints[2].motorSpeed = float(
SPEED_HIP * np.clip(action[2], -1, 1))
self.joints[3].motorSpeed = float(
SPEED_KNEE * np.clip(action[3], -1, 1))
else:
self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
self.joints[0].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))
self.joints[1].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))
self.joints[2].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))
self.joints[3].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1))
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.hull.position
vel = self.hull.linearVelocity
for i in range(10):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5 * i / 10.0) * LIDAR_RANGE,
pos[1] - math.cos(1.5 * i / 10.0) * LIDAR_RANGE)
self.world.RayCast(
self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
# Normal angles up to 0.5 here, but sure more is possible.
self.hull.angle,
2.0 * self.hull.angularVelocity / FPS,
# Normalized to get -1..1 range
0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS,
0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS,
            # This will give 1.1 on high up, but it's still OK (and there should be spikes on hitting the ground, that's normal too)
self.joints[0].angle,
self.joints[0].speed / SPEED_HIP,
self.joints[1].angle + 1.0,
self.joints[1].speed / SPEED_KNEE,
1.0 if self.legs[1].ground_contact else 0.0,
self.joints[2].angle,
self.joints[2].speed / SPEED_HIP,
self.joints[3].angle + 1.0,
self.joints[3].speed / SPEED_KNEE,
1.0 if self.legs[3].ground_contact else 0.0
]
state += [l.fraction for l in self.lidar]
assert len(state) == 24
self.scroll = pos.x - VIEWPORT_W / SCALE / 5
# moving forward is a way to receive reward (normalized to get 300 on completion)
shaping = 130 * pos[0] / SCALE
# keep head straight, other than that and falling, any behavior is unpunished
shaping -= 5.0 * abs(state[0])
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)
# normalized to about -50.0 using heuristic, more optimal agent should spend less
done = False
finish = False
if self.game_over or pos[0] < 0:
reward = -100
done = True
if pos[0] > (TERRAIN_LENGTH - TERRAIN_GRASS) * TERRAIN_STEP:
done = True
finish = True
return np.array(state), reward, done, {"finish": finish}
def render(self, *args, **kwargs):
return self._render(*args, **kwargs)
def _render(self, mode='level', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(self.scroll, VIEWPORT_W /
SCALE + self.scroll, 0, VIEWPORT_H / SCALE)
self.viewer.draw_polygon([
(self.scroll, 0),
(self.scroll + VIEWPORT_W / SCALE, 0),
(self.scroll + VIEWPORT_W / SCALE, VIEWPORT_H / SCALE),
(self.scroll, VIEWPORT_H / SCALE),
], color=(0.9, 0.9, 1.0))
for poly, x1, x2 in self.cloud_poly:
if x2 < self.scroll / 2:
continue
if x1 > self.scroll / 2 + VIEWPORT_W / SCALE:
continue
self.viewer.draw_polygon(
[(p[0] + self.scroll / 2, p[1]) for p in poly], color=(1, 1, 1))
for poly, color in self.terrain_poly:
if poly[1][0] < self.scroll:
continue
if poly[0][0] > self.scroll + VIEWPORT_W / SCALE:
continue
self.viewer.draw_polygon(poly, color=color)
self.lidar_render = (self.lidar_render + 1) % 100
i = self.lidar_render
if i < 2 * len(self.lidar):
l = self.lidar[i] if i < len(
self.lidar) else self.lidar[len(self.lidar) - i - 1]
self.viewer.draw_polyline(
[l.p1, l.p2], color=(1, 0, 0), linewidth=1)
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans * f.shape.pos)
self.viewer.draw_circle(
f.shape.radius, 30, color=obj.color1).add_attr(t)
self.viewer.draw_circle(
f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2).add_attr(t)
else:
path = [trans * v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(
path, color=obj.color2, linewidth=2)
flagy1 = TERRAIN_HEIGHT
flagy2 = flagy1 + 50 / SCALE
x = TERRAIN_STEP * 3
self.viewer.draw_polyline(
[(x, flagy1), (x, flagy2)], color=(0, 0, 0), linewidth=2)
f = [(x, flagy2), (x, flagy2 - 10 / SCALE),
(x + 25 / SCALE, flagy2 - 5 / SCALE)]
self.viewer.draw_polygon(f, color=(0.9, 0.2, 0))
self.viewer.draw_polyline(f + [f[0]], color=(0, 0, 0), linewidth=2)
return_rgb_array = mode in ['rgb_array', 'level']
        return self.viewer.render(return_rgb_array=return_rgb_array)
|
dcd-main
|
envs/bipedalwalker/walker_env.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gym
import time
import numpy as np
import torch
from .walker_env import EnvConfig, BipedalWalkerCustom
from envs.registration import register as gym_register
def get_config(name='default',
ground_roughness=0,
pit_gap=[],
stump_width=[],
stump_height=[],
stump_float=[],
stair_height=[],
stair_width=[],
stair_steps=[]):
config = EnvConfig(
name=name,
ground_roughness=ground_roughness,
pit_gap=pit_gap,
stump_width=stump_width,
stump_height=stump_height,
stump_float=stump_float,
stair_height=stair_height,
stair_width=stair_width,
stair_steps=stair_steps)
return config
class BipedalWalkerDefault(BipedalWalkerCustom):
def __init__(self):
config = get_config()
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
## stump height
class BipedalWalkerMedStumps(BipedalWalkerCustom):
def __init__(self):
config = get_config(
stump_height=[2, 2],
stump_width=[1, 2],
stump_float=[0, 1]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
class BipedalWalkerHighStumps(BipedalWalkerCustom):
def __init__(self):
config = get_config(
stump_height=[5, 5],
stump_width=[1, 2],
stump_float=[0, 1]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
## pit gap
class BipedalWalkerMedPits(BipedalWalkerCustom):
def __init__(self):
config = get_config(
pit_gap=[5, 5]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
class BipedalWalkerWidePits(BipedalWalkerCustom):
def __init__(self):
config = get_config(
pit_gap=[10, 10]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
# stair height + number of stairs
class BipedalWalkerMedStairs(BipedalWalkerCustom):
def __init__(self):
config = get_config(
stair_height=[2, 2],
stair_steps=[5],
stair_width = [4, 5]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
class BipedalWalkerHighStairs(BipedalWalkerCustom):
def __init__(self):
config = get_config(
stair_height=[5, 5],
stair_steps=[9],
stair_width=[4, 5]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
# ground roughness
class BipedalWalkerMedRoughness(BipedalWalkerCustom):
def __init__(self):
config = get_config(
ground_roughness=5
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
class BipedalWalkerHighRoughness(BipedalWalkerCustom):
def __init__(self):
config = get_config(
ground_roughness=9
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
# everything maxed out
class BipedalWalkerInsane(BipedalWalkerCustom):
def __init__(self):
config = get_config(
stump_height=[5, 5],
stump_width=[1, 2],
stump_float=[0, 1],
pit_gap=[10, 10],
stair_height=[5, 5],
stair_steps=[9],
stair_width=[4, 5],
ground_roughness=9
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
## PCG "Extremely Challenging" Env
# First samples params, then generates level
class BipedalWalkerXChal(BipedalWalkerCustom):
def __init__(self):
config = get_config(
stump_height=[],
stump_width=[],
stump_float=[],
pit_gap=[],
stair_height=[],
stair_steps=0,
stair_width=[],
ground_roughness=0
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
self.level_seed = int(str(time.time() / 1000)[-3:])
super().seed(self.level_seed)
stump_high = np.random.uniform(2.4, 3)
gap_high = np.random.uniform(6, 8)
roughness = np.random.uniform(4.5, 8)
config = get_config(
stump_height=[0, stump_high],
stump_width=[1, 2],
stump_float=[0, 1],
pit_gap=[0, gap_high],
stair_height=[],
stair_steps=0,
stair_width=[],
ground_roughness=roughness)
super().re_init(config, self.level_seed)
return super()._reset_env()
## POET Rose
roses = {
'1a': [5.6, 2.4, 2.82, 6.4, 4.48],
'1b': [5.44, 1.8, 2.82, 6.72, 4.48],
'2a': [7.2, 1.98, 2.82, 7.2, 5.6],
'2b': [5.76, 2.16, 2.76, 7.2, 1.6],
'3a': [5.28, 1.98, 2.76, 7.2, 4.8],
'3b': [4.8, 2.4, 2.76, 4.48, 4.8]
}
class BipedalWalkerPOETRose(BipedalWalkerCustom):
def __init__(self, rose_id='1a'):
id = roses[rose_id]
config = get_config(
stump_height=[id[1], id[2]],
stump_width=[1, 2],
stump_float=[0, 1],
pit_gap=[id[4], id[3]],
stair_height=[],
stair_steps=[],
stair_width=[],
ground_roughness=id[0]
)
super().__init__(env_config=config, seed=int(str(time.time() / 1000)[-3:]))
def reset(self):
super().seed(int(str(time.time() / 1000)[-3:]))
return super()._reset_env()
class BipedalWalkerPOETRose1a(BipedalWalkerPOETRose):
def __init__(self):
super().__init__(rose_id='1a')
class BipedalWalkerPOETRose1b(BipedalWalkerPOETRose):
def __init__(self):
super().__init__(rose_id='1b')
class BipedalWalkerPOETRose2a(BipedalWalkerPOETRose):
def __init__(self):
super().__init__(rose_id='2a')
class BipedalWalkerPOETRose2b(BipedalWalkerPOETRose):
def __init__(self):
super().__init__(rose_id='2b')
class BipedalWalkerPOETRose3a(BipedalWalkerPOETRose):
def __init__(self):
super().__init__(rose_id='3a')
class BipedalWalkerPOETRose3b(BipedalWalkerPOETRose):
def __init__(self):
super().__init__(rose_id='3b')
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
gym_register(id='BipedalWalker-Default-v0',
entry_point=module_path + ':BipedalWalkerDefault',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Med-Roughness-v0',
entry_point=module_path + ':BipedalWalkerMedRoughness',
max_episode_steps=2000)
gym_register(id='BipedalWalker-High-Roughness-v0',
entry_point=module_path + ':BipedalWalkerHighRoughness',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Med-StumpHeight-v0',
entry_point=module_path + ':BipedalWalkerMedStumps',
max_episode_steps=2000)
gym_register(id='BipedalWalker-High-StumpHeight-v0',
entry_point=module_path + ':BipedalWalkerHighStumps',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Med-Stairs-v0',
entry_point=module_path + ':BipedalWalkerMedStairs',
max_episode_steps=2000)
gym_register(id='BipedalWalker-High-Stairs-v0',
entry_point=module_path + ':BipedalWalkerHighStairs',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Med-PitGap-v0',
entry_point=module_path + ':BipedalWalkerMedPits',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Wide-PitGap-v0',
entry_point=module_path + ':BipedalWalkerWidePits',
max_episode_steps=2000)
gym_register(id='BipedalWalker-Insane-v0',
entry_point=module_path + ':BipedalWalkerInsane',
max_episode_steps=2000)
gym_register(id='BipedalWalker-XChal-v0',
entry_point=module_path + ':BipedalWalkerXChal',
max_episode_steps=2000)
for id in ['1a', '1b', '2a', '2b', '3a', '3b']:
gym_register(id=f'BipedalWalker-POET-Rose-{id}-v0',
entry_point=module_path + f':BipedalWalkerPOETRose{id}',
max_episode_steps=2000)
|
dcd-main
|
envs/bipedalwalker/walker_test_envs.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Multi-agent goal-seeking task with many static obstacles.
"""
import gym_minigrid.minigrid as minigrid
from . import multigrid
from . import register
class ClutteredMultiGrid(multigrid.MultiGridEnv):
"""Goal seeking environment with obstacles."""
def __init__(self, size=15, n_agents=3, n_clutter=25, randomize_goal=True,
agent_view_size=5, max_steps=250, walls_are_lava=False,
**kwargs):
self.n_clutter = n_clutter
self.randomize_goal = randomize_goal
self.walls_are_lava = walls_are_lava
super().__init__(grid_size=size, max_steps=max_steps, n_agents=n_agents,
agent_view_size=agent_view_size, **kwargs)
def _gen_grid(self, width, height):
self.grid = multigrid.Grid(width, height)
self.grid.wall_rect(0, 0, width, height)
if self.randomize_goal:
self.place_obj(minigrid.Goal(), max_tries=100)
else:
self.put_obj(minigrid.Goal(), width - 2, height - 2)
for _ in range(self.n_clutter):
if self.walls_are_lava:
self.place_obj(minigrid.Lava(), max_tries=100)
else:
self.place_obj(minigrid.Wall(), max_tries=100)
self.place_agent()
self.mission = 'get to the green square'
def step(self, action):
obs, reward, done, info = multigrid.MultiGridEnv.step(self, action)
return obs, reward, done, info
class ClutteredMultiGridSingle6x6(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, size=6, n_clutter=5, randomize_goal=True,
agent_view_size=5, max_steps=50)
class ClutteredMultiGridSingle(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, size=15, n_clutter=25, randomize_goal=True,
agent_view_size=5, max_steps=250)
class Cluttered40Minigrid(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=40, minigrid_mode=True)
class Cluttered10Minigrid(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=10, minigrid_mode=True)
class Cluttered50Minigrid(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=50, minigrid_mode=True)
class Cluttered5Minigrid(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=5, minigrid_mode=True)
class Cluttered1MinigridMini(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=1, minigrid_mode=True, size=6)
class Cluttered6MinigridMini(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=6, minigrid_mode=True, size=6)
class Cluttered7MinigridMini(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=7, minigrid_mode=True, size=6)
class ClutteredMinigridLava(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, walls_are_lava=True, minigrid_mode=True)
class ClutteredMinigridLavaMini(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=4, walls_are_lava=True, size=6,
minigrid_mode=True)
class ClutteredMinigridLavaMedium(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=15, walls_are_lava=True, size=10,
minigrid_mode=True)
class Cluttered15MinigridMedium(ClutteredMultiGrid):
def __init__(self):
super().__init__(n_agents=1, n_clutter=15, minigrid_mode=True, size=10)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register.register(
env_id='MultiGrid-Cluttered-v0',
entry_point=module_path + ':ClutteredMultiGrid'
)
register.register(
env_id='MultiGrid-Cluttered-Single-v0',
entry_point=module_path + ':ClutteredMultiGridSingle'
)
register.register(
env_id='MultiGrid-Cluttered-Single-6x6-v0',
entry_point=module_path + ':ClutteredMultiGridSingle6x6'
)
register.register(
env_id='MultiGrid-Cluttered40-Minigrid-v0',
entry_point=module_path + ':Cluttered40Minigrid'
)
register.register(
env_id='MultiGrid-Cluttered10-Minigrid-v0',
entry_point=module_path + ':Cluttered10Minigrid'
)
register.register(
env_id='MultiGrid-Cluttered50-Minigrid-v0',
entry_point=module_path + ':Cluttered50Minigrid'
)
register.register(
env_id='MultiGrid-Cluttered5-Minigrid-v0',
entry_point=module_path + ':Cluttered5Minigrid'
)
register.register(
env_id='MultiGrid-MiniCluttered1-Minigrid-v0',
entry_point=module_path + ':Cluttered1MinigridMini'
)
register.register(
env_id='MultiGrid-MiniCluttered6-Minigrid-v0',
entry_point=module_path + ':Cluttered6MinigridMini'
)
register.register(
env_id='MultiGrid-MiniCluttered7-Minigrid-v0',
entry_point=module_path + ':Cluttered7MinigridMini'
)
register.register(
env_id='MultiGrid-Cluttered-Lava-Minigrid-v0',
entry_point=module_path + ':ClutteredMinigridLava'
)
register.register(
env_id='MultiGrid-MiniCluttered-Lava-Minigrid-v0',
entry_point=module_path + ':ClutteredMinigridLavaMini'
)
register.register(
env_id='MultiGrid-MediumCluttered-Lava-Minigrid-v0',
entry_point=module_path + ':ClutteredMinigridLavaMedium'
)
register.register(
env_id='MultiGrid-MediumCluttered15-Minigrid-v0',
entry_point=module_path + ':Cluttered15MinigridMedium'
)
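# Illustrative usage sketch (not part of the original file): the ids registered
# above can be instantiated through gym's factory, assuming envs.registration
# forwards to gym's global registry. Run via `python -m envs.multigrid.cluttered`
# so the relative imports resolve.
if __name__ == '__main__':
  import gym
  env = gym.make('MultiGrid-Cluttered-Single-v0')
  obs = env.reset()
  obs, reward, done, info = env.step(env.action_space.sample())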
|
dcd-main
|
envs/multigrid/cluttered.py
|
# Copyright (c) 2019 Maxime Chevalier-Boisvert.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import sys
import numpy as np
# Only ask users to install matplotlib if they actually need it
try:
import matplotlib.pyplot as plt
except ImportError:
    print('To display the environment in a window, please install matplotlib, e.g.:')
print('pip3 install --user matplotlib')
sys.exit(-1)
class Window:
"""
Window to draw a gridworld instance using Matplotlib
"""
def __init__(self, title):
self.fig = None
self.imshow_obj = None
# Create the figure and axes
self.fig, self.ax = plt.subplots()
# Show the env name in the window title
self.fig.canvas.manager.set_window_title(title)
plt.axis('off')
# Turn off x/y axis numbering/ticks
# self.ax.set_xticks([], [])
# self.ax.set_yticks([], [])
# Flag indicating the window was closed
self.closed = False
def close_handler(evt):
self.closed = True
self.fig.canvas.mpl_connect('close_event', close_handler)
def show_img(self, img):
"""
Show an image or update the image being shown
"""
# Show the first image of the environment
if self.imshow_obj is None:
self.imshow_obj = self.ax.imshow(img, interpolation='bilinear')
self.imshow_obj.set_data(img)
self.fig.canvas.draw()
# Let matplotlib process UI events
# This is needed for interactive mode to work properly
plt.pause(0.001)
def set_caption(self, text):
"""
Set/update the caption text below the image
"""
plt.xlabel(text)
def reg_key_handler(self, key_handler):
"""
Register a keyboard event handler
"""
# Keyboard handler
self.fig.canvas.mpl_connect('key_press_event', key_handler)
def show(self, block=True):
"""
Show the window, and start an event loop
"""
# If not blocking, trigger interactive mode
if not block:
plt.ion()
# Show the plot
        # In non-interactive mode, this enters the matplotlib event loop
# In interactive mode, this call does not block
plt.show()
def close(self):
"""
Close the window
"""
plt.close()
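# Illustrative usage sketch (not part of the original file): display a random
# RGB frame in an interactive window. Requires a GUI-capable matplotlib backend.
if __name__ == '__main__':
    demo_window = Window('multigrid demo')
    frame = np.random.randint(0, 256, size=(160, 160, 3), dtype=np.uint8)
    demo_window.show_img(frame)
    demo_window.set_caption('random frame')
    demo_window.show(block=True)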
|
dcd-main
|
envs/multigrid/window.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Register MultiGrid environments with OpenAI gym."""
import gym
from envs.registration import register as gym_register
env_list = []
def register(env_id, entry_point, reward_threshold=0.95, max_episode_steps=None):
"""Register a new environment with OpenAI gym based on id."""
assert env_id.startswith("MultiGrid-")
if env_id in env_list:
    del gym.envs.registry.env_specs[env_id]
else:
# Add the environment to the set
    env_list.append(env_id)
kwargs = dict(
id=env_id,
entry_point=entry_point,
reward_threshold=reward_threshold
)
if max_episode_steps:
kwargs.update({'max_episode_steps':max_episode_steps})
# Register the environment with OpenAI gym
gym_register(**kwargs)
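# Illustrative usage sketch (not part of the original file): registering one of
# the cluttered environments defined in envs/multigrid/cluttered.py under a new
# id. The id below is hypothetical.
if __name__ == '__main__':
  register(
      env_id='MultiGrid-Cluttered-Example-v0',
      entry_point='envs.multigrid.cluttered:ClutteredMultiGridSingle',
      max_episode_steps=250)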
|
dcd-main
|
envs/multigrid/register.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""An environment which is built by a learning adversary.
Has two additional functions, step_adversary and reset_agent. How to use:
1. Call reset() to reset to an empty environment
2. Call step_adversary() to place the goal, agent, and obstacles. Repeat until
a done is received.
3. Normal RL loop. Use learning agent to generate actions and use them to call
step() until a done is received.
4. If required, call reset_agent() to reset the environment the way the
adversary designed it. A new agent can now play it using the step() function.
"""
import random
import time
import gym
import gym_minigrid.minigrid as minigrid
import networkx as nx
from networkx import grid_graph
import numpy as np
from . import multigrid
from . import register
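# The edit symbols below are consumed by mutate_level: '-' places a wall,
# '.' clears a tile, 'a' repositions the agent start, and 'g' repositions the
# goal.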
EDITOR_ACTION_SPACES = {
'walls_none': {
0: '-',
1: '.',
},
'walls_none_goal': {
0: '-',
1: '.',
2: 'g',
},
'walls_none_agent_goal': {
0: '-',
1: '.',
2: 'a',
3: 'g',
},
}
class AdversarialEnv(multigrid.MultiGridEnv):
"""Grid world where an adversary build the environment the agent plays.
The adversary places the goal, agent, and up to n_clutter blocks in sequence.
The action dimension is the number of squares in the grid, and each action
chooses where the next item should be placed.
"""
def __init__(self,
n_clutter=50,
resample_n_clutter=False,
size=15,
agent_view_size=5,
max_steps=250,
goal_noise=0.,
random_z_dim=50,
choose_goal_last=False,
see_through_walls=True,
seed=0,
editor_actions='walls_none_agent_goal',
fixed_environment=False):
"""Initializes environment in which adversary places goal, agent, obstacles.
Args:
n_clutter: The maximum number of obstacles the adversary can place.
size: The number of tiles across one side of the grid; i.e. make a
size x size grid.
agent_view_size: The number of tiles in one side of the agent's partially
observed view of the grid.
max_steps: The maximum number of steps that can be taken before the
episode terminates.
goal_noise: The probability with which the goal will move to a different
location than the one chosen by the adversary.
random_z_dim: The environment generates a random vector z to condition the
adversary. This gives the dimension of that vector.
      choose_goal_last: If True, will place the goal and agent as the last
        actions, rather than the first actions.
      resample_n_clutter: If True, the number of obstacles available to the
        adversary is resampled (up to n_clutter) for each episode.
      see_through_walls: If True, the agent's view is not blocked by walls
        (faster observation rendering).
      seed: Random seed used to generate environments.
      editor_actions: Key into EDITOR_ACTION_SPACES selecting which edit
        operations mutate_level may apply.
      fixed_environment: If True, the same random seed is reused on every
        reset, so the generated environment stays fixed.
    """
self.agent_start_pos = None
self.goal_pos = None
self.n_clutter = n_clutter
self.resample_n_clutter = resample_n_clutter
self.goal_noise = goal_noise
self.random_z_dim = random_z_dim
self.choose_goal_last = choose_goal_last
# Add two actions for placing the agent and goal.
self.n_clutter_sampled = False
self.adversary_max_steps = self.n_clutter + 2
super().__init__(
n_agents=1,
minigrid_mode=True,
grid_size=size,
max_steps=max_steps,
agent_view_size=agent_view_size,
see_through_walls=see_through_walls, # Set this to True for maximum speed
competitive=True,
seed=seed,
fixed_environment=fixed_environment,
)
# Metrics
self.reset_metrics()
self.editor_actions = list(EDITOR_ACTION_SPACES[editor_actions].values())
# Create spaces for adversary agent's specs.
self.adversary_action_dim = (size - 2)**2
self.adversary_action_space = gym.spaces.Discrete(self.adversary_action_dim)
self.adversary_ts_obs_space = gym.spaces.Box(
low=0, high=self.adversary_max_steps, shape=(1,), dtype='uint8')
self.adversary_randomz_obs_space = gym.spaces.Box(
low=0, high=1.0, shape=(random_z_dim,), dtype=np.float32)
self.adversary_image_obs_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.width, self.height, 3),
dtype='uint8')
# Adversary observations are dictionaries containing an encoding of the
# grid, the current time step, and a randomly generated vector used to
# condition generation (as in a GAN).
self.adversary_observation_space = gym.spaces.Dict(
{'image': self.adversary_image_obs_space,
'time_step': self.adversary_ts_obs_space,
'random_z': self.adversary_randomz_obs_space})
# NetworkX graph used for computing shortest path
self.graph = grid_graph(dim=[size-2, size-2])
self.wall_locs = []
def _resample_n_clutter(self):
n_clutter = np.random.randint(0, self.n_clutter)
self.adversary_max_steps = n_clutter + 2
self.n_clutter_sampled = True
return n_clutter
@property
def processed_action_dim(self):
return 1
@property
def encoding(self):
return self.grid.encode()
def _gen_grid(self, width, height):
"""Grid is initially empty, because adversary will create it."""
# Create an empty grid
self.grid = multigrid.Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
def get_goal_x(self):
if self.goal_pos is None:
return -1
return self.goal_pos[0]
def get_goal_y(self):
if self.goal_pos is None:
return -1
return self.goal_pos[1]
def reset_metrics(self):
self.distance_to_goal = -1
self.n_clutter_placed = 0
self.passable = -1
self.shortest_path_length = (self.width - 2) * (self.height - 2) + 1
def compute_metrics(self):
self.n_clutter_placed = self._count_walls()
self.compute_shortest_path()
def reset(self):
"""Fully resets the environment to an empty grid with no agent or goal."""
self.graph = grid_graph(dim=[self.width-2, self.height-2])
self.wall_locs = []
self.step_count = 0
self.adversary_step_count = 0
if self.resample_n_clutter:
self.n_clutter_sampled = False
self.agent_start_dir = self._rand_int(0, 4)
# Current position and direction of the agent
self.reset_agent_status()
self.agent_start_pos = None
self.goal_pos = None
self.done = False
# Extra metrics
self.reset_metrics()
# Generate the grid. Will be random by default, or same environment if
# 'fixed_environment' is True.
self._gen_grid(self.width, self.height)
image = self.grid.encode()
obs = {
'image': image,
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
return obs
def reset_agent_status(self):
"""Reset the agent's position, direction, done, and carrying status."""
self.agent_pos = [None] * self.n_agents
self.agent_dir = [self.agent_start_dir] * self.n_agents
self.done = [False] * self.n_agents
self.carrying = [None] * self.n_agents
def reset_agent(self):
"""Resets the agent's start position, but leaves goal and walls."""
# Remove the previous agents from the world
for a in range(self.n_agents):
if self.agent_pos[a] is not None:
self.grid.set(self.agent_pos[a][0], self.agent_pos[a][1], None)
# Current position and direction of the agent
self.reset_agent_status()
if self.agent_start_pos is None:
raise ValueError('Trying to place agent at empty start position.')
else:
self.place_agent_at_pos(0, self.agent_start_pos, rand_dir=False)
for a in range(self.n_agents):
assert self.agent_pos[a] is not None
assert self.agent_dir[a] is not None
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos[a])
      if not (start_cell is None or start_cell.type == 'agent' or
              start_cell.can_overlap()):
raise ValueError('Wrong object in agent start position.')
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def reset_to_level(self, level):
self.reset()
if isinstance(level, str):
actions = [int(a) for a in level.split()]
if self.resample_n_clutter:
self.adversary_max_steps = len(actions)
for a in actions:
obs, _, done, _ = self.step_adversary(a)
if done:
obs = self.reset_agent()
else:
# reset based on encoding
obs = self.reset_to_encoding(level)
return obs
def reset_to_encoding(self, encoding):
self.grid.set_encoding(encoding, multigrid_env=self)
self.compute_metrics()
return self.reset_agent()
def _clean_loc(self, x,y):
# Remove any walls
self.remove_wall(x, y)
# print(f'cleaning loc {x}, {y}', flush=True)
if isinstance(self.grid.get(x,y), minigrid.Goal):
self.goal_pos = None
elif isinstance(self.grid.get(x,y), multigrid.Agent):
self.agent_start_pos = None
self.grid.set(x, y, None)
def _free_xy_from_mask(self, free_mask):
free_idx = free_mask.flatten().nonzero()[0]
free_loc = np.random.choice(free_idx)
mask_w, mask_h = free_mask.shape
x = free_loc % mask_w
y = free_loc // mask_w
return x,y
def mutate_level(self, num_edits=1):
"""
Mutate the current level:
- Select num_edits locations (with replacement).
- Take the unique set of locations, which can be < num_edits.
- Choose a unique entity for each location.
- Place entities in each location.
- Place goal and agent if they do not exist.
"""
num_tiles = (self.width-2)*(self.height-2)
edit_locs = list(set(np.random.randint(0, num_tiles, num_edits)))
action_idx = np.random.randint(0, len(self.editor_actions), len(edit_locs))
actions = [self.editor_actions[i] for i in action_idx]
free_mask = ~self.wall_mask
free_mask[self.agent_start_pos[1]-1, self.agent_start_pos[0]-1] = False
free_mask[self.goal_pos[1]-1, self.goal_pos[0]-1] = False
for loc, a in zip(edit_locs, actions):
x = loc % (self.width - 2) + 1
y = loc // (self.width - 2) + 1
self._clean_loc(x,y)
if a == '-':
self.put_obj(minigrid.Wall(), x, y)
self.wall_locs.append((x-1, y-1))
self.n_clutter_placed += 1
free_mask[y-1,x-1] = False
elif a == '.':
self.remove_wall(x, y)
self.grid.set(x, y, None)
free_mask[y-1,x-1] = True
elif a == 'a':
if self.agent_start_pos is not None:
ax,ay = self.agent_start_pos
self.grid.set(ax, ay, None)
free_mask[ay-1,ax-1] = True
self.place_one_agent(0, top=(x,y), size=(1,1))
self.agent_start_pos = np.array((x,y))
free_mask[y-1,x-1] = False
elif a == 'g':
if self.goal_pos is not None:
gx,gy = self.goal_pos
self.grid.set(gx, gy, None)
free_mask[gy-1,gx-1] = True
self.put_obj(minigrid.Goal(), x, y)
self.goal_pos = np.array((x,y))
free_mask[y-1,x-1] = False
# Make sure goal exists
if self.goal_pos is None:
x,y = self._free_xy_from_mask(free_mask)
free_mask[y,x] = False
x += 1
y += 1
self.put_obj(minigrid.Goal(), x, y)
self.goal_pos = np.array((x,y))
# Make sure agent exists
if self.agent_start_pos is None:
x,y = self._free_xy_from_mask(free_mask)
free_mask[y,x] = False
x += 1
y += 1
self.place_one_agent(0, top=(x,y), size=(1,1))
self.agent_start_pos = np.array((x,y))
# Reset meta info
self.graph = grid_graph(dim=[self.width-2, self.height-2])
self.step_count = 0
self.adversary_step_count = 0
self.reset_metrics()
self.compute_metrics()
return self.reset_agent()
def remove_wall(self, x, y):
if (x-1, y-1) in self.wall_locs:
self.wall_locs.remove((x-1, y-1))
self.n_clutter_placed -= 1
obj = self.grid.get(x, y)
if obj is not None and obj.type == 'wall':
self.grid.set(x, y, None)
def _count_walls(self):
wall_mask = np.array(
        [1 if isinstance(x, minigrid.Wall) else 0 for x in self.grid.grid], dtype=bool)\
.reshape(self.height, self.width)[1:-1,1:-1]
self.wall_mask = wall_mask
num_walls = wall_mask.sum()
wall_pos = list(zip(*np.nonzero(wall_mask)))
self.wall_locs = [(x+1,y+1) for y,x in wall_pos]
for y,x in wall_pos:
self.graph.remove_node((x,y))
return num_walls
def compute_shortest_path(self):
if self.agent_start_pos is None or self.goal_pos is None:
return
self.distance_to_goal = abs(
self.goal_pos[0] - self.agent_start_pos[0]) + abs(
self.goal_pos[1] - self.agent_start_pos[1])
# Check if there is a path between agent start position and goal. Remember
# to subtract 1 due to outside walls existing in the Grid, but not in the
# networkx graph.
self.passable = nx.has_path(
self.graph,
source=(self.agent_start_pos[0] - 1, self.agent_start_pos[1] - 1),
target=(self.goal_pos[0]-1, self.goal_pos[1]-1))
if self.passable:
# Compute shortest path
self.shortest_path_length = nx.shortest_path_length(
self.graph,
source=(self.agent_start_pos[0]-1, self.agent_start_pos[1]-1),
target=(self.goal_pos[0]-1, self.goal_pos[1]-1))
else:
      # Impassable environments are assigned a shortest path length one longer
      # than the longest possible path.
self.shortest_path_length = (self.width - 2) * (self.height - 2) + 1
def generate_random_z(self):
return np.random.uniform(size=(self.random_z_dim,)).astype(np.float32)
def step_adversary(self, loc):
"""The adversary gets n_clutter + 2 moves to place the goal, agent, blocks.
The action space is the number of possible squares in the grid. The squares
are numbered from left to right, top to bottom.
Args:
loc: An integer specifying the location to place the next object which
must be decoded into x, y coordinates.
Returns:
Standard RL observation, reward (always 0), done, and info
"""
if loc >= self.adversary_action_dim:
raise ValueError('Position passed to step_adversary is outside the grid.')
# Resample block count if necessary, based on first loc
if self.resample_n_clutter and not self.n_clutter_sampled:
n_clutter = int((loc/self.adversary_action_dim)*self.n_clutter)
self.adversary_max_steps = n_clutter + 2
self.n_clutter_sampled = True
if self.adversary_step_count < self.adversary_max_steps:
# Add offset of 1 for outside walls
x = int(loc % (self.width - 2)) + 1
y = int(loc / (self.width - 2)) + 1
done = False
if self.choose_goal_last:
should_choose_goal = self.adversary_step_count == self.adversary_max_steps - 2
should_choose_agent = self.adversary_step_count == self.adversary_max_steps - 1
else:
should_choose_goal = self.adversary_step_count == 0
should_choose_agent = self.adversary_step_count == 1
# print(f"{self.adversary_step_count}/{self.adversary_max_steps}", flush=True)
# print(f"goal/agent = {should_choose_goal}/{should_choose_agent}", flush=True)
# Place goal
if should_choose_goal:
# If there is goal noise, sometimes randomly place the goal
if random.random() < self.goal_noise:
self.goal_pos = self.place_obj(minigrid.Goal(), max_tries=100)
else:
self.remove_wall(x, y) # Remove any walls that might be in this loc
self.put_obj(minigrid.Goal(), x, y)
self.goal_pos = (x, y)
# Place the agent
elif should_choose_agent:
self.remove_wall(x, y) # Remove any walls that might be in this loc
# Goal has already been placed here
if self.grid.get(x, y) is not None:
# Place agent randomly
self.agent_start_pos = self.place_one_agent(0, rand_dir=False)
self.deliberate_agent_placement = 0
else:
self.agent_start_pos = np.array([x, y])
self.place_agent_at_pos(0, self.agent_start_pos, rand_dir=False)
self.deliberate_agent_placement = 1
# Place wall
elif self.adversary_step_count < self.adversary_max_steps:
# If there is already an object there, action does nothing
if self.grid.get(x, y) is None:
self.put_obj(minigrid.Wall(), x, y)
self.n_clutter_placed += 1
self.wall_locs.append((x-1, y-1))
self.adversary_step_count += 1
# End of episode
if self.adversary_step_count >= self.n_clutter + 2:
done = True
self.reset_metrics()
self.compute_metrics()
else:
done = False
image = self.grid.encode()
obs = {
'image': image,
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
return obs, 0, done, {}
  def reset_random(self):
    """Use domain randomization to create the environment."""
    if self.fixed_environment:
      self.seed(self.seed_value)
self.graph = grid_graph(dim=[self.width-2, self.height-2])
self.step_count = 0
self.adversary_step_count = 0
# Current position and direction of the agent
self.reset_agent_status()
self.agent_start_pos = None
self.goal_pos = None
# Extra metrics
self.reset_metrics()
# Create empty grid
self._gen_grid(self.width, self.height)
# Randomly place goal
self.goal_pos = self.place_obj(minigrid.Goal(), max_tries=100)
# Randomly place agent
self.agent_start_dir = self._rand_int(0, 4)
self.agent_start_pos = self.place_one_agent(0, rand_dir=False)
# Randomly place walls
if self.resample_n_clutter:
n_clutter = self._resample_n_clutter()
else:
n_clutter = int(self.n_clutter/2) # Based on original PAIRED logic
for _ in range(n_clutter):
self.place_obj(minigrid.Wall(), max_tries=100)
self.compute_metrics()
return self.reset_agent()
class MiniAdversarialEnv(AdversarialEnv):
def __init__(self):
super().__init__(n_clutter=7, size=6, agent_view_size=5, max_steps=50)
class NoisyAdversarialEnv(AdversarialEnv):
def __init__(self):
super().__init__(goal_noise=0.3)
class MediumAdversarialEnv(AdversarialEnv):
def __init__(self):
super().__init__(n_clutter=30, size=10, agent_view_size=5, max_steps=200)
class GoalLastAdversarialEnv(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(choose_goal_last=True, fixed_environment=fixed_environment, seed=seed, max_steps=250)
class GoalLastAdversarialEnv30(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(choose_goal_last=True, n_clutter=30, fixed_environment=fixed_environment, seed=seed, max_steps=250)
class GoalLastAdversarialEnv60(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(choose_goal_last=True, n_clutter=60, fixed_environment=fixed_environment, seed=seed, max_steps=250)
class GoalLastOpaqueWallsAdversarialEnv(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, see_through_walls=False,
fixed_environment=fixed_environment, seed=seed, max_steps=250)
class GoalLastFewerBlocksAdversarialEnv(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=25,
fixed_environment=fixed_environment, seed=seed, max_steps=250)
class GoalLastFewerBlocksAdversarialEnv_WN(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=25,
fixed_environment=fixed_environment, seed=seed, max_steps=250,
editor_actions='walls_none')
class GoalLastFewerBlocksAdversarialEnv_WNG(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=25,
fixed_environment=fixed_environment, seed=seed, max_steps=250,
editor_actions='walls_none_goal')
class GoalLastVariableBlocksAdversarialEnv_WNG(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=60, resample_n_clutter=True,
fixed_environment=fixed_environment, seed=seed, max_steps=250,
editor_actions='walls_none_goal')
class GoalLastVariableBlocksAdversarialEnv(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=60, resample_n_clutter=True,
fixed_environment=fixed_environment, seed=seed, max_steps=250)
class GoalLastEmptyAdversarialEnv_WNG(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=0,
fixed_environment=fixed_environment, seed=seed, max_steps=250,
editor_actions='walls_none_goal')
class GoalLastFewerBlocksOpaqueWallsAdversarialEnv(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(
choose_goal_last=True, n_clutter=25, see_through_walls=False,
fixed_environment=fixed_environment, seed=seed, max_steps=250)
class MiniGoalLastAdversarialEnv(AdversarialEnv):
def __init__(self, fixed_environment=False, seed=None):
super().__init__(n_clutter=7, size=6, agent_view_size=5, max_steps=50,
choose_goal_last=True, fixed_environment=fixed_environment, seed=seed)
class FixedAdversarialEnv(AdversarialEnv):
def __init__(self):
super().__init__(n_clutter=50, size=15, agent_view_size=5, max_steps=50, fixed_environment=True)
class EmptyMiniFixedAdversarialEnv(AdversarialEnv):
def __init__(self):
super().__init__(n_clutter=0, size=6, agent_view_size=5, max_steps=50, fixed_environment=True)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register.register(
env_id='MultiGrid-Adversarial-v0',
entry_point=module_path + ':AdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-MiniAdversarial-v0',
entry_point=module_path + ':MiniAdversarialEnv',
max_episode_steps=50,
)
register.register(
env_id='MultiGrid-NoisyAdversarial-v0',
entry_point=module_path + ':NoisyAdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-MediumAdversarial-v0',
entry_point=module_path + ':MediumAdversarialEnv',
max_episode_steps=200,
)
register.register(
env_id='MultiGrid-GoalLastAdversarial-v0',
entry_point=module_path + ':GoalLastAdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastOpaqueWallsAdversarial-v0',
entry_point=module_path + ':GoalLastOpaqueWallsAdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastFewerBlocksAdversarial-v0',
entry_point=module_path + ':GoalLastFewerBlocksAdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastFewerBlocksAdversarial-EditWN-v0',
entry_point=module_path + ':GoalLastFewerBlocksAdversarialEnv_WN',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastFewerBlocksAdversarial-EditWNG-v0',
entry_point=module_path + ':GoalLastFewerBlocksAdversarialEnv_WNG',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastVariableBlocksAdversarialEnv-v0',
entry_point=module_path + ':GoalLastVariableBlocksAdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastVariableBlocksAdversarialEnv-Edit-v0',
entry_point=module_path + ':GoalLastVariableBlocksAdversarialEnv_WNG',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastEmptyAdversarialEnv-Edit-v0',
entry_point=module_path + ':GoalLastEmptyAdversarialEnv_WNG',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-GoalLastFewerBlocksOpaqueWallsAdversarial-v0',
entry_point=module_path + ':GoalLastFewerBlocksOpaqueWallsAdversarialEnv',
max_episode_steps=250,
)
register.register(
env_id='MultiGrid-MiniGoalLastAdversarial-v0',
entry_point=module_path + ':MiniGoalLastAdversarialEnv',
max_episode_steps=50,
)
register.register(
env_id='MultiGrid-FixedAdversarial-v0',
entry_point=module_path + ':FixedAdversarialEnv',
max_episode_steps=50,
)
register.register(
env_id='MultiGrid-EmptyMiniFixedAdversarial-v0',
entry_point=module_path + ':EmptyMiniFixedAdversarialEnv',
max_episode_steps=50,
)
register.register(
env_id='MultiGrid-GoalLastAdversarialEnv30-v0',
entry_point=module_path + ':GoalLastAdversarialEnv30',
max_episode_steps=50,
)
register.register(
env_id='MultiGrid-GoalLastAdversarialEnv60-v0',
entry_point=module_path + ':GoalLastAdversarialEnv60',
max_episode_steps=50,
)
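# Illustrative usage sketch (not part of the original file): the
# adversary/agent protocol described in the module docstring. Random actions
# stand in for the learned adversary and protagonist policies.
if __name__ == '__main__':
  env = AdversarialEnv(n_clutter=10, size=15)
  obs = env.reset()  # empty grid
  done = False
  while not done:    # adversary places the goal, the agent, then walls
    loc = env.adversary_action_space.sample()
    obs, _, done, _ = env.step_adversary(loc)
  obs = env.reset_agent()  # first observation for the agent in the built level
  for _ in range(10):      # agent acts in the designed environment
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
      break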
|
dcd-main
|
envs/multigrid/adversarial.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .adversarial import *
|
dcd-main
|
envs/multigrid/__init__.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym_minigrid.minigrid as minigrid
import numpy as np
from . import multigrid
# from . import register
import envs.registration as register
class MazeEnv(multigrid.MultiGridEnv):
"""Single-agent maze environment specified via a bit map."""
def __init__(self, agent_view_size=5, minigrid_mode=True, max_steps=None,
bit_map=None, start_pos=None, goal_pos=None, size=15):
default_agent_start_x = 7
default_agent_start_y = 1
default_goal_start_x = 7
default_goal_start_y = 13
self.start_pos = np.array(
[default_agent_start_x,
default_agent_start_y]) if start_pos is None else start_pos
self.goal_pos = (
default_goal_start_x,
default_goal_start_y) if goal_pos is None else goal_pos
if max_steps is None:
max_steps = 2*size*size
if bit_map is not None:
bit_map = np.array(bit_map)
if bit_map.shape != (size-2, size-2):
print('Error! Bit map shape does not match size. Using default maze.')
bit_map = None
if bit_map is None:
self.bit_map = np.array([
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1],
[0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0],
[1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0]
])
else:
self.bit_map = bit_map
super().__init__(
n_agents=1,
grid_size=size,
agent_view_size=agent_view_size,
max_steps=max_steps,
see_through_walls=True, # Set this to True for maximum speed
minigrid_mode=minigrid_mode
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = multigrid.Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Goal
self.put_obj(minigrid.Goal(), self.goal_pos[0], self.goal_pos[1])
# Agent
self.place_agent_at_pos(0, self.start_pos)
# Walls
for x in range(self.bit_map.shape[0]):
for y in range(self.bit_map.shape[1]):
if self.bit_map[y, x]:
# Add an offset of 1 for the outer walls
self.put_obj(minigrid.Wall(), x+1, y+1)
class HorizontalMazeEnv(MazeEnv):
"""A short but non-optimal path is 80 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([1, 7])
goal_pos = np.array([13, 5])
bit_map = np.array([
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class Maze3Env(MazeEnv):
"""A short but non-optimal path is 80 moves."""
def __init__(self):
# positions go col, row and indexing starts at 1
start_pos = np.array([4, 1])
goal_pos = np.array([13, 7])
bit_map = np.array([
[0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class SmallCorridorEnv(MazeEnv):
"""A shorter backtracking env."""
def __init__(self):
# positions go col, row and indexing starts at 1
start_pos = np.array([1, 7])
row = np.random.choice([6,8])
col = np.random.choice([3,5,7,9,11])
goal_pos = np.array([col,row])
bit_map = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class LargeCorridorEnv(MazeEnv):
"""A long backtracking env."""
def __init__(self):
# positions go col, row and indexing starts at 1
start_pos = np.array([1, 10])
row = np.random.choice([9, 11])
col = np.random.choice([3,5,7,9,11,13,15,17])
goal_pos = np.array([col,row])
bit_map = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
super().__init__(size=21, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class LabyrinthEnv(MazeEnv):
"""A short but non-optimal path is 118 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([1, 13])
goal_pos = np.array([7, 7])
bit_map = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class Labyrinth2Env(MazeEnv):
"""A short but non-optimal path is 118 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([1, 1])
goal_pos = np.array([7, 7])
bit_map = np.array([
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class NineRoomsEnv(MazeEnv):
"""Can be completed in 27 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([2, 2])
goal_pos = np.array([12, 12])
bit_map = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class NineRoomsFewerDoorsEnv(MazeEnv):
"""Can be completed in 27 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([2, 2])
goal_pos = np.array([12, 12])
bit_map = np.array([
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class SixteenRoomsEnv(MazeEnv):
"""Can be completed in 16 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([2, 2])
goal_pos = np.array([12, 12])
bit_map = np.array([
[0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class SixteenRoomsFewerDoorsEnv(MazeEnv):
"""Can be completed in 16 moves."""
def __init__(self):
# positions go col, row
start_pos = np.array([2, 2])
goal_pos = np.array([12, 12])
bit_map = np.array([
[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0]
])
super().__init__(size=15, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class MiniMazeEnv(MazeEnv):
"""A smaller maze for debugging."""
def __init__(self):
start_pos = np.array([1, 1])
goal_pos = np.array([1, 3])
bit_map = np.array([
[0, 0, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
])
super().__init__(size=6, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
class MediumMazeEnv(MazeEnv):
"""A 10x10 Maze environment."""
def __init__(self):
start_pos = np.array([5, 1])
goal_pos = np.array([3, 8])
bit_map = np.array([
[0, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
])
super().__init__(size=10, bit_map=bit_map, start_pos=start_pos,
goal_pos=goal_pos)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register.register(
id='MultiGrid-Maze-v0',
entry_point=module_path + ':MazeEnv'
)
register.register(
id='MultiGrid-MiniMaze-v0',
entry_point=module_path + ':MiniMazeEnv'
)
register.register(
id='MultiGrid-MediumMaze-v0',
entry_point=module_path + ':MediumMazeEnv'
)
register.register(
id='MultiGrid-Maze2-v0',
entry_point=module_path + ':HorizontalMazeEnv'
)
register.register(
id='MultiGrid-Maze3-v0',
entry_point=module_path + ':Maze3Env'
)
register.register(
id='MultiGrid-SmallCorridor-v0',
entry_point=module_path + ':SmallCorridorEnv'
)
register.register(
id='MultiGrid-LargeCorridor-v0',
entry_point=module_path + ':LargeCorridorEnv'
)
register.register(
id='MultiGrid-Labyrinth-v0',
entry_point=module_path + ':LabyrinthEnv'
)
register.register(
id='MultiGrid-Labyrinth2-v0',
entry_point=module_path + ':Labyrinth2Env'
)
register.register(
id='MultiGrid-SixteenRooms-v0',
entry_point=module_path + ':SixteenRoomsEnv'
)
register.register(
id='MultiGrid-SixteenRoomsFewerDoors-v0',
entry_point=module_path + ':SixteenRoomsFewerDoorsEnv'
)
register.register(
id='MultiGrid-NineRooms-v0',
entry_point=module_path + ':NineRoomsEnv'
)
register.register(
id='MultiGrid-NineRoomsFewerDoors-v0',
entry_point=module_path + ':NineRoomsFewerDoorsEnv'
)
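# Illustrative usage sketch (not part of the original file): MazeEnv levels are
# defined by a (size-2) x (size-2) binary wall map plus (col, row) start and
# goal positions. The 4x4 layout below is only an example.
if __name__ == '__main__':
  example_bit_map = np.array([
      [0, 1, 0, 0],
      [0, 1, 0, 1],
      [0, 1, 0, 0],
      [0, 0, 0, 0],
  ])
  env = MazeEnv(size=6, bit_map=example_bit_map,
                start_pos=np.array([1, 1]), goal_pos=np.array([4, 4]))
  obs = env.reset()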
|
dcd-main
|
envs/multigrid/maze.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the multi-agent version of the Grid and MultiGridEnv classes.
Note that at each step, the environment expects an array of actions equal to the
number of agents with which the class was initialized. Similarly, it will return
an array of observations, and an array of rewards.
In the competitive version, as soon as one agent finds the goal, the game is
over.
In the non-competitive case, all episodes have a fixed length based on the
maximum number of steps. To avoid issues with some agents finishing early and
therefore requiring support for non-scalar step types, if an agent finishes
before the step deadline it will be respawned in a new location. To make the
single-agent case comparable to this design, it should also run for a fixed
number of steps and allow the agent to find the goal as many times as possible
within this step budget.
Unlike Minigrid, Multigrid does not include the string text of the 'mission'
with each observation.
"""
import math
import gym
import gym_minigrid.minigrid as minigrid
import gym_minigrid.rendering as rendering
import numpy as np
from . import window
# Map of color names to RGB values
AGENT_COLOURS = [
np.array([60, 182, 234]), # Blue
np.array([229, 52, 52]), # Red
np.array([144, 32, 249]), # Purple
np.array([69, 196, 60]), # Green
np.array([252, 227, 35]), # Yellow
]
class WorldObj(minigrid.WorldObj):
"""Override MiniGrid base class to deal with Agent objects."""
def __init__(self, obj_type, color=None):
assert obj_type in minigrid.OBJECT_TO_IDX, obj_type
self.type = obj_type
if color:
assert color in minigrid.COLOR_TO_IDX, color
self.color = color
self.contains = None
# Initial position of the object
self.init_pos = None
# Current position of the object
self.cur_pos = None
@staticmethod
def decode(type_idx, color_idx, state):
"""Create an object from a 3-tuple state description."""
obj_type = minigrid.IDX_TO_OBJECT[type_idx]
if obj_type != 'agent':
color = minigrid.IDX_TO_COLOR[color_idx]
if obj_type == 'empty' or obj_type == 'unseen':
return None
if obj_type == 'wall':
v = minigrid.Wall(color)
elif obj_type == 'floor':
v = minigrid.Floor(color)
elif obj_type == 'ball':
v = minigrid.Ball(color)
elif obj_type == 'key':
v = minigrid.Key(color)
elif obj_type == 'box':
v = minigrid.Box(color)
elif obj_type == 'door':
# State, 0: open, 1: closed, 2: locked
is_open = state == 0
is_locked = state == 2
v = Door(color, is_open, is_locked)
elif obj_type == 'goal':
v = minigrid.Goal()
elif obj_type == 'lava':
v = minigrid.Lava()
elif obj_type == 'agent':
v = Agent(color_idx, state)
else:
assert False, "unknown object type in decode '%s'" % obj_type
return v
class Door(minigrid.Door):
"""Extends minigrid Door class to multiple agents possibly carrying keys."""
def toggle(self, env, pos, carrying):
# If the player has the right key to open the door
if self.is_locked:
if isinstance(carrying, minigrid.Key) and carrying.color == self.color:
self.is_locked = False
self.is_open = True
return True
return False
self.is_open = not self.is_open
return True
class Agent(WorldObj):
"""Class to represent other agents existing in the world."""
def __init__(self, agent_id, state):
super(Agent, self).__init__('agent')
self.agent_id = agent_id
self.dir = state
def can_contain(self):
"""Can this contain another object?"""
return True
def encode(self):
"""Encode the a description of this object as a 3-tuple of integers."""
return (minigrid.OBJECT_TO_IDX[self.type], self.agent_id, self.dir)
def render(self, img):
tri_fn = rendering.point_in_triangle(
(0.12, 0.19),
(0.87, 0.50),
(0.12, 0.81),
)
# Rotate the agent based on its direction
tri_fn = rendering.rotate_fn(
tri_fn, cx=0.5, cy=0.5, theta=0.5 * math.pi * self.dir)
color = AGENT_COLOURS[self.agent_id]
rendering.fill_coords(img, tri_fn, color)
class Grid(minigrid.Grid):
"""Extends Grid class, overrides some functions to cope with multi-agent case."""
@classmethod
def render_tile(cls,
obj,
highlight=None,
tile_size=minigrid.TILE_PIXELS,
subdivs=3,
cell_type=None):
"""Render a tile and cache the result."""
# Hash map lookup key for the cache
if isinstance(highlight, list):
key = (tuple(highlight), tile_size)
else:
key = (highlight, tile_size)
key = obj.encode() + key if obj else key
if key in cls.tile_cache:
return cls.tile_cache[key]
img = np.zeros(
shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)
# Draw the grid lines (top and left edges)
rendering.fill_coords(img, rendering.point_in_rect(0, 0.031, 0, 1),
(100, 100, 100))
rendering.fill_coords(img, rendering.point_in_rect(0, 1, 0, 0.031),
(100, 100, 100))
if obj is not None and obj.type != 'agent':
obj.render(img)
# Highlight the cell if needed (do not highlight walls)
if highlight and not (cell_type is not None and cell_type == 'wall'):
if isinstance(highlight, list):
for a, agent_highlight in enumerate(highlight):
if agent_highlight:
rendering.highlight_img(img, color=AGENT_COLOURS[a])
else:
# Default highlighting for agent's partially observed views
rendering.highlight_img(img)
# Render agents after highlight to avoid highlighting agent triangle (the
# combination of colours makes it difficult to ID agent)
if obj is not None and obj.type == 'agent':
obj.render(img)
# Downsample the image to perform supersampling/anti-aliasing
img = rendering.downsample(img, subdivs)
# Cache the rendered tile
cls.tile_cache[key] = img
return img
def render(self,
tile_size,
highlight_mask=None):
"""Render this grid at a given scale.
Args:
tile_size: Tile size in pixels.
highlight_mask: An array of binary masks, showing which part of the grid
should be highlighted for each agent. Can also be used in partial
observation for single agent, which must be handled differently.
Returns:
An image of the rendered Grid.
"""
if highlight_mask is None:
      highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)
# Compute the total grid size
width_px = self.width * tile_size
height_px = self.height * tile_size
img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)
# Render the grid
for y in range(0, self.height):
for x in range(0, self.width):
cell = self.get(x, y)
cell_type = cell.type if cell else None
if isinstance(highlight_mask, list):
# Figure out highlighting for each agent
n_agents = len(highlight_mask)
highlights = [highlight_mask[a][x, y] for a in range(n_agents)]
else:
highlights = highlight_mask[x, y]
tile_img = Grid.render_tile(
cell,
highlight=highlights,
tile_size=tile_size,
cell_type=cell_type,
)
ymin = y * tile_size
ymax = (y + 1) * tile_size
xmin = x * tile_size
xmax = (x + 1) * tile_size
img[ymin:ymax, xmin:xmax, :] = tile_img
return img
def set_encoding(self, encoding, multigrid_env=None):
assert tuple(encoding.shape[:2]) == (self.height, self.width)
for i in range(self.height):
for j in range(self.width):
v = WorldObj.decode(*encoding[i,j,:])
if isinstance(v, Agent):
v.agent_id = 0
if multigrid_env:
multigrid_env.agent_start_pos = np.array((i,j), dtype=np.int64)
elif isinstance(v, minigrid.Goal):
if multigrid_env:
multigrid_env.goal_pos = np.array((i,j), dtype=np.int64)
self.set(i, j, v)
@staticmethod
def decode(array):
"""Decode an array grid encoding back into a grid."""
width, height, channels = array.shape
assert channels == 3
    vis_mask = np.ones(shape=(width, height), dtype=bool)
grid = Grid(width, height)
for i in range(width):
for j in range(height):
type_idx, color_idx, state = array[i, j]
v = WorldObj.decode(type_idx, color_idx, state)
grid.set(i, j, v)
vis_mask[i, j] = (type_idx != minigrid.OBJECT_TO_IDX['unseen'])
return grid, vis_mask
def rotate_left(self):
"""Rotate the grid counter-clockwise, including agents within it."""
grid = Grid(self.height, self.width)
for i in range(self.width):
for j in range(self.height):
v = self.get(i, j)
# Directions are relative to the agent so must be modified
if v is not None and v.type == 'agent':
# Make a new agent so original grid isn't modified
v = Agent(v.agent_id, v.dir)
v.dir -= 1
if v.dir < 0:
v.dir += 4
grid.set(j, grid.height - 1 - i, v)
return grid
def slice(self, top_x, top_y, width, height, agent_pos=None):
"""Get a subset of the grid for agents' partial observations."""
grid = Grid(width, height)
for j in range(0, height):
for i in range(0, width):
x = top_x + i
y = top_y + j
if x >= 0 and x < self.width and \
y >= 0 and y < self.height:
v = self.get(x, y)
else:
v = minigrid.Wall()
grid.set(i, j, v)
return grid
class MultiGridEnv(minigrid.MiniGridEnv):
"""2D grid world game environment with multi-agent support."""
def __init__(
self,
grid_size=None,
width=None,
height=None,
max_steps=100,
see_through_walls=False,
seed=52,
agent_view_size=7,
n_agents=3,
competitive=False,
fixed_environment=False,
minigrid_mode=False
):
"""Constructor for multi-agent gridworld environment generator.
Args:
grid_size: Number of tiles for the width and height of the square grid.
width: Number of tiles across grid width.
height: Number of tiles in height of grid.
max_steps: Number of environment steps before the episode end (max
episode length).
see_through_walls: True if agents can see through walls.
seed: Random seed used in generating environments.
agent_view_size: Number of tiles in the agent's square, partially
observed view of the world.
n_agents: The number of agents playing in the world.
competitive: If True, as soon as one agent locates the goal, the episode
ends for all agents. If False, if one agent locates the goal it is
respawned somewhere else in the grid, and the episode continues until
max_steps is reached.
fixed_environment: If True, will use the same random seed each time the
environment is generated, so it will remain constant / be the same
environment each time.
minigrid_mode: Set to True to maintain backwards compatibility with
minigrid in the single agent case.
"""
# Can't set both grid_size and width/height
if grid_size:
assert width is None and height is None
width = grid_size
height = grid_size
# Set the number of agents
self.n_agents = n_agents
# If competitive, only one agent is allowed to reach the goal.
self.competitive = competitive
if self.n_agents == 1:
self.competitive = True
# Action enumeration for this environment
self.actions = MultiGridEnv.Actions
# Number of cells (width and height) in the agent view
self.agent_view_size = agent_view_size
# Range of possible rewards
self.reward_range = (0, 1)
# Compute observation and action spaces
# Direction always has an extra dimension for tf-agents compatibility
self.direction_obs_space = gym.spaces.Box(
low=0, high=3, shape=(self.n_agents,), dtype='uint8')
# Maintain for backwards compatibility with minigrid.
self.minigrid_mode = minigrid_mode
if self.minigrid_mode:
msg = 'Backwards compatibility with minigrid only possible with 1 agent'
assert self.n_agents == 1, msg
# Single agent case
# Actions are discrete integer values
self.action_space = gym.spaces.Discrete(len(self.actions))
# Images have three dimensions
self.image_obs_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, 3),
dtype='uint8')
else:
# First dimension of all observations is the agent ID
self.action_space = gym.spaces.Box(low=0, high=len(self.actions)-1,
shape=(self.n_agents,), dtype='int64')
self.image_obs_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.n_agents, self.agent_view_size, self.agent_view_size, 3),
dtype='uint8')
# Observations are dictionaries containing an encoding of the grid and the
# agent's direction
self.observation_space = gym.spaces.Dict(
{'image': self.image_obs_space,
'direction': self.direction_obs_space})
# Window to use for human rendering mode
self.window = None
# Environment configuration
self.width = width
self.height = height
self.max_steps = max_steps
self.see_through_walls = see_through_walls
# Current position and direction of the agent
self.agent_pos = [None] * self.n_agents
self.agent_dir = [None] * self.n_agents
# Maintain a done variable for each agent
self.done = [False] * self.n_agents
# Initialize the RNG
self.seed(seed=seed)
self.fixed_environment = fixed_environment
# Initialize the state
self.reset()
def seed(self, seed):
super().seed(seed=seed)
self.seed_value = seed
return [seed]
def reset(self):
if self.fixed_environment:
self.seed(self.seed_value)
# Current position and direction of the agent
self.agent_pos = [None] * self.n_agents
self.agent_dir = [None] * self.n_agents
self.done = [False] * self.n_agents
# Generate the grid. Will be random by default, or same environment if
# 'fixed_environment' is True.
self._gen_grid(self.width, self.height)
# These fields should be defined by _gen_grid
for a in range(self.n_agents):
assert self.agent_pos[a] is not None
assert self.agent_dir[a] is not None
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos[a])
      assert (start_cell is None or start_cell.type == 'agent' or
              start_cell.can_overlap())
# Item picked up, being carried, initially nothing
self.carrying = [None] * self.n_agents
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def __str__(self):
"""Produce a pretty string of the environment's grid along with the agent.
A grid cell is represented by 2-character string, the first one for
the object and the second one for the color.
Returns:
String representation of the grid.
"""
# Map of object types to short string
obj_to_str = {
'wall': 'W',
'floor': 'F',
'door': 'D',
'key': 'K',
'ball': 'A',
'box': 'B',
'goal': 'G',
'lava': 'V',
}
# Map agent's direction to short string
agent_dir_to_str = {0: '>', 1: 'V', 2: '<', 3: '^'}
text = ''
for j in range(self.grid.height):
for i in range(self.grid.width):
# Draw agents
agent_here = False
for a in range(self.n_agents):
if self.agent_pos[a] is not None and (i == self.agent_pos[a][0] and
j == self.agent_pos[a][1]):
text += str(a) + agent_dir_to_str[self.agent_dir[a]]
agent_here = True
if agent_here:
continue
c = self.grid.get(i, j)
if c is None:
text += ' '
continue
if c.type == 'door':
if c.is_open:
text += '__'
elif c.is_locked:
text += 'L' + c.color[0].upper()
else:
text += 'D' + c.color[0].upper()
continue
text += obj_to_str[c.type] + c.color[0].upper()
if j < self.grid.height - 1:
text += '\n'
return text
def place_obj(self,
obj,
top=None,
size=None,
reject_fn=None,
max_tries=math.inf):
"""Place an object at an empty position in the grid.
Args:
obj: Instance of Minigrid WorldObj class (such as Door, Key, etc.).
top: (x,y) position of the top-left corner of rectangle where to place.
size: Size of the rectangle where to place.
reject_fn: Function to filter out potential positions.
max_tries: Throw an error if a position can't be found after this many
tries.
Returns:
Position where object was placed.
"""
if top is None:
top = (0, 0)
else:
top = (max(top[0], 0), max(top[1], 0))
if size is None:
size = (self.grid.width, self.grid.height)
num_tries = 0
while True:
# This is to handle rare cases where rejection sampling
# gets stuck in an infinite loop
if num_tries > max_tries:
raise gym.error.RetriesExceededError(
'Rejection sampling failed in place_obj')
num_tries += 1
pos = np.array((self._rand_int(top[0],
min(top[0] + size[0], self.grid.width)),
self._rand_int(top[1],
min(top[1] + size[1], self.grid.height))))
# Don't place the object on top of another object
if self.grid.get(*pos) is not None:
continue
# Don't place the object where the agent is
pos_no_good = False
for a in range(self.n_agents):
if np.array_equal(pos, self.agent_pos[a]):
pos_no_good = True
if pos_no_good:
continue
# Check if there is a filtering criterion
if reject_fn and reject_fn(self, pos):
continue
break
self.grid.set(pos[0], pos[1], obj)
if obj is not None:
obj.init_pos = pos
obj.cur_pos = pos
return pos
def place_agent(self, top=None, size=None, rand_dir=True, max_tries=math.inf):
"""Set the starting point of all agents in the world.
Name chosen for backwards compatibility.
Args:
top: (x,y) position of the top-left corner of rectangle where agents can
be placed.
size: Size of the rectangle where to place.
rand_dir: Choose a random direction for agents.
max_tries: Throw an error if a position can't be found after this many
tries.
"""
for a in range(self.n_agents):
self.place_one_agent(
a, top=top, size=size, rand_dir=rand_dir, max_tries=max_tries)
def place_one_agent(self,
agent_id,
top=None,
size=None,
rand_dir=True,
max_tries=math.inf,
agent_obj=None):
"""Set the agent's starting point at an empty position in the grid."""
self.agent_pos[agent_id] = None
pos = self.place_obj(None, top, size, max_tries=max_tries)
self.place_agent_at_pos(agent_id, pos, agent_obj=agent_obj,
rand_dir=rand_dir)
return pos
def place_agent_at_pos(self, agent_id, pos, agent_obj=None, rand_dir=True):
self.agent_pos[agent_id] = pos
if rand_dir:
# self.agent_dir[agent_id] = self._rand_int(0, 4)
self.agent_dir[agent_id] = 0
# Place the agent object into the grid
if not agent_obj:
agent_obj = Agent(agent_id, self.agent_dir[agent_id])
agent_obj.init_pos = pos
else:
agent_obj.dir = self.agent_dir[agent_id]
agent_obj.cur_pos = pos
self.grid.set(pos[0], pos[1], agent_obj)
@property
def dir_vec(self):
"""Get the direction vector for the agent (points toward forward movement).
Returns:
An array of directions that each agent is facing.
"""
for a in range(self.n_agents):
assert self.agent_dir[a] >= 0 and self.agent_dir[a] < 4
return [
minigrid.DIR_TO_VEC[self.agent_dir[a]] for a in range(self.n_agents)
]
@property
def right_vec(self):
"""Get the vector pointing to the right of the agents."""
return [np.array((-dy, dx)) for (dx, dy) in self.dir_vec]
@property
def front_pos(self):
"""Get the position of the cell that is right in front of the agent."""
front_pos = [None] * self.n_agents
for a in range(self.n_agents):
assert self.agent_pos[a] is not None and self.dir_vec[a] is not None
front_pos[a] = self.agent_pos[a] + self.dir_vec[a]
return front_pos
def get_view_coords(self, i, j, agent_id):
"""Convert grid coordinates into agent's partially observed view.
Translate and rotate absolute grid coordinates (i, j) into the agent's
partially observable view (sub-grid).
Note that the resulting coordinates may be negative or outside of the
agent's view size.
Args:
i: Integer x coordinate.
j: Integer y coordinate.
agent_id: ID of the agent.
Returns:
Agent-centric coordinates.
"""
ax, ay = self.agent_pos[agent_id]
dx, dy = self.dir_vec[agent_id]
rx, ry = self.right_vec[agent_id]
# Compute the absolute coordinates of the top-left view corner
sz = self.agent_view_size
hs = self.agent_view_size // 2
tx = ax + (dx * (sz - 1)) - (rx * hs)
ty = ay + (dy * (sz - 1)) - (ry * hs)
lx = i - tx
ly = j - ty
# Project the coordinates of the object relative to the top-left
# corner onto the agent's own coordinate system
vx = (rx * lx + ry * ly)
vy = -(dx * lx + dy * ly)
return vx, vy
def get_view_exts(self, agent_id):
"""Get the extents of the square set of tiles visible to the agent.
Note: the bottom extent indices are not included in the set
Args:
agent_id: Integer ID of the agent.
Returns:
Top left and bottom right (x,y) coordinates of set of visible tiles.
"""
# Facing right
if self.agent_dir[agent_id] == 0:
top_x = self.agent_pos[agent_id][0]
top_y = self.agent_pos[agent_id][1] - self.agent_view_size // 2
# Facing down
elif self.agent_dir[agent_id] == 1:
top_x = self.agent_pos[agent_id][0] - self.agent_view_size // 2
top_y = self.agent_pos[agent_id][1]
# Facing left
elif self.agent_dir[agent_id] == 2:
top_x = self.agent_pos[agent_id][0] - self.agent_view_size + 1
top_y = self.agent_pos[agent_id][1] - self.agent_view_size // 2
# Facing up
elif self.agent_dir[agent_id] == 3:
top_x = self.agent_pos[agent_id][0] - self.agent_view_size // 2
top_y = self.agent_pos[agent_id][1] - self.agent_view_size + 1
else:
assert False, 'invalid agent direction'
bot_x = top_x + self.agent_view_size
bot_y = top_y + self.agent_view_size
return (top_x, top_y, bot_x, bot_y)
def relative_coords(self, x, y, agent_id):
"""Check if a grid position belongs to the agent's field of view.
Args:
x: Integer x coordinate.
y: Integer y coordinate.
agent_id: ID of the agent.
Returns:
The corresponding agent-centric coordinates, or None if the position is
outside the agent's field of view.
"""
vx, vy = self.get_view_coords(x, y, agent_id)
if (vx < 0 or vy < 0 or vx >= self.agent_view_size or
vy >= self.agent_view_size):
return None
return vx, vy
def in_view(self, x, y, agent_id):
"""Check if a grid position is visible to the agent."""
return self.relative_coords(x, y, agent_id) is not None
def agent_sees(self, x, y, agent_id):
"""Check if a non-empty grid position is visible to the agent."""
coordinates = self.relative_coords(x, y, agent_id)
if coordinates is None:
return False
vx, vy = coordinates
obs = self.gen_obs()
obs_grid, _ = Grid.decode(obs['image'][agent_id])
obs_cell = obs_grid.get(vx, vy)
world_cell = self.grid.get(x, y)
return obs_cell is not None and obs_cell.type == world_cell.type
def agent_is_done(self, agent_id):
# Remove the corresponding agent object from the grid
pos = self.agent_pos[agent_id]
agent_obj = self.grid.get(pos[0], pos[1])
self.grid.set(pos[0], pos[1], None)
self.done[agent_id] = True
# If an agent finishes the level while carrying an object, it is randomly
# respawned in a new position. Warning: this may break dependencies for the
# level (e.g. if a key is spawned on the wrong side of a door).
# TODO(natashajaques): environments can define respawn behavior
if self.carrying[agent_id]:
self.place_obj(obj=self.carrying[agent_id])
self.carrying[agent_id] = None
# Respawn agent in new location
self.place_one_agent(agent_id, agent_obj=agent_obj)
def move_agent(self, agent_id, new_pos):
# Retrieve agent object
old_pos = self.agent_pos[agent_id]
agent_obj = self.grid.get(old_pos[0], old_pos[1])
assert agent_obj.agent_id == agent_id
assert (agent_obj.cur_pos == old_pos).all()
# Update the agent position in grid and environment
self.grid.set(old_pos[0], old_pos[1], None)
self.agent_pos[agent_id] = new_pos
agent_obj.cur_pos = new_pos
self.grid.set(new_pos[0], new_pos[1], agent_obj)
assert (self.grid.get(
new_pos[0], new_pos[1]).cur_pos == self.agent_pos[agent_id]).all()
def rotate_agent(self, agent_id):
# Retrieve agent object
pos = self.agent_pos[agent_id]
agent_obj = self.grid.get(pos[0], pos[1])
assert agent_obj.agent_id == agent_id
# Update the dir
agent_obj.dir = self.agent_dir[agent_id]
self.grid.set(pos[0], pos[1], agent_obj)
assert self.grid.get(pos[0], pos[1]).dir == self.agent_dir[agent_id]
def step_one_agent(self, action, agent_id):
reward = 0
# Get the position in front of the agent
fwd_pos = self.front_pos[agent_id]
# Get the contents of the cell in front of the agent
fwd_cell = self.grid.get(*fwd_pos)
# Rotate left
if action == self.actions.left:
self.agent_dir[agent_id] -= 1
if self.agent_dir[agent_id] < 0:
self.agent_dir[agent_id] += 4
self.rotate_agent(agent_id)
# Rotate right
elif action == self.actions.right:
self.agent_dir[agent_id] = (self.agent_dir[agent_id] + 1) % 4
self.rotate_agent(agent_id)
# Move forward
elif action == self.actions.forward:
# Make sure agents can't walk into each other
agent_blocking = False
for a in range(self.n_agents):
if a != agent_id and np.array_equal(self.agent_pos[a], fwd_pos):
agent_blocking = True
# Deal with object interactions
if not agent_blocking:
if fwd_cell is not None and fwd_cell.type == 'goal':
self.agent_is_done(agent_id)
reward = self._reward()
elif fwd_cell is not None and fwd_cell.type == 'lava':
self.agent_is_done(agent_id)
elif fwd_cell is None or fwd_cell.can_overlap():
self.move_agent(agent_id, fwd_pos)
# Pick up an object
elif action == self.actions.pickup:
if fwd_cell and fwd_cell.can_pickup():
if self.carrying[agent_id] is None:
self.carrying[agent_id] = fwd_cell
self.carrying[agent_id].cur_pos = np.array([-1, -1])
self.grid.set(fwd_pos[0], fwd_pos[1], None)
a_pos = self.agent_pos[agent_id]
agent_obj = self.grid.get(a_pos[0], a_pos[1])
agent_obj.contains = fwd_cell
# Drop an object
elif action == self.actions.drop:
if not fwd_cell and self.carrying[agent_id]:
self.grid.set(fwd_pos[0], fwd_pos[1], self.carrying[agent_id])
self.carrying[agent_id].cur_pos = fwd_pos
self.carrying[agent_id] = None
a_pos = self.agent_pos[agent_id]
agent_obj = self.grid.get(a_pos[0], a_pos[1])
agent_obj.contains = None
# Toggle/activate an object
elif action == self.actions.toggle:
if fwd_cell:
if fwd_cell.type == 'door':
fwd_cell.toggle(self, fwd_pos, self.carrying[agent_id])
else:
fwd_cell.toggle(self, fwd_pos)
# Done action -- by default acts as no-op.
elif action == self.actions.done:
pass
else:
assert False, 'unknown action'
return reward
def step(self, actions):
# Maintain backwards compatibility with MiniGrid when there is one agent
if not isinstance(actions, list) and self.n_agents == 1:
actions = [actions]
self.step_count += 1
rewards = [0] * self.n_agents
# Randomize order in which agents act for fairness
agent_ordering = np.arange(self.n_agents)
np.random.shuffle(agent_ordering)
# Step each agent
for a in agent_ordering:
rewards[a] = self.step_one_agent(actions[a], a)
obs = self.gen_obs()
# Backwards compatibility
if self.minigrid_mode:
rewards = rewards[0]
collective_done = False
# In the competitive version, the episode ends as soon as one agent finishes.
if self.competitive:
collective_done = np.sum(self.done) >= 1
# Running out of time applies to all agents
if self.step_count >= self.max_steps:
collective_done = True
return obs, rewards, collective_done, {}
def gen_obs_grid(self, agent_id):
"""Generate the sub-grid observed by the agent.
This method also outputs a visibility mask telling us which grid cells
the agent can actually see.
Args:
agent_id: Integer ID of the agent for which to generate the grid.
Returns:
Sub-grid and visibility mask.
"""
top_x, top_y, _, _ = self.get_view_exts(agent_id)
grid = self.grid.slice(top_x, top_y, self.agent_view_size,
self.agent_view_size)
for _ in range(self.agent_dir[agent_id] + 1):
grid = grid.rotate_left()
# Process occluders and visibility
# Note that this incurs some performance cost
if not self.see_through_walls:
vis_mask = grid.process_vis(
agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1))
else:
vis_mask = np.ones(shape=(grid.width, grid.height), dtype=bool)
# Make it so the agent sees what it's carrying
# We do this by placing the carried object at the agent's position
# in the agent's partially observable view
agent_pos = grid.width // 2, grid.height - 1
if self.carrying[agent_id]:
grid.set(agent_pos[0], agent_pos[1], self.carrying[agent_id])
else:
grid.set(agent_pos[0], agent_pos[1], None)
return grid, vis_mask
def gen_obs(self):
"""Generate the stacked observation for all agents."""
images = []
dirs = []
for a in range(self.n_agents):
image, direction = self.gen_agent_obs(a)
images.append(image)
dirs.append(direction)
# Backwards compatibility: if there is a single agent do not return an array
if self.minigrid_mode:
images = images[0]
# Observations are dictionaries containing:
# - an image (partially observable view of the environment)
# - the agent's direction/orientation (acting as a compass)
# Note direction has shape (1,) for tfagents compatibility
obs = {
'image': images,
'direction': dirs
}
return obs
def gen_agent_obs(self, agent_id):
"""Generate the agent's view (partially observed, low-resolution encoding).
Args:
agent_id: ID of the agent for which to generate the observation.
Returns:
3-dimensional partially observed agent-centric view, and int direction
"""
grid, vis_mask = self.gen_obs_grid(agent_id)
# Encode the partially observable view into a numpy array
image = grid.encode(vis_mask)
return image, self.agent_dir[agent_id]
def get_obs_render(self, obs, tile_size=minigrid.TILE_PIXELS // 2):
"""Render an agent observation for visualization."""
grid, vis_mask = Grid.decode(obs)
# Render the whole grid
img = grid.render(
tile_size,
# agent_pos=self.agent_pos,
# agent_dir=self.agent_dir,
highlight_mask=vis_mask)
return img
def compute_agent_visibility_mask(self, agent_id):
# Mask of which cells to highlight
highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)
# Compute which cells are visible to the agent
_, vis_mask = self.gen_obs_grid(agent_id)
# Compute the world coordinates of the bottom-left corner
# of the agent's view area
f_vec = self.dir_vec[agent_id]
r_vec = self.right_vec[agent_id]
top_left = self.agent_pos[agent_id] + f_vec * (self.agent_view_size-1) - \
r_vec * (self.agent_view_size // 2)
# For each cell in the visibility mask
for vis_j in range(0, self.agent_view_size):
for vis_i in range(0, self.agent_view_size):
# If this cell is not visible, don't highlight it
if not vis_mask[vis_i, vis_j]:
continue
# Compute the world coordinates of this cell
abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)
if abs_i < 0 or abs_i >= self.width:
continue
if abs_j < 0 or abs_j >= self.height:
continue
# Mark this cell to be highlighted
highlight_mask[abs_i, abs_j] = True
return highlight_mask
def render(self,
mode='human',
close=False,
highlight=True,
tile_size=minigrid.TILE_PIXELS):
"""Render the whole-grid human view."""
if close:
if self.window:
self.window.close()
return None
if mode == 'human' and not self.window:
self.window = window.Window('gym_minigrid')
self.window.show(block=False)
if highlight:
highlight_mask = []
for a in range(self.n_agents):
if self.agent_pos[a] is not None:
highlight_mask.append(self.compute_agent_visibility_mask(a))
else:
highlight_mask = None
# Render the whole grid
img = self.grid.render(tile_size, highlight_mask=highlight_mask)
if mode == 'human':
self.window.show_img(img)
if hasattr(self, 'mission'):
self.window.set_caption(self.mission)
if mode == 'human':
self.window.show()
return img
|
dcd-main
|
envs/multigrid/multigrid.py
|
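A minimal usage sketch for the MultiGridEnv API defined above (not part of the original file). It assumes the module is importable as envs.multigrid.multigrid per the path shown; TwoAgentEmptyEnv is a hypothetical subclass written purely for illustration, using the same Grid, put_obj and place_agent helpers that mst_maze.py below relies on.

import gym_minigrid.minigrid as minigrid
from envs.multigrid import multigrid

class TwoAgentEmptyEnv(multigrid.MultiGridEnv):
    """Hypothetical subclass: an empty walled room with one goal and two agents."""
    def _gen_grid(self, width, height):
        self.grid = multigrid.Grid(width, height)
        self.grid.wall_rect(0, 0, width, height)
        self.put_obj(minigrid.Goal(), width - 2, height - 2)
        self.place_agent()              # places every agent at a free cell
        self.mission = 'reach the goal'

env = TwoAgentEmptyEnv(n_agents=2, grid_size=9, agent_view_size=5,
                       max_steps=50, minigrid_mode=False)
obs = env.reset()
print(len(obs['image']), obs['direction'])   # one partial view per agent, plus directions
# With n_agents > 1, step() takes one action per agent and returns a reward list
# and a single collective done flag.
obs, rewards, done, info = env.step([env.actions.forward, env.actions.left])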
# Copyright (c) 2019 Maxime Chevalier-Boisvert.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
from gym_minigrid.minigrid import *
from envs.registration import register as register
class FourRoomsEnv(MiniGridEnv):
"""
Classic 4 rooms gridworld environment.
Agent and goal positions can be specified; otherwise they are chosen at random.
"""
def __init__(self, agent_pos=None, goal_pos=None):
self._agent_default_pos = agent_pos
self._goal_default_pos = goal_pos
super().__init__(
grid_size=19,
max_steps=100,
agent_view_size=5)
direction_obs_space = gym.spaces.Box(
low=0, high=3, shape=(1,), dtype='uint8')
self.observation_space = spaces.Dict({
'image': self.observation_space['image'],
'direction': direction_obs_space
})
def _gen_grid(self, width, height):
# Create the grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height - 1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width - 1, 0)
room_w = width // 2
room_h = height // 2
# For each row of rooms
for j in range(0, 2):
# For each column
for i in range(0, 2):
xL = i * room_w
yT = j * room_h
xR = xL + room_w
yB = yT + room_h
# Right wall and door
if i + 1 < 2:
self.grid.vert_wall(xR, yT, room_h)
pos = (xR, self._rand_int(yT + 1, yB))
self.grid.set(*pos, None)
# Bottom wall and door
if j + 1 < 2:
self.grid.horz_wall(xL, yB, room_w)
pos = (self._rand_int(xL + 1, xR), yB)
self.grid.set(*pos, None)
# Randomize the player start position and orientation
if self._agent_default_pos is not None:
self.agent_pos = self._agent_default_pos
self.grid.set(*self._agent_default_pos, None)
self.agent_dir = self._rand_int(0, 4) # assuming random start direction
else:
self.place_agent()
if self._goal_default_pos is not None:
goal = Goal()
self.put_obj(goal, *self._goal_default_pos)
goal.init_pos, goal.cur_pos = self._goal_default_pos
else:
self.place_obj(Goal())
self.mission = 'Reach the goal'
def step(self, action):
obs, rewards, done, info = super().step(action)
del obs['mission']
obs['image'] = obs['image']
obs['direction'] = [self.agent_dir]
return obs, rewards, done, info
def reset(self):
obs = super().reset()
del obs['mission']
obs['image'] = obs['image']
obs['direction'] = [self.agent_dir]
return obs
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register(
id='MiniGrid-FourRooms-v0',
entry_point=module_path+':FourRoomsEnv'
)
|
dcd-main
|
envs/multigrid/fourrooms.py
|
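A brief usage sketch (not part of the original file), assuming the module is importable as envs.multigrid.fourrooms per the path shown; the registered id 'MiniGrid-FourRooms-v0' can equally be built through envs.registration.make as in manual_control.py below.

from envs.multigrid.fourrooms import FourRoomsEnv

env = FourRoomsEnv()
obs = env.reset()
print(sorted(obs.keys()))        # ['direction', 'image'] -- 'mission' is stripped
obs, reward, done, info = env.step(env.actions.forward)
print(reward, done)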
# Copyright (c) 2019 Maxime Chevalier-Boisvert.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#!/usr/bin/env python3
import time
import argparse
import numpy as np
import gym
import gym_minigrid
from gym_minigrid.wrappers import *
from . import window as multigrid_window
from .maze import *
from .mst_maze import *
from .crossing import *
from envs.registration import make as gym_make
def redraw(img):
if not args.agent_view:
img = env.render('rgb_array', tile_size=args.tile_size)
window.show_img(img)
def reset():
if args.seed != -1:
env.seed(args.seed)
obs = env.reset()
if hasattr(env, 'mission'):
print('Mission: %s' % env.mission)
window.set_caption(env.mission)
redraw(obs)
def step(action):
print('taking action', action)
obs, reward, done, info = env.step(action)
# print('step=%s, reward=%.2f' % (env.step_count, reward))
if done or action == env.actions.done:
print('done!')
reset()
else:
redraw(obs)
def key_handler(event):
print('pressed', event.key)
if event.key == 'escape':
window.close()
return
if event.key == 'backspace':
reset()
return
if event.key == 'left':
step(env.actions.left)
return
if event.key == 'right':
step(env.actions.right)
return
if event.key == 'up':
step(env.actions.forward)
return
# Spacebar
if event.key == ' ':
step(env.actions.toggle)
return
if event.key == 'pageup':
step(env.actions.pickup)
return
if event.key == 'pagedown':
step(env.actions.drop)
return
if event.key == 'enter':
step(env.actions.done)
return
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
help="gym environment to load",
default='MultiGrid-MultiRoom-N4-S5-v0'
)
parser.add_argument(
"--seed",
type=int,
help="random seed to generate the environment with",
default=-1
)
parser.add_argument(
"--tile_size",
type=int,
help="size at which to render tiles",
default=32
)
parser.add_argument(
'--agent_view',
default=False,
help="draw the agent sees (partially observable view)",
action='store_true'
)
parser.add_argument(
'--use_walls',
default=False,
action='store_true',
help="draw the agent sees (partially observable view)",
)
args = parser.parse_args()
env = gym_make(args.env)
window = multigrid_window.Window('gym_minigrid - ' + args.env)
window.reg_key_handler(key_handler)
reset()
# Blocking event loop
window.show(block=True)
|
dcd-main
|
envs/multigrid/manual_control.py
|
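The script above is keyboard-driven; the sketch below (not part of the original file) shows a hypothetical headless equivalent of its reset/step loop, sampling random actions instead of key presses and assuming the same envs.registration.make entry point and default env id.

from envs.registration import make as gym_make

env = gym_make('MultiGrid-MultiRoom-N4-S5-v0')   # default --env id above
obs = env.reset()
for _ in range(20):
    obs, reward, done, info = env.step(env.action_space.sample())
    frame = env.render('rgb_array', tile_size=32)  # same call redraw() makes
    if done:
        obs = env.reset()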
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import networkx
import gym_minigrid.minigrid as minigrid
from . import multigrid
from util.unionfind import UnionFind
import envs.registration as register
class MSTMazeEnv(multigrid.MultiGridEnv):
"""Single-agent maze environment specified via a bit map."""
def __init__(self, agent_view_size=5, minigrid_mode=True, max_steps=None,
start_pos=None, goal_pos=None, size=15, seed=None):
self.seed(seed)
self.size = size
self._sample_start_and_goal_pos()
if max_steps is None:
max_steps = 2*size*size
super().__init__(
n_agents=1,
grid_size=size,
agent_view_size=agent_view_size,
max_steps=max_steps,
see_through_walls=True, # Set this to True for maximum speed
minigrid_mode=minigrid_mode
)
def _sample_start_and_goal_pos(self):
size = self.size
top_left = (1,1)
top_right = (size-2,1)
bottom_left = (1,size-2)
bottom_right = (size-2,size-2)
choices = [top_left, top_right, bottom_left, bottom_right]
agent_idx, goal_idx = self.np_random.choice(range(len(choices)), size=(2,), replace=False)
agent_pos = choices[agent_idx]
goal_pos = choices[goal_idx]
self.start_pos = np.array(agent_pos)
self.goal_pos = np.array(goal_pos)
def _gen_maze(self, width, height):
# Use Kruskal's to compute a MST with random edges (walls)
# connecting a grid of cells
assert (width-2) % 2 == 1 and (height-2) % 2 == 1, 'Dimensions must be 2n+1'
self._sample_start_and_goal_pos()
h,w = (height-2)//2 + 1, (width-2)//2 + 1
g = networkx.grid_graph([h,w])
bit_map = np.ones((width-2, height-2))
ds = UnionFind() # track connected components
for v in g.nodes:
y,x = v[0],v[1]
bit_map[y*2][x*2] = 0
ds.add(v)
# Randomly sample edge
edges = list(g.edges)
self.np_random.shuffle(edges)
for u,v in edges:
# convert u,v to full bitmap coordinates
if not ds.connected(u,v):
y1,x1 = u[0]*2,u[1]*2
y2,x2 = v[0]*2,v[1]*2
wall_y = y1 + (y2 - y1)//2
wall_x = x1 + (x2 - x1)//2
bit_map[wall_y][wall_x] = 0
ds.union(u,v)
self.bit_map = bit_map
return bit_map
def _gen_grid(self, width, height):
self._gen_maze(width, height)
# Create an empty grid
self.grid = multigrid.Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Goal
self.put_obj(minigrid.Goal(), self.goal_pos[0], self.goal_pos[1])
# Agent
self.place_agent_at_pos(0, self.start_pos)
# Walls
for x in range(self.bit_map.shape[0]):
for y in range(self.bit_map.shape[1]):
if self.bit_map[y, x]:
# Add an offset of 1 for the outer walls
self.put_obj(minigrid.Wall(), x+1, y+1)
class PerfectMazeSmall(MSTMazeEnv):
"""A 11x11 Maze environment."""
def __init__(self, seed=None):
super().__init__(size=11, seed=seed)
class PerfectMazeMedium(MSTMazeEnv):
"""A 11x11 Maze environment."""
def __init__(self, seed=None):
super().__init__(size=21, seed=seed)
class PerfectMazeLarge(MSTMazeEnv):
"""A 11x11 Maze environment."""
def __init__(self, seed=None):
super().__init__(size=51, seed=seed)
class PerfectMazeXL(MSTMazeEnv):
"""A 11x11 Maze environment."""
def __init__(self, seed=None):
super().__init__(size=101, seed=seed)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register.register(
id='MultiGrid-PerfectMazeSmall-v0',
entry_point=module_path + ':PerfectMazeSmall'
)
register.register(
id='MultiGrid-PerfectMazeMedium-v0',
entry_point=module_path + ':PerfectMazeMedium'
)
register.register(
id='MultiGrid-PerfectMazeLarge-v0',
entry_point=module_path + ':PerfectMazeLarge'
)
register.register(
id='MultiGrid-PerfectMazeXL-v0',
entry_point=module_path + ':PerfectMazeXL'
)
|
dcd-main
|
envs/multigrid/mst_maze.py
|
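A short sketch (not part of the original file) showing how one of the maze classes registered above can be built directly and inspected; it relies only on attributes set in _gen_maze/_gen_grid and on the MultiGridEnv.__str__ rendering, and assumes the module imports as envs.multigrid.mst_maze.

from envs.multigrid.mst_maze import PerfectMazeSmall

env = PerfectMazeSmall(seed=0)
print(env.bit_map.shape)   # (9, 9) interior wall bitmap from the Kruskal/MST step
print(env)                 # ASCII grid: 'WG' walls, 'GG' goal, '0>' the agent
obs = env.reset()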
# Copyright (c) 2019 Maxime Chevalier-Boisvert.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
from gym_minigrid.minigrid import *
import itertools as itt
from envs.registration import register as register
class CrossingEnv(MiniGridEnv):
"""
Environment with wall or lava obstacles, sparse reward.
"""
def __init__(self, size=9, num_crossings=1, obstacle_type=Lava, seed=None):
self.num_crossings = num_crossings
self.obstacle_type = obstacle_type
super().__init__(
grid_size=size,
max_steps=4*size*size,
# Keep occlusion enabled (False); setting this to True trades accuracy for speed
see_through_walls=False,
seed=seed,
agent_view_size=5
)
direction_obs_space = gym.spaces.Box(
low=0, high=3, shape=(1,), dtype='uint8')
self.observation_space = spaces.Dict({
'image': self.observation_space['image'],
'direction': direction_obs_space
})
def _gen_grid(self, width, height):
assert width % 2 == 1 and height % 2 == 1 # odd size
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place the agent in the top-left corner
self.agent_pos = (1, 1)
self.agent_dir = 0
# Place a goal square in the bottom-right corner
self.put_obj(Goal(), width - 2, height - 2)
# Place obstacles (lava or walls)
v, h = object(), object() # singleton `vertical` and `horizontal` objects
# Lava rivers or walls specified by direction and position in grid
rivers = [(v, i) for i in range(2, height - 2, 2)]
rivers += [(h, j) for j in range(2, width - 2, 2)]
self.np_random.shuffle(rivers)
rivers = rivers[:self.num_crossings] # sample random rivers
rivers_v = sorted([pos for direction, pos in rivers if direction is v])
rivers_h = sorted([pos for direction, pos in rivers if direction is h])
obstacle_pos = itt.chain(
itt.product(range(1, width - 1), rivers_h),
itt.product(rivers_v, range(1, height - 1)),
)
for i, j in obstacle_pos:
self.put_obj(self.obstacle_type(), i, j)
# Sample path to goal
path = [h] * len(rivers_v) + [v] * len(rivers_h)
self.np_random.shuffle(path)
# Create openings
limits_v = [0] + rivers_v + [height - 1]
limits_h = [0] + rivers_h + [width - 1]
room_i, room_j = 0, 0
for direction in path:
if direction is h:
i = limits_v[room_i + 1]
j = self.np_random.choice(
range(limits_h[room_j] + 1, limits_h[room_j + 1]))
room_i += 1
elif direction is v:
i = self.np_random.choice(
range(limits_v[room_i] + 1, limits_v[room_i + 1]))
j = limits_h[room_j + 1]
room_j += 1
else:
assert False
self.grid.set(i, j, None)
self.mission = (
"avoid the lava and get to the green goal square"
if self.obstacle_type == Lava
else "find the opening and get to the green goal square"
)
def step(self, action):
obs, rewards, done, info = super().step(action)
del obs['mission']
obs['image'] = obs['image']
obs['direction'] = [self.agent_dir]
return obs, rewards, done, info
def reset(self):
obs = super().reset()
del obs['mission']
obs['image'] = obs['image']
obs['direction'] = [self.agent_dir]
return obs
class LavaCrossingEnv(CrossingEnv):
def __init__(self):
super().__init__(size=9, num_crossings=1)
class LavaCrossingS9N2Env(CrossingEnv):
def __init__(self):
super().__init__(size=9, num_crossings=2)
class LavaCrossingS9N3Env(CrossingEnv):
def __init__(self):
super().__init__(size=9, num_crossings=3)
class LavaCrossingS11N5Env(CrossingEnv):
def __init__(self):
super().__init__(size=11, num_crossings=5)
register(
id='MiniGrid-LavaCrossingS9N1-v0',
entry_point='gym_minigrid.envs:LavaCrossingEnv'
)
register(
id='MiniGrid-LavaCrossingS9N2-v0',
entry_point='gym_minigrid.envs:LavaCrossingS9N2Env'
)
register(
id='MiniGrid-LavaCrossingS9N3-v0',
entry_point='gym_minigrid.envs:LavaCrossingS9N3Env'
)
register(
id='MiniGrid-LavaCrossingS11N5-v0',
entry_point='gym_minigrid.envs:LavaCrossingS11N5Env'
)
class SimpleCrossingEnv(CrossingEnv):
def __init__(self):
super().__init__(size=9, num_crossings=1, obstacle_type=Wall)
class SimpleCrossingS9N2Env(CrossingEnv):
def __init__(self):
super().__init__(size=9, num_crossings=2, obstacle_type=Wall)
class SimpleCrossingS9N3Env(CrossingEnv):
def __init__(self):
super().__init__(size=9, num_crossings=3, obstacle_type=Wall)
class SimpleCrossingS11N5Env(CrossingEnv):
def __init__(self):
super().__init__(size=11, num_crossings=5, obstacle_type=Wall)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register(
id='MiniGrid-SimpleCrossingS9N1-v0',
entry_point=module_path+':SimpleCrossingEnv'
)
register(
id='MiniGrid-SimpleCrossingS9N2-v0',
entry_point=module_path+':SimpleCrossingS9N2Env'
)
register(
id='MiniGrid-SimpleCrossingS9N3-v0',
entry_point=module_path+':SimpleCrossingS9N3Env'
)
register(
id='MiniGrid-SimpleCrossingS11N5-v0',
entry_point=module_path+':SimpleCrossingS11N5Env'
)
|
dcd-main
|
envs/multigrid/crossing.py
|
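A minimal sketch (not part of the original file): the SimpleCrossing variants registered above resolve through the same envs.registration registry used elsewhere in this repo, so one of them can be built and stepped like any other MiniGrid env.

from envs.registration import make as gym_make

env = gym_make('MiniGrid-SimpleCrossingS9N1-v0')
obs = env.reset()
print(env.mission)               # "find the opening and get to the green goal square"
obs, reward, done, info = env.step(env.actions.forward)
print(sorted(obs.keys()))        # ['direction', 'image']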
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
dcd-main
|
envs/runners/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import deque, defaultdict
import numpy as np
import torch
from baselines.common.running_mean_std import RunningMeanStd
from level_replay import LevelSampler, LevelStore
from util import \
array_to_csv, \
is_discrete_actions, \
get_obs_at_index, \
set_obs_at_index
from teachDeepRL.teachers.teacher_controller import TeacherController
import matplotlib as mpl
import matplotlib.pyplot as plt
class AdversarialRunner(object):
"""
Performs rollouts of an adversarial environment, given a
protagonist (agent), an antagonist (adversary_agent), and an
environment adversary (adversary_env).
"""
def __init__(
self,
args,
venv,
agent,
ued_venv=None,
adversary_agent=None,
adversary_env=None,
flexible_protagonist=False,
train=False,
plr_args=None,
device='cpu'):
"""
venv: Vectorized, adversarial gym env with agent-specific wrappers.
agent: Protagonist trainer.
ued_venv: Vectorized, adversarial gym env with adversary-env-specific wrappers.
adversary_agent: Antagonist trainer.
adversary_env: Environment adversary trainer.
flexible_protagonist: If True, the role of protagonist in the regret
calculation is assigned to whichever agent has the lower score.
"""
self.args = args
self.venv = venv
if ued_venv is None:
self.ued_venv = venv
else:
self.ued_venv = ued_venv # Since adv env can have different env wrappers
self.is_discrete_actions = is_discrete_actions(self.venv)
self.is_discrete_adversary_env_actions = is_discrete_actions(self.venv, adversary=True)
self.agents = {
'agent': agent,
'adversary_agent': adversary_agent,
'adversary_env': adversary_env,
}
self.agent_rollout_steps = args.num_steps
self.adversary_env_rollout_steps = self.venv.adversary_observation_space['time_step'].high[0]
self.is_dr = args.ued_algo == 'domain_randomization'
self.is_training_env = args.ued_algo in ['paired', 'flexible_paired', 'minimax']
self.is_paired = args.ued_algo in ['paired', 'flexible_paired']
self.requires_batched_vloss = args.use_editor and args.base_levels == 'easy'
self.is_alp_gmm = args.ued_algo == 'alp_gmm'
# Track running mean and std of env returns for return normalization
if args.adv_normalize_returns:
self.env_return_rms = RunningMeanStd(shape=())
self.device = device
if train:
self.train()
else:
self.eval()
self.reset()
# Set up PLR
self.level_store = None
self.level_samplers = {}
self.current_level_seeds = None
self.weighted_num_edits = 0
self.latest_env_stats = defaultdict(float)
if plr_args:
if self.is_paired:
if not args.protagonist_plr and not args.antagonist_plr:
self.level_samplers.update({
'agent': LevelSampler(**plr_args),
'adversary_agent': LevelSampler(**plr_args)
})
elif args.protagonist_plr:
self.level_samplers['agent'] = LevelSampler(**plr_args)
elif args.antagonist_plr:
self.level_samplers['adversary_agent'] = LevelSampler(**plr_args)
else:
self.level_samplers['agent'] = LevelSampler(**plr_args)
if self.use_byte_encoding:
example = self.ued_venv.get_encodings()[0]
data_info = {
'numpy': True,
'dtype': example.dtype,
'shape': example.shape
}
self.level_store = LevelStore(data_info=data_info)
else:
self.level_store = LevelStore()
self.current_level_seeds = [-1 for i in range(args.num_processes)]
self._default_level_sampler = self.all_level_samplers[0]
self.use_editor = args.use_editor
self.edit_prob = args.level_editor_prob
self.base_levels = args.base_levels
else:
self.use_editor = False
self.edit_prob = 0
self.base_levels = None
# Set up ALP-GMM
if self.is_alp_gmm:
self._init_alp_gmm()
@property
def use_byte_encoding(self):
env_name = self.args.env_name
if self.args.use_editor \
or env_name.startswith('BipedalWalker') \
or (env_name.startswith('MultiGrid') and self.args.use_reset_random_dr):
return True
else:
return False
def _init_alp_gmm(self):
args = self.args
param_env_bounds = []
if args.env_name.startswith('MultiGrid'):
param_env_bounds = {'actions':[0,168,26]}
reward_bounds = None
elif args.env_name.startswith('Bipedal'):
if 'POET' in args.env_name:
param_env_bounds = {'actions': [0,2,5]}
else:
param_env_bounds = {'actions': [0,2,8]}
reward_bounds = (-200, 350)
else:
raise ValueError(f'Environment {args.env_name} not supported for ALP-GMM')
self.alp_gmm_teacher = TeacherController(
teacher='ALP-GMM',
nb_test_episodes=0,
param_env_bounds=param_env_bounds,
reward_bounds=reward_bounds,
seed=args.seed,
teacher_params={}) # Use defaults
def reset(self):
self.num_updates = 0
self.total_num_edits = 0
self.total_episodes_collected = 0
self.total_seeds_collected = 0
self.student_grad_updates = 0
self.sampled_level_info = None
max_return_queue_size = 10
self.agent_returns = deque(maxlen=max_return_queue_size)
self.adversary_agent_returns = deque(maxlen=max_return_queue_size)
def train(self):
self.is_training = True
[agent.train() if agent else agent for _,agent in self.agents.items()]
def eval(self):
self.is_training = False
[agent.eval() if agent else agent for _,agent in self.agents.items()]
def state_dict(self):
agent_state_dict = {}
optimizer_state_dict = {}
for k, agent in self.agents.items():
if agent:
agent_state_dict[k] = agent.algo.actor_critic.state_dict()
optimizer_state_dict[k] = agent.algo.optimizer.state_dict()
return {
'agent_state_dict': agent_state_dict,
'optimizer_state_dict': optimizer_state_dict,
'agent_returns': self.agent_returns,
'adversary_agent_returns': self.adversary_agent_returns,
'num_updates': self.num_updates,
'total_episodes_collected': self.total_episodes_collected,
'total_seeds_collected': self.total_seeds_collected,
'total_num_edits': self.total_num_edits,
'student_grad_updates': self.student_grad_updates,
'latest_env_stats': self.latest_env_stats,
'level_store': self.level_store,
'level_samplers': self.level_samplers,
}
def load_state_dict(self, state_dict):
agent_state_dict = state_dict.get('agent_state_dict')
for k,state in agent_state_dict.items():
self.agents[k].algo.actor_critic.load_state_dict(state)
optimizer_state_dict = state_dict.get('optimizer_state_dict')
for k, state in optimizer_state_dict.items():
self.agents[k].algo.optimizer.load_state_dict(state)
self.agent_returns = state_dict.get('agent_returns')
self.adversary_agent_returns = state_dict.get('adversary_agent_returns')
self.num_updates = state_dict.get('num_updates')
self.total_episodes_collected = state_dict.get('total_episodes_collected')
self.total_seeds_collected = state_dict.get('total_seeds_collected')
self.total_num_edits = state_dict.get('total_num_edits')
self.student_grad_updates = state_dict.get('student_grad_updates')
self.latest_env_stats = state_dict.get('latest_env_stats')
self.level_store = state_dict.get('level_store')
self.level_samplers = state_dict.get('level_samplers')
if self.args.use_plr:
self._default_level_sampler = self.all_level_samplers[0]
if self.use_editor:
self.weighted_num_edits = self._get_weighted_num_edits()
def _get_batched_value_loss(self, agent, clipped=True, batched=True):
batched_value_loss = agent.storage.get_batched_value_loss(
signed=False,
positive_only=False,
clipped=clipped,
batched=batched)
return batched_value_loss
def _get_rollout_return_stats(self, rollout_returns):
mean_return = torch.zeros(self.args.num_processes, 1)
max_return = torch.zeros(self.args.num_processes, 1)
for b, returns in enumerate(rollout_returns):
if len(returns) > 0:
mean_return[b] = float(np.mean(returns))
max_return[b] = float(np.max(returns))
stats = {
'mean_return': mean_return,
'max_return': max_return,
'returns': rollout_returns
}
return stats
def _get_env_stats_multigrid(self, agent_info, adversary_agent_info):
num_blocks = np.mean(agent_info.get(
'num_blocks', self.venv.get_num_blocks()))
passable_ratio = np.mean(agent_info.get(
'passable_ratio', self.venv.get_passable()))
shortest_path_lengths = agent_info.get(
'shortest_path_lengths', self.venv.get_shortest_path_length())
shortest_path_length = np.mean(shortest_path_lengths)
solved_idx = agent_info.get('solved_idx', None)
if solved_idx is None:
if 'max_return' in adversary_agent_info:
solved_idx = \
(torch.max(agent_info['max_return'], \
adversary_agent_info['max_return']) > 0).numpy().squeeze()
else:
solved_idx = (agent_info['max_return'] > 0).numpy().squeeze()
solved_path_lengths = np.array(shortest_path_lengths)[solved_idx]
solved_path_length = np.mean(solved_path_lengths) if len(solved_path_lengths) > 0 else 0
stats = {
'num_blocks': num_blocks,
'passable_ratio': passable_ratio,
'shortest_path_length': shortest_path_length,
'solved_path_length': solved_path_length
}
return stats
def _get_plr_buffer_stats(self):
stats = {}
for k,sampler in self.level_samplers.items():
stats[k + '_plr_passable_mass'] = sampler.solvable_mass
stats[k + '_plr_max_score'] = sampler.max_score
stats[k + '_plr_weighted_num_edits'] = self.weighted_num_edits
return stats
def _get_env_stats_car_racing(self, agent_info, adversary_agent_info):
infos = self.venv.get_complexity_info()
num_envs = len(infos)
sums = defaultdict(float)
for info in infos:
for k,v in info.items():
sums[k] += v
stats = {}
for k,v in sums.items():
stats['track_' + k] = sums[k]/num_envs
return stats
def _get_env_stats_bipedalwalker(self, agent_info, adversary_agent_info):
infos = self.venv.get_complexity_info()
num_envs = len(infos)
sums = defaultdict(float)
for info in infos:
for k,v in info.items():
sums[k] += v
stats = {}
for k,v in sums.items():
stats['track_' + k] = sums[k]/num_envs
return stats
def _get_env_stats(self, agent_info, adversary_agent_info, log_replay_complexity=False):
env_name = self.args.env_name
if env_name.startswith('MultiGrid'):
stats = self._get_env_stats_multigrid(agent_info, adversary_agent_info)
elif env_name.startswith('CarRacing'):
stats = self._get_env_stats_car_racing(agent_info, adversary_agent_info)
elif env_name.startswith('BipedalWalker'):
stats = self._get_env_stats_bipedalwalker(agent_info, adversary_agent_info)
else:
raise ValueError(f'Unsupported environment, {self.args.env_name}')
stats_ = {}
for k,v in stats.items():
stats_['plr_' + k] = v if log_replay_complexity else None
stats_[k] = v if not log_replay_complexity else None
return stats_
def _get_active_levels(self):
assert self.args.use_plr, 'Only call _get_active_levels when using PLR.'
env_name = self.args.env_name
is_multigrid = env_name.startswith('MultiGrid')
is_car_racing = env_name.startswith('CarRacing')
is_bipedal_walker = env_name.startswith('BipedalWalker')
if self.use_byte_encoding:
return [x.tobytes() for x in self.ued_venv.get_encodings()]
elif is_multigrid:
return self.agents['adversary_env'].storage.get_action_traj(as_string=True)
else:
return self.ued_venv.get_level()
def _get_level_sampler(self, name):
other = 'adversary_agent'
if name == 'adversary_agent':
other = 'agent'
level_sampler = self.level_samplers.get(name) or self.level_samplers.get(other)
updateable = name in self.level_samplers
return level_sampler, updateable
@property
def all_level_samplers(self):
if len(self.level_samplers) == 0:
return []
return list(filter(lambda x: x is not None, [v for _, v in self.level_samplers.items()]))
def _should_edit_level(self):
if self.use_editor:
return np.random.rand() < self.edit_prob
else:
return False
def _update_plr_with_current_unseen_levels(self, parent_seeds=None):
args = self.args
levels = self._get_active_levels()
self.current_level_seeds = \
self.level_store.insert(levels, parent_seeds=parent_seeds)
if args.log_plr_buffer_stats or args.reject_unsolvable_seeds:
passable = self.venv.get_passable()
else:
passable = None
self._update_level_samplers_with_external_unseen_sample(
self.current_level_seeds, solvable=passable)
def _update_level_samplers_with_external_unseen_sample(self, seeds, solvable=None):
level_samplers = self.all_level_samplers
if self.args.reject_unsolvable_seeds:
solvable = np.array(solvable, dtype=bool)
seeds = np.array(seeds, dtype=int)[solvable]
solvable = solvable[solvable]
for level_sampler in level_samplers:
level_sampler.observe_external_unseen_sample(seeds, solvable)
def _reconcile_level_store_and_samplers(self):
all_replay_seeds = set()
for level_sampler in self.all_level_samplers:
all_replay_seeds.update([x for x in level_sampler.seeds if x >= 0])
self.level_store.reconcile_seeds(all_replay_seeds)
def _get_weighted_num_edits(self):
level_sampler = self.all_level_samplers[0]
seed_num_edits = np.zeros(level_sampler.seed_buffer_size)
for idx, value in enumerate(self.level_store.seed2parent.values()):
seed_num_edits[idx] = len(value)
weighted_num_edits = np.dot(level_sampler.sample_weights(), seed_num_edits)
return weighted_num_edits
def _sample_replay_decision(self):
return self._default_level_sampler.sample_replay_decision()
def agent_rollout(self,
agent,
num_steps,
update=False,
is_env=False,
level_replay=False,
level_sampler=None,
update_level_sampler=False,
discard_grad=False,
edit_level=False,
num_edits=0,
fixed_seeds=None):
args = self.args
if is_env:
if edit_level: # Get mutated levels
levels = [self.level_store.get_level(seed) for seed in fixed_seeds]
self.ued_venv.reset_to_level_batch(levels)
self.ued_venv.mutate_level(num_edits=num_edits)
self._update_plr_with_current_unseen_levels(parent_seeds=fixed_seeds)
return
if level_replay: # Get replay levels
self.current_level_seeds = [level_sampler.sample_replay_level() for _ in range(args.num_processes)]
levels = [self.level_store.get_level(seed) for seed in self.current_level_seeds]
self.ued_venv.reset_to_level_batch(levels)
return self.current_level_seeds
elif self.is_dr and not args.use_plr:
obs = self.ued_venv.reset_random() # don't need obs here
self.total_seeds_collected += args.num_processes
return
elif self.is_dr and args.use_plr and args.use_reset_random_dr:
obs = self.ued_venv.reset_random() # don't need obs here
self._update_plr_with_current_unseen_levels(parent_seeds=fixed_seeds)
self.total_seeds_collected += args.num_processes
return
elif self.is_alp_gmm:
obs = self.alp_gmm_teacher.set_env_params(self.ued_venv)
self.total_seeds_collected += args.num_processes
return
else:
obs = self.ued_venv.reset() # Prepare for constructive rollout
self.total_seeds_collected += args.num_processes
else:
obs = self.venv.reset_agent()
# Initialize first observation
agent.storage.copy_obs_to_index(obs,0)
rollout_info = {}
rollout_returns = [[] for _ in range(args.num_processes)]
if level_sampler and level_replay:
rollout_info.update({
'solved_idx': np.zeros(args.num_processes, dtype=bool)
})
for step in range(num_steps):
if args.render:
self.venv.render_to_screen()
# Sample actions
with torch.no_grad():
obs_id = agent.storage.get_obs(step)
value, action, action_log_dist, recurrent_hidden_states = agent.act(
obs_id, agent.storage.get_recurrent_hidden_state(step), agent.storage.masks[step])
if self.is_discrete_actions:
action_log_prob = action_log_dist.gather(-1, action)
else:
action_log_prob = action_log_dist
# Observe reward and next obs
reset_random = self.is_dr and not args.use_plr
_action = agent.process_action(action.cpu())
if is_env:
obs, reward, done, infos = self.ued_venv.step_adversary(_action)
else:
obs, reward, done, infos = self.venv.step_env(_action, reset_random=reset_random)
if args.clip_reward:
reward = torch.clamp(reward, -args.clip_reward, args.clip_reward)
if not is_env and step >= num_steps - 1:
# Handle early termination due to cliffhanger rollout
if agent.storage.use_proper_time_limits:
for i, done_ in enumerate(done):
if not done_:
infos[i]['cliffhanger'] = True
infos[i]['truncated'] = True
infos[i]['truncated_obs'] = get_obs_at_index(obs, i)
done = np.ones_like(done, dtype=float)
if level_sampler and level_replay:
next_level_seeds = [s for s in self.current_level_seeds]
for i, info in enumerate(infos):
if 'episode' in info.keys():
rollout_returns[i].append(info['episode']['r'])
if reset_random:
self.total_seeds_collected += 1
if not is_env:
self.total_episodes_collected += 1
# Handle early termination
if agent.storage.use_proper_time_limits:
if 'truncated_obs' in info.keys():
truncated_obs = info['truncated_obs']
agent.storage.insert_truncated_obs(truncated_obs, index=i)
# If using PLR, sample next level
if level_sampler and level_replay:
level_seed = level_sampler.sample_replay_level()
level = self.level_store.get_level(level_seed)
obs_i = self.venv.reset_to_level(level, i)
set_obs_at_index(obs, obs_i, i)
next_level_seeds[i] = level_seed
rollout_info['solved_idx'][i] = True
# If using ALP-GMM, sample next level
if self.is_alp_gmm:
self.alp_gmm_teacher.record_train_episode(rollout_returns[i][-1], index=i)
self.alp_gmm_teacher.set_env_params(self.venv)
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'truncated' in info.keys() else [1.0]
for info in infos])
cliffhanger_masks = torch.FloatTensor(
[[0.0] if 'cliffhanger' in info.keys() else [1.0]
for info in infos])
# Need to store level seeds alongside non-env agent steps
current_level_seeds = None
if (not is_env) and level_sampler:
current_level_seeds = torch.tensor(self.current_level_seeds, dtype=torch.int).view(-1, 1)
agent.insert(
obs, recurrent_hidden_states,
action, action_log_prob, action_log_dist,
value, reward, masks, bad_masks,
level_seeds=current_level_seeds,
cliffhanger_masks=cliffhanger_masks)
if level_sampler and level_replay:
self.current_level_seeds = next_level_seeds
# Add generated env to level store (as a constructive string representation)
if is_env and args.use_plr and not level_replay:
self._update_plr_with_current_unseen_levels()
rollout_info.update(self._get_rollout_return_stats(rollout_returns))
# Update non-env agent if required
if not is_env and update:
with torch.no_grad():
obs_id = agent.storage.get_obs(-1)
next_value = agent.get_value(
obs_id, agent.storage.get_recurrent_hidden_state(-1),
agent.storage.masks[-1]).detach()
agent.storage.compute_returns(
next_value, args.use_gae, args.gamma, args.gae_lambda)
# Compute batched value loss if using value_l1-maximizing adversary
if self.requires_batched_vloss:
# Don't clip value loss reward if env adversary normalizes returns
clipped = not args.adv_use_popart and not args.adv_normalize_returns
batched_value_loss = self._get_batched_value_loss(
agent, clipped=clipped, batched=True)
rollout_info.update({'batched_value_loss': batched_value_loss})
# Update level sampler and remove any ejected seeds from level store
if level_sampler and update_level_sampler:
level_sampler.update_with_rollouts(agent.storage)
value_loss, action_loss, dist_entropy, info = agent.update(discard_grad=discard_grad)
if level_sampler and update_level_sampler:
level_sampler.after_update()
rollout_info.update({
'value_loss': value_loss,
'action_loss': action_loss,
'dist_entropy': dist_entropy,
'update_info': info,
})
# Compute LZ complexity of action trajectories
if args.log_action_complexity:
rollout_info.update({'action_complexity': agent.storage.get_action_complexity()})
return rollout_info
def _compute_env_return(self, agent_info, adversary_agent_info):
args = self.args
if args.ued_algo == 'paired':
env_return = torch.max(adversary_agent_info['max_return'] - agent_info['mean_return'], \
torch.zeros_like(agent_info['mean_return']))
elif args.ued_algo == 'flexible_paired':
env_return = torch.zeros_like(agent_info['max_return'], dtype=torch.float, device=self.device)
adversary_agent_max_idx = adversary_agent_info['max_return'] > agent_info['max_return']
agent_max_idx = ~adversary_agent_max_idx
env_return[adversary_agent_max_idx] = \
adversary_agent_info['max_return'][adversary_agent_max_idx]
env_return[agent_max_idx] = agent_info['max_return'][agent_max_idx]
env_mean_return = torch.zeros_like(env_return, dtype=torch.float)
env_mean_return[adversary_agent_max_idx] = \
agent_info['mean_return'][adversary_agent_max_idx]
env_mean_return[agent_max_idx] = \
adversary_agent_info['mean_return'][agent_max_idx]
env_return = torch.max(env_return - env_mean_return, torch.zeros_like(env_return))
elif args.ued_algo == 'minimax':
env_return = -agent_info['max_return']
else:
env_return = torch.zeros_like(agent_info['mean_return'])
if args.adv_normalize_returns:
self.env_return_rms.update(env_return.flatten().cpu().numpy())
env_return /= np.sqrt(self.env_return_rms.var + 1e-8)
if args.adv_clip_reward is not None:
clip_max_abs = args.adv_clip_reward
env_return = env_return.clamp(-clip_max_abs, clip_max_abs)
return env_return
def run(self):
args = self.args
adversary_env = self.agents['adversary_env']
agent = self.agents['agent']
adversary_agent = self.agents['adversary_agent']
level_replay = False
if args.use_plr and self.is_training:
level_replay = self._sample_replay_decision()
# Discard student gradients if not level replay (sampling new levels)
student_discard_grad = False
no_exploratory_grad_updates = \
vars(args).get('no_exploratory_grad_updates', False)
if args.use_plr and (not level_replay) and no_exploratory_grad_updates:
student_discard_grad = True
if self.is_training and not student_discard_grad:
self.student_grad_updates += 1
# Generate a batch of adversarial environments
env_info = self.agent_rollout(
agent=adversary_env,
num_steps=self.adversary_env_rollout_steps,
update=False,
is_env=True,
level_replay=level_replay,
level_sampler=self._get_level_sampler('agent')[0],
update_level_sampler=False)
# Run agent episodes
level_sampler, is_updateable = self._get_level_sampler('agent')
agent_info = self.agent_rollout(
agent=agent,
num_steps=self.agent_rollout_steps,
update=self.is_training,
level_replay=level_replay,
level_sampler=level_sampler,
update_level_sampler=is_updateable,
discard_grad=student_discard_grad)
# Use a separate PLR curriculum for the antagonist
if level_replay and self.is_paired and (args.protagonist_plr == args.antagonist_plr):
self.agent_rollout(
agent=adversary_env,
num_steps=self.adversary_env_rollout_steps,
update=False,
is_env=True,
level_replay=level_replay,
level_sampler=self._get_level_sampler('adversary_agent')[0],
update_level_sampler=False)
adversary_agent_info = defaultdict(float)
if self.is_paired:
# Run adversary agent episodes
level_sampler, is_updateable = self._get_level_sampler('adversary_agent')
adversary_agent_info = self.agent_rollout(
agent=adversary_agent,
num_steps=self.agent_rollout_steps,
update=self.is_training,
level_replay=level_replay,
level_sampler=level_sampler,
update_level_sampler=is_updateable,
discard_grad=student_discard_grad)
# Decide whether to edit (mutate) levels on this iteration
edit_level = self._should_edit_level() and level_replay
if level_replay:
sampled_level_info = {
'level_replay': True,
'num_edits': [len(self.level_store.seed2parent[x])+1 for x in env_info],
}
else:
sampled_level_info = {
'level_replay': False,
'num_edits': [0 for _ in range(args.num_processes)]
}
# ==== This part performs ACCEL ====
# If editing, mutate levels just replayed by PLR
if level_replay and edit_level:
# Choose base levels for mutation
if self.base_levels == 'batch':
fixed_seeds = env_info
elif self.base_levels == 'easy':
if args.num_processes >= 4:
# take top 4
easy = list(np.argsort((agent_info['mean_return'].detach().cpu().numpy() - agent_info['batched_value_loss'].detach().cpu().numpy()))[:4])
fixed_seeds = [env_info[x.item()] for x in easy] * int(args.num_processes/4)
else:
# take top 1
easy = np.argmax((agent_info['mean_return'].detach().cpu().numpy() - agent_info['batched_value_loss'].detach().cpu().numpy()))
fixed_seeds = [env_info[easy]] * args.num_processes
level_sampler, is_updateable = self._get_level_sampler('agent')
# Edit selected levels
self.agent_rollout(
agent=None,
num_steps=None,
is_env=True,
edit_level=True,
num_edits=args.num_edits,
fixed_seeds=fixed_seeds)
self.total_num_edits += 1
sampled_level_info['num_edits'] = [x+1 for x in sampled_level_info['num_edits']]
# Evaluate edited levels
agent_info_edited_level = self.agent_rollout(
agent=agent,
num_steps=self.agent_rollout_steps,
update=self.is_training,
level_replay=False,
level_sampler=level_sampler,
update_level_sampler=is_updateable,
discard_grad=True)
# ==== ACCEL end ====
if args.use_plr:
self._reconcile_level_store_and_samplers()
if self.use_editor:
self.weighted_num_edits = self._get_weighted_num_edits()
# Compute the environment adversary's return and use it as its final reward
env_return = self._compute_env_return(agent_info, adversary_agent_info)
adversary_env_info = defaultdict(float)
if self.is_training and self.is_training_env:
with torch.no_grad():
obs_id = adversary_env.storage.get_obs(-1)
next_value = adversary_env.get_value(
obs_id, adversary_env.storage.get_recurrent_hidden_state(-1),
adversary_env.storage.masks[-1]).detach()
adversary_env.storage.replace_final_return(env_return)
adversary_env.storage.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda)
env_value_loss, env_action_loss, env_dist_entropy, info = adversary_env.update()
adversary_env_info.update({
'action_loss': env_action_loss,
'value_loss': env_value_loss,
'dist_entropy': env_dist_entropy,
'update_info': info
})
if self.is_training:
self.num_updates += 1
# === LOGGING ===
# Only update env-related stats when run generates new envs (not level replay)
log_replay_complexity = level_replay and args.log_replay_complexity
if (not level_replay) or log_replay_complexity:
stats = self._get_env_stats(agent_info, adversary_agent_info,
log_replay_complexity=log_replay_complexity)
stats.update({
'mean_env_return': env_return.mean().item(),
'adversary_env_pg_loss': adversary_env_info['action_loss'],
'adversary_env_value_loss': adversary_env_info['value_loss'],
'adversary_env_dist_entropy': adversary_env_info['dist_entropy'],
})
if args.use_plr:
self.latest_env_stats.update(stats) # Log latest UED curriculum stats instead of PLR env stats
else:
stats = self.latest_env_stats.copy()
# Log PLR buffer stats
if args.use_plr and args.log_plr_buffer_stats:
stats.update(self._get_plr_buffer_stats())
[self.agent_returns.append(r) for b in agent_info['returns'] for r in reversed(b)]
mean_agent_return = 0
if len(self.agent_returns) > 0:
mean_agent_return = np.mean(self.agent_returns)
mean_adversary_agent_return = 0
if self.is_paired:
[self.adversary_agent_returns.append(r) for b in adversary_agent_info['returns'] for r in reversed(b)]
if len(self.adversary_agent_returns) > 0:
mean_adversary_agent_return = np.mean(self.adversary_agent_returns)
self.sampled_level_info = sampled_level_info
stats.update({
'steps': (self.num_updates + self.total_num_edits) * args.num_processes * args.num_steps,
'total_episodes': self.total_episodes_collected,
'total_seeds': self.total_seeds_collected,
'total_student_grad_updates': self.student_grad_updates,
'mean_agent_return': mean_agent_return,
'agent_value_loss': agent_info['value_loss'],
'agent_pg_loss': agent_info['action_loss'],
'agent_dist_entropy': agent_info['dist_entropy'],
'mean_adversary_agent_return': mean_adversary_agent_return,
'adversary_value_loss': adversary_agent_info['value_loss'],
'adversary_pg_loss': adversary_agent_info['action_loss'],
'adversary_dist_entropy': adversary_agent_info['dist_entropy'],
})
if args.log_grad_norm:
agent_grad_norm = np.mean(agent_info['update_info']['grad_norms'])
adversary_grad_norm = 0
adversary_env_grad_norm = 0
if self.is_paired:
adversary_grad_norm = np.mean(adversary_agent_info['update_info']['grad_norms'])
if self.is_training_env:
adversary_env_grad_norm = np.mean(adversary_env_info['update_info']['grad_norms'])
stats.update({
'agent_grad_norm': agent_grad_norm,
'adversary_grad_norm': adversary_grad_norm,
'adversary_env_grad_norm': adversary_env_grad_norm
})
if args.log_action_complexity:
stats.update({
'agent_action_complexity': agent_info['action_complexity'],
'adversary_action_complexity': adversary_agent_info['action_complexity']
})
return stats
|
dcd-main
|
envs/runners/adversarial_runner.py
|
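A minimal standalone sketch (not part of the repository) of the level-selection step used in the editing branch above: candidate levels are scored by mean return minus value loss, and the chosen seeds are tiled across the worker processes. All names and values below are illustrative placeholders.
import numpy as np
import torch
# Placeholder per-level rollout statistics for 8 candidate levels.
mean_return = torch.tensor([0.1, 0.9, 0.4, 0.7, 0.2, 0.8, 0.3, 0.6])
value_loss = torch.tensor([0.5, 0.1, 0.3, 0.2, 0.4, 0.2, 0.1, 0.3])
env_info = [100, 101, 102, 103, 104, 105, 106, 107]  # one seed per candidate level
num_processes = 8
# Score each level by (mean return - value loss), as in the runner above.
score = (mean_return - value_loss).cpu().numpy()
easy = list(np.argsort(score)[:4])                   # indices of the 4 lowest-scoring levels
fixed_seeds = [env_info[i] for i in easy] * (num_processes // 4)
assert len(fixed_seeds) == num_processes             # one seed per worker process
print(fixed_seeds)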
# Copyright (c) OpenAI
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
"""
2D rendering framework
"""
import os
import sys
import pyvirtualdisplay
if "Apple" in sys.version:
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
from gym import error
try:
import pyglet
except ImportError as e:
raise ImportError('''
Cannot import pyglet.
HINT: you can install pyglet directly via 'pip install pyglet'.
But if you really just want to install all Gym dependencies and not have to think about it,
'pip install -e .[all]' or 'pip install gym[all]' will do it.
''')
try:
from pyglet.gl import *
except ImportError as e:
raise ImportError('''
Error occurred while running `from pyglet.gl import *`
HINT: make sure you have OpenGL installed. On Ubuntu, you can run 'apt-get install python-opengl'.
If you're running on a server, you may need a virtual frame buffer; something like this should work:
'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
''')
import math
import numpy as np
RAD2DEG = 57.29577951308232
def get_display(spec):
"""Convert a display specification (such as :0) into an actual Display
object.
Pyglet only supports multiple Displays on Linux.
"""
if spec is None:
return pyglet.canvas.get_display()
# returns already available pyglet_display,
# if there is no pyglet display available then it creates one
elif isinstance(spec, str):
return pyglet.canvas.Display(spec)
else:
raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
def get_window(width, height, display, **kwargs):
"""
Will create a pyglet window from the display specification provided.
"""
screen = display.get_screens() #available screens
config = screen[0].get_best_config()
context = config.create_context(None) #create GL context
return pyglet.window.Window(width=width, height=height, display=display, config=config, context=context, **kwargs)
class Viewer(object):
def __init__(self, width, height, display=None):
display = get_display(display)
self.width = width
self.height = height
self.window = get_window(width=width, height=height, display=display)
self.window.on_close = self.window_closed_by_user
self.isopen = True
self.geoms = []
self.onetime_geoms = []
self.transform = Transform()
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def window_closed_by_user(self):
self.isopen = False
def set_bounds(self, left, right, bottom, top):
assert right > left and top > bottom
scalex = self.width/(right-left)
scaley = self.height/(top-bottom)
self.transform = Transform(
translation=(-left*scalex, -bottom*scaley),
scale=(scalex, scaley))
def add_geom(self, geom):
self.geoms.append(geom)
def add_onetime(self, geom):
self.onetime_geoms.append(geom)
def render(self, return_rgb_array=False):
glClearColor(1,1,1,1)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
self.transform.enable()
for geom in self.geoms:
geom.render()
for geom in self.onetime_geoms:
geom.render()
self.transform.disable()
arr = None
if return_rgb_array:
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
image_data = buffer.get_image_data()
arr = np.frombuffer(image_data.get_data(), dtype=np.uint8)
# In https://github.com/openai/gym-http-api/issues/2, we
# discovered that someone using Xmonad on Arch was having
# a window of size 598 x 398, though a 600 x 400 window
# was requested. (Guess Xmonad was preserving a pixel for
# the boundary.) So we use the buffer height/width rather
# than the requested one.
arr = arr.reshape(buffer.height, buffer.width, 4)
arr = arr[::-1,:,0:3]
self.window.flip()
self.onetime_geoms = []
return arr if return_rgb_array else self.isopen
# Convenience
def draw_circle(self, radius=10, res=30, filled=True, **attrs):
geom = make_circle(radius=radius, res=res, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polygon(self, v, filled=True, **attrs):
geom = make_polygon(v=v, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polyline(self, v, **attrs):
geom = make_polyline(v=v)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_line(self, start, end, **attrs):
geom = Line(start, end)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def get_array(self):
self.window.flip()
image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
self.window.flip()
arr = np.frombuffer(image_data.get_data(), dtype=np.uint8)
arr = arr.reshape(self.height, self.width, 4)
return arr[::-1,:,0:3]
def __del__(self):
self.close()
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Geom(object):
def __init__(self):
self._color=Color((0, 0, 0, 1.0))
self.attrs = [self._color]
def render(self):
for attr in reversed(self.attrs):
attr.enable()
self.render1()
for attr in self.attrs:
attr.disable()
def render1(self):
raise NotImplementedError
def add_attr(self, attr):
self.attrs.append(attr)
def set_color(self, r, g, b):
self._color.vec4 = (r, g, b, 1)
class Attr(object):
def enable(self):
raise NotImplementedError
def disable(self):
pass
class Transform(Attr):
def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)):
self.set_translation(*translation)
self.set_rotation(rotation)
self.set_scale(*scale)
def enable(self):
glPushMatrix()
glTranslatef(self.translation[0], self.translation[1], 0) # translate to the GL location
glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)
glScalef(self.scale[0], self.scale[1], 1)
def disable(self):
glPopMatrix()
def set_translation(self, newx, newy):
self.translation = (float(newx), float(newy))
def set_rotation(self, new):
self.rotation = float(new)
def set_scale(self, newx, newy):
self.scale = (float(newx), float(newy))
class Color(Attr):
def __init__(self, vec4):
self.vec4 = vec4
def enable(self):
glColor4f(*self.vec4)
class LineStyle(Attr):
def __init__(self, style):
self.style = style
def enable(self):
glEnable(GL_LINE_STIPPLE)
glLineStipple(1, self.style)
def disable(self):
glDisable(GL_LINE_STIPPLE)
class LineWidth(Attr):
def __init__(self, stroke):
self.stroke = stroke
def enable(self):
glLineWidth(self.stroke)
class Point(Geom):
def __init__(self):
Geom.__init__(self)
def render1(self):
glBegin(GL_POINTS) # draw point
glVertex3f(0.0, 0.0, 0.0)
glEnd()
class FilledPolygon(Geom):
def __init__(self, v):
Geom.__init__(self)
self.v = v
def render1(self):
if len(self.v) == 4 : glBegin(GL_QUADS)
elif len(self.v) > 4 : glBegin(GL_POLYGON)
else: glBegin(GL_TRIANGLES)
for p in self.v:
glVertex3f(p[0], p[1],0) # draw each vertex
glEnd()
def make_circle(radius=10, res=30, filled=True):
points = []
for i in range(res):
ang = 2*math.pi*i / res
points.append((math.cos(ang)*radius, math.sin(ang)*radius))
if filled:
return FilledPolygon(points)
else:
return PolyLine(points, True)
def make_polygon(v, filled=True):
if filled: return FilledPolygon(v)
else: return PolyLine(v, True)
def make_polyline(v):
return PolyLine(v, False)
def make_capsule(length, width):
l, r, t, b = 0, length, width/2, -width/2
box = make_polygon([(l,b), (l,t), (r,t), (r,b)])
circ0 = make_circle(width/2)
circ1 = make_circle(width/2)
circ1.add_attr(Transform(translation=(length, 0)))
geom = Compound([box, circ0, circ1])
return geom
class Compound(Geom):
def __init__(self, gs):
Geom.__init__(self)
self.gs = gs
for g in self.gs:
g.attrs = [a for a in g.attrs if not isinstance(a, Color)]
def render1(self):
for g in self.gs:
g.render()
class PolyLine(Geom):
def __init__(self, v, close):
Geom.__init__(self)
self.v = v
self.close = close
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)
for p in self.v:
glVertex3f(p[0], p[1],0) # draw each vertex
glEnd()
def set_linewidth(self, x):
self.linewidth.stroke = x
class Line(Geom):
def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
Geom.__init__(self)
self.start = start
self.end = end
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINES)
glVertex2f(*self.start)
glVertex2f(*self.end)
glEnd()
class Image(Geom):
def __init__(self, fname, width, height):
Geom.__init__(self)
self.set_color(1.0, 1.0, 1.0)
self.width = width
self.height = height
img = pyglet.image.load(fname)
self.img = img
self.flip = False
def render1(self):
self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height)
# ================================================================
class SimpleImageViewer(object):
def __init__(self, display=None, maxwidth=500):
self.window = None
self.isopen = False
self.display = get_display(display)
self.maxwidth = maxwidth
def imshow(self, arr):
if self.window is None:
height, width, _channels = arr.shape
if width > self.maxwidth:
scale = self.maxwidth / width
width = int(scale * width)
height = int(scale * height)
self.window = get_window(width=width, height=height, display=self.display, vsync=False, resizable=True)
self.width = width
self.height = height
self.isopen = True
@self.window.event
def on_resize(width, height):
self.width = width
self.height = height
@self.window.event
def on_close():
self.isopen = False
assert len(arr.shape) == 3, "You passed in an image with the wrong number of dimensions"
image = pyglet.image.ImageData(arr.shape[1], arr.shape[0],
'RGB', arr.tobytes(), pitch=arr.shape[1]*-3)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
texture = image.get_texture()
texture.width = self.width
texture.height = self.height
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
texture.blit(0, 0) # draw
self.window.flip()
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def __del__(self):
self.close()
|
dcd-main
|
envs/box2d/rendering.py
|
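A short usage sketch for the Viewer defined above. It assumes the module is importable as `rendering` and that an X display (real, or virtual via xvfb / pyvirtualdisplay) is available; neither assumption comes from the file itself.
import numpy as np
import rendering  # hypothetical import path for the module above
viewer = rendering.Viewer(width=400, height=300)
viewer.set_bounds(-1.0, 1.0, -1.0, 1.0)
# draw_circle adds a one-time geometry that is rendered on the next frame
viewer.draw_circle(radius=0.5, res=30, filled=True, color=(0.8, 0.2, 0.2))
frame = viewer.render(return_rgb_array=True)  # H x W x 3 uint8 array
assert isinstance(frame, np.ndarray) and frame.shape[-1] == 3
viewer.close()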
# Copyright (c) Stack Exchange, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the CC BY-SA 3.0 license.
import numpy as np
from scipy.special import binom
import matplotlib.pyplot as plt
bernstein = lambda n, k, t: binom(n,k)* t**k * (1.-t)**(n-k)
def bezier(points, num=200):
N = len(points)
t = np.linspace(0, 1, num=num)
curve = np.zeros((num, 2))
for i in range(N):
curve += np.outer(bernstein(N - 1, i, t), points[i])
return curve
class Segment():
def __init__(self, p1, p2, angle1, angle2, **kw):
self.p1 = p1; self.p2 = p2
self.angle1 = angle1; self.angle2 = angle2
self.numpoints = kw.get("numpoints", 100)
r = kw.get("r", 0.3)
d = np.sqrt(np.sum((self.p2-self.p1)**2))
self.r = r*d
self.p = np.zeros((4,2))
self.p[0,:] = self.p1[:]
self.p[3,:] = self.p2[:]
self.calc_intermediate_points(self.r)
def calc_intermediate_points(self,r):
self.p[1,:] = self.p1 + np.array([self.r*np.cos(self.angle1),
self.r*np.sin(self.angle1)])
self.p[2,:] = self.p2 + np.array([self.r*np.cos(self.angle2+np.pi),
self.r*np.sin(self.angle2+np.pi)])
self.curve = bezier(self.p,self.numpoints)
def get_curve(points, **kw):
segments = []
for i in range(len(points)-1):
seg = Segment(points[i,:2], points[i+1,:2], points[i,2],points[i+1,2],**kw)
segments.append(seg)
curve = np.concatenate([s.curve for s in segments])
return segments, curve
def ccw_sort(p):
d = p-np.mean(p,axis=0)
s = np.arctan2(d[:,0], d[:,1])
return p[np.argsort(s),:]
def get_bezier_curve(a=None, rad=0.2, edgy=0, **kw):
""" Given an array of points *a*, create a curve through
those points.
*rad* is a number between 0 and 1 to steer the distance of
control points.
*edgy* is a parameter which controls how "edgy" the curve is;
edgy=0 is the smoothest."""
if a is None:
a = get_random_points(**kw)
numpoints = kw.get('numpoints', 30)
p = np.arctan(edgy)/np.pi+.5
a = ccw_sort(a)
a = np.append(a, np.atleast_2d(a[0,:]), axis=0)
d = np.diff(a, axis=0)
ang = np.arctan2(d[:,1],d[:,0])
f = lambda ang : (ang>=0)*ang + (ang<0)*(ang+2*np.pi)
ang = f(ang)
ang1 = ang
ang2 = np.roll(ang,1)
ang = p*ang1 + (1-p)*ang2 + (np.abs(ang2-ang1) > np.pi )*np.pi
ang = np.append(ang, [ang[0]])
a = np.append(a, np.atleast_2d(ang).T, axis=1)
s, c = get_curve(a, r=rad, method="var", numpoints=numpoints)
x,y = c.T
return x,y,a
def get_random_points(n=5, scale=0.8, mindst=None, rec=0, **kw):
"""Create n random points in the unit square, which are *mindst*
apart, then scale them."""
mindst = mindst or 0.7/n
np_random = kw.get('np_random', np.random)
a = np_random.rand(n,2)
d = np.sqrt(np.sum(np.diff(ccw_sort(a), axis=0), axis=1)**2)
if np.all(d >= mindst) or rec>=200:
return a*scale
else:
return get_random_points(n=n,
scale=scale, mindst=mindst, rec=rec+1, np_random=np_random)
if __name__ == '__main__':
fig, ax = plt.subplots()
ax.set_aspect("equal")
rad = 0.2
edgy = 0.5
for c in np.array([[0,0], [0,1], [1,0], [1,1]]):
a = get_random_points(n=12, scale=1) + c
x,y, _ = get_bezier_curve(a,rad=rad, edgy=edgy)
plt.plot(x,y)
plt.show()
|
dcd-main
|
envs/box2d/bezier.py
|
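A minimal sanity check (not part of the original snippet) for the `bezier` evaluator above: a Bezier curve interpolates its first and last control points. The import path is an assumption based on the file name.
import numpy as np
from bezier import bezier  # hypothetical import of the module above
points = np.array([[0.0, 0.0], [0.5, 1.0], [1.0, 0.0]])
curve = bezier(points, num=50)
# The curve should start at the first control point and end at the last one.
assert np.allclose(curve[0], points[0])
assert np.allclose(curve[-1], points[-1])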
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from envs.registration import register as gym_register
from .racetracks import RaceTrack
from .racetracks import formula1
from .car_racing_bezier import CarRacingBezier
def set_global(name, value):
globals()[name] = value
racetracks = {name: cls for name, cls in formula1.__dict__.items() if isinstance(cls, RaceTrack)}
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
def _create_constructor(track):
def constructor(self, **kwargs):
return CarRacingBezier.__init__(self,
track_name=track.name,
**kwargs)
return constructor
for name, track in racetracks.items():
class_name = f"CarRacingF1-{track.name}"
env = type(class_name, (CarRacingBezier, ), {
"__init__": _create_constructor(track),
})
set_global(class_name, env)
gym_register(
id=f'CarRacingF1-{track.name}-v0',
entry_point=module_path + f':{class_name}',
max_episode_steps=track.max_episode_steps,
reward_threshold=900)
|
dcd-main
|
envs/box2d/car_racing_f1.py
|
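A usage sketch for the per-track environments registered above. The concrete track names live in `envs.box2d.racetracks.formula1` (not shown here), and whether `gym.make` can resolve the id depends on the local `envs.registration.register` wrapper, so both the id and the call path below are assumptions.
import gym
import envs.box2d.car_racing_f1  # triggers the per-track class creation and registration above
env = gym.make('CarRacingF1-Australia-v0')  # hypothetical track id; actual names come from formula1
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
env.close()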
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .car_racing import CarRacing
from .car_racing_bezier import CarRacingBezier
from .car_racing_adversarial import CarRacingBezierAdversarial
from .car_racing_f1 import *
|
dcd-main
|
envs/box2d/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import math
import random
import numpy as np
import gym
from gym.envs.box2d.car_dynamics import Car
from envs.registration import register as gym_register
from .car_racing_bezier import CarRacingBezier
from envs.box2d import *
class CarRacingBezierAdversarial(CarRacingBezier):
def __init__(self,
n_control_points=12,
random_z_dim=4,
track_name=None,
bezier=True,
show_borders=True,
show_indicators=True,
birdseye=False,
seed=None,
fixed_environment=False,
animate_zoom=False,
min_rad_ratio=None,
max_rad_ratio=None,
use_sketch=None,
choose_start_pos=False,
use_categorical=False,
clip_reward=None,
sparse_rewards=False,
num_goal_bins=24,
verbose=1):
super().__init__(
track_name=track_name,
bezier=bezier,
show_borders=show_borders,
show_indicators=show_indicators,
birdseye=birdseye,
seed=seed,
fixed_environment=fixed_environment,
animate_zoom=False,
clip_reward=clip_reward,
sparse_rewards=sparse_rewards,
num_goal_bins=num_goal_bins,
verbose=verbose)
self.passable = True
self.random_z_dim = random_z_dim
self.choose_start_pos = choose_start_pos
self._adv_start_alpha = None
self.n_control_points = n_control_points
self._adversary_control_points = []
# sketch_dim = int(np.round(self.playfield/self.track_width))
sketch_dim = 10
self.sketch_dim = sketch_dim # Should be 50
self.sketch_ratio = self.playfield/self.sketch_dim
self._adversary_sketch = np.zeros((self.sketch_dim, self.sketch_dim))
self.adversary_max_steps = n_control_points
if choose_start_pos:
self.adversary_max_steps += 1 # Extra step to choose start pos
if sparse_rewards:
self.adversary_max_steps += 1 # Extra step to choose goal bin (last action)
self.adversary_step_count = 0
# === Adversary env observations ===
self._clear_adversary_sketch()
self._adversary_sketch_dirty = False
# === Adversary observation and action space ===
self.adversary_ts_obs_space = gym.spaces.Box(
low=0, high=self.adversary_max_steps, shape=(1,), dtype='uint8')
self.adversary_randomz_obs_space = gym.spaces.Box(
low=0, high=1.0, shape=(random_z_dim,), dtype=np.float32)
self.adversary_control_points_obs_space = gym.spaces.Box(
low=0,
high=sketch_dim,
shape=(1, sketch_dim, sketch_dim),
dtype='uint8')
if sparse_rewards:
self.adversary_goal_bin_obs_space = gym.spaces.Box(
low=0,
high=num_goal_bins + 1, # +1 for the placeholder value (the last, extra index)
shape=(1,),
dtype='uint8'
)
self.adversary_observation_space = gym.spaces.Dict(
{'control_points': self.adversary_control_points_obs_space,
'time_step': self.adversary_ts_obs_space,
'random_z': self.adversary_randomz_obs_space,
'goal_bin': self.adversary_goal_bin_obs_space})
else:
self.adversary_observation_space = gym.spaces.Dict(
{'control_points': self.adversary_control_points_obs_space,
'time_step': self.adversary_ts_obs_space,
'random_z': self.adversary_randomz_obs_space})
# Note adversary_action_space is only used to communicate to storage
# the proper dimensions for storing the *unprocessed* actions
if use_categorical:
action_low, action_high = np.array((0,)), np.array((self.sketch_dim**2 + 1,)) # +1 for skip action
if sparse_rewards:
action_low = np.array((0, *action_low))
action_high = np.array((1, *action_high))
self.adversary_action_space = gym.spaces.Box(
low=action_low, high=action_high, dtype='uint8')
else:
action_shape = (3,)
if sparse_rewards:
action_shape = (4,) # First dim stores goal flag
self.adversary_action_space = gym.spaces.Box(
low=0, high=1, shape=action_shape, dtype='float32')
@property
def processed_action_dim(self):
return 3
def reset(self):
self.steps = 0
self.adversary_step_count = 0
if self._adversary_sketch_dirty:
self._clear_adversary_sketch()
# Clear track and agent status
self.reset_agent_status()
obs = {
'control_points': np.expand_dims(self._adversary_sketch,0),
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
# Set goal bin to 1 more than max 0-indexed goal bin
if self.sparse_rewards:
obs.update({'goal_bin': [self.num_goal_bins]})
self.goal_bin = None
return obs
def _alpha_from_xy(self, x,y):
alpha = np.arctan2(y,x)
if alpha < 0:
alpha += 2*math.pi
return alpha
def _set_start_position(self, x, y):
_,_,unnorm_x,unnorm_y = self.unnormalize_xy(x,y)
u = np.mean(np.array(self._adversary_control_points), axis=0)
alpha = self._alpha_from_xy(unnorm_x-u[0],unnorm_y-u[1])
self._adv_start_alpha = alpha
return alpha
def _closest_track_index(self, alpha):
if len(self._adversary_control_points) == 0:
return 0
u = np.mean(np.array(self._adversary_control_points), axis=0)
track_alphas = np.array([self._alpha_from_xy(x-u[0],y-u[1]) for _,_,x,y in self.track])
return np.argmin(np.abs(track_alphas - alpha))
def reset_agent_status(self):
# Reset env-specific meta-data
self._destroy()
self.reward = 0.0
self.prev_reward = 0.0
self.tile_visited_count = 0
self.t = 0.0
self.road_poly = []
self.steps = 0
self._create_track(control_points=self.track_data)
if self._adv_start_alpha is None:
start_idx = 0
else:
start_idx = self._closest_track_index(self._adv_start_alpha)
beta0, x0, y0 = 0,0,0
if self._adversary_sketch_dirty: # Only take the start pose from the track if the adversary actually built one
beta0, x0, y0 = self.track[start_idx][1:4]
x0 -= self.x_offset
y0 -= self.y_offset
if self.car:
self.car.destroy()
self.car = None
self.car = Car(self.world, beta0, x0, y0)
self.reset_sparse_state()
def reset_agent(self):
self.reset_agent_status()
return self.step(None)[0]
def reset_to_level(self, level):
self.reset()
level_features = eval(level)
self._adversary_control_points = level_features[:-1]
self._adv_start_alpha = level_features[-1]
# Build new level
self._adversary_sketch_dirty = True
self._create_track_adversary()
obs = self.reset_agent()
return obs
@property
def level(self):
return str(tuple(self._adversary_control_points + [self._adv_start_alpha,]))
def generate_random_z(self):
return np.random.uniform(size=(self.random_z_dim,)).astype(np.float32)
def unnormalize_xy(self, x,y):
scaled_x = int(np.minimum(np.maximum(np.round(self.sketch_dim*x), 0), self.sketch_dim - 1))
scaled_y = int(np.minimum(np.maximum(np.round(self.sketch_dim*y), 0), self.sketch_dim - 1))
unnorm_x = (scaled_x + 1)*self.sketch_ratio
unnorm_y = (scaled_y + 1)*self.sketch_ratio
return scaled_x, scaled_y, unnorm_x, unnorm_y
def _update_adversary_sketch(self, x, y):
# Update sketch based on latest control points
scaled_x, scaled_y, unnorm_x, unnorm_y = self.unnormalize_xy(x,y)
self._adversary_control_points.append((unnorm_x, unnorm_y))
self._adversary_sketch_dirty = True
self._adversary_sketch[scaled_x][scaled_y] = 1.0
return unnorm_x, unnorm_y, self._adversary_sketch
def _clear_adversary_sketch(self):
self._adversary_sketch.fill(0)
self._adversary_control_points = []
self._adversary_sketch_dirty = False
self._adv_start_alpha = None
def _create_track_adversary(self):
# Compile adversary control points into playfield coordinates
# Note that each sketch grid point corresponds to at least a track width of separation
if self.bezier:
self.track_data = self._adversary_control_points
else:
raise NotImplementedError
@property
def is_goal_step(self):
if self.sparse_rewards:
return self.adversary_step_count == self.adversary_max_steps - 1
else:
return False
@property
def is_start_pos_step(self):
if self.choose_start_pos:
return self.adversary_step_count == self.n_control_points
else:
return False
def step_adversary(self, action):
# Updates sketch with a new control pt (action)
# Obs is the latest sketch of control points scaled by self.sketch_dim.
done = False
goal_bin = self.num_goal_bins
if self.is_goal_step:
goal_bin = action
else:
x,y,skip = action
# Place control point
if self.adversary_step_count < self.n_control_points:
if not (self.adversary_step_count > 3 and np.isclose(skip, 1)):
self._update_adversary_sketch(x,y)
elif self.is_start_pos_step:
self._set_start_position(x,y)
elif self.is_goal_step:
self.goal_bin = goal_bin
self.set_goal(goal_bin)
self.adversary_step_count += 1
if self.adversary_step_count == self.adversary_max_steps:
self._create_track_adversary()
self.reset_agent_status()
done = True
obs = {
'control_points': np.expand_dims(self._adversary_sketch,0), # 1 x sketch_dim x sketch_dim
'time_step': [self.adversary_step_count],
'random_z': self.generate_random_z()
}
if self.sparse_rewards:
obs.update({'goal_bin': [goal_bin]})
return obs, 0., done, {}
def reset_random(self):
self._adversary_sketch_dirty = True
if self.fixed_environment:
self.seed(self.level_seed)
if self.sparse_rewards:
self.goal_bin = None
self.set_goal()
return super().reset()
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
gym_register(
id='CarRacing-Bezier-Adversarial-v0',
entry_point=module_path + ':CarRacingBezierAdversarial',
max_episode_steps=1000,
reward_threshold=900)
|
dcd-main
|
envs/box2d/car_racing_adversarial.py
|
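A minimal driver sketch (not from the repository) for the adversary interface defined above, in the dense-reward, non-categorical setting: `reset()` returns the sketch observation, then `step_adversary` is called once per design step with a normalized (x, y, skip) action until the design phase ends.
import numpy as np
from envs.box2d import CarRacingBezierAdversarial  # exported by the package __init__ shown above
env = CarRacingBezierAdversarial(n_control_points=12, use_categorical=False)
obs = env.reset()
done = False
while not done:
    x, y = np.random.uniform(size=2)
    action = np.array([x, y, 0.0], dtype=np.float32)  # never use the skip action in this sketch
    obs, reward, done, info = env.step_adversary(action)
# After the design phase, the student would interact via env.reset_agent() and env.step().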