import argparse
import collections
import functools
import itertools
import json
import multiprocessing as mp
import os
import pathlib
import re
import subprocess
import warnings
os.environ['NO_AT_BRIDGE'] = '1' # Hide X org false warning.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
np.set_string_function(lambda x: f'<np.array shape={x.shape} dtype={x.dtype}>')
Run = collections.namedtuple('Run', 'task method seed xs ys')
PALETTES = dict(
discrete=(
'#377eb8', '#4daf4a', '#984ea3', '#e41a1c', '#ff7f00', '#a65628',
'#f781bf', '#888888', '#a6cee3', '#b2df8a', '#cab2d6', '#fb9a99',
),
contrast=(
'#0022ff', '#33aa00', '#ff0011', '#ddaa00', '#cc44dd', '#0088aa',
'#001177', '#117700', '#990022', '#885500', '#553366', '#006666',
),
gradient=(
'#fde725', '#a0da39', '#4ac16d', '#1fa187', '#277f8e', '#365c8d',
'#46327e', '#440154',
),
baselines=(
'#222222', '#666666', '#aaaaaa', '#cccccc',
),
)
LEGEND = dict(
fontsize='medium', numpoints=1, labelspacing=0, columnspacing=1.2,
handlelength=1.5, handletextpad=0.5, loc='lower center')
DEFAULT_BASELINES = ['d4pg', 'rainbow_sticky', 'human_gamer', 'impala']
def find_keys(args):
filenames = []
for indir in args.indir:
task = next(indir.iterdir()) # First only.
for method in task.iterdir():
      seed = next(method.iterdir()) # First only.
filenames += list(seed.glob('**/*.jsonl'))
keys = set()
for filename in filenames:
keys |= set(load_jsonl(filename).columns)
print(f'Keys ({len(keys)}):', ', '.join(keys), flush=True)
def load_runs(args):
total, toload = [], []
for indir in args.indir:
filenames = list(indir.glob('**/*.jsonl'))
total += filenames
for filename in filenames:
task, method, seed = filename.relative_to(indir).parts[:-1]
if not any(p.search(task) for p in args.tasks):
continue
if not any(p.search(method) for p in args.methods):
continue
toload.append((filename, indir))
print(f'Loading {len(toload)} of {len(total)} runs...')
jobs = [functools.partial(load_run, f, i, args) for f, i in toload]
# Disable async data loading:
# runs = [j() for j in jobs]
with mp.Pool(10) as pool:
promises = [pool.apply_async(j) for j in jobs]
runs = [p.get() for p in promises]
runs = [r for r in runs if r is not None]
return runs
def load_run(filename, indir, args):
task, method, seed = filename.relative_to(indir).parts[:-1]
prefix = f'indir{args.indir.index(indir)+1}_'
if task == 'atari_jamesbond':
task = 'atari_james_bond'
seed = prefix + seed
if args.prefix:
method = prefix + method
df = load_jsonl(filename)
if df is None:
print('Skipping empty run')
return
try:
df = df[[args.xaxis, args.yaxis]].dropna()
if args.maxval:
df = df.replace([+np.inf], +args.maxval)
df = df.replace([-np.inf], -args.maxval)
df[args.yaxis] = df[args.yaxis].clip(-args.maxval, +args.maxval)
except KeyError:
return
xs = df[args.xaxis].to_numpy()
if args.xmult != 1:
xs = xs.astype(np.float32) * args.xmult
ys = df[args.yaxis].to_numpy()
bins = {
'atari': 1e6,
'dmc': 1e4,
'crafter': 1e4,
}.get(task.split('_')[0], 1e5) if args.bins == -1 else args.bins
if bins:
borders = np.arange(0, xs.max() + 1e-8, bins)
xs, ys = bin_scores(xs, ys, borders)
if not len(xs):
print('Skipping empty run', task, method, seed)
return
return Run(task, method, seed, xs, ys)
def load_baselines(patterns, prefix=False):
runs = []
directory = pathlib.Path(__file__).parent.parent / 'scores'
for filename in directory.glob('**/*_baselines.json'):
for task, methods in json.loads(filename.read_text()).items():
for method, score in methods.items():
if prefix:
method = f'baseline_{method}'
if not any(p.search(method) for p in patterns):
continue
runs.append(Run(task, method, None, None, score))
return runs
def stats(runs, baselines):
tasks = sorted(set(r.task for r in runs))
methods = sorted(set(r.method for r in runs))
seeds = sorted(set(r.seed for r in runs))
baseline = sorted(set(r.method for r in baselines))
print('Loaded', len(runs), 'runs.')
print(f'Tasks ({len(tasks)}):', ', '.join(tasks))
print(f'Methods ({len(methods)}):', ', '.join(methods))
print(f'Seeds ({len(seeds)}):', ', '.join(seeds))
print(f'Baselines ({len(baseline)}):', ', '.join(baseline))
def order_methods(runs, baselines, args):
methods = []
for pattern in args.methods:
for method in sorted(set(r.method for r in runs)):
if pattern.search(method):
if method not in methods:
methods.append(method)
if method not in args.colors:
index = len(args.colors) % len(args.palette)
args.colors[method] = args.palette[index]
non_baseline_colors = len(args.colors)
for pattern in args.baselines:
for method in sorted(set(r.method for r in baselines)):
if pattern.search(method):
if method not in methods:
methods.append(method)
if method not in args.colors:
index = len(args.colors) - non_baseline_colors
index = index % len(PALETTES['baselines'])
args.colors[method] = PALETTES['baselines'][index]
return methods
def figure(runs, methods, args):
tasks = sorted(set(r.task for r in runs if r.xs is not None))
rows = int(np.ceil((len(tasks) + len(args.add)) / args.cols))
figsize = args.size[0] * args.cols, args.size[1] * rows
fig, axes = plt.subplots(rows, args.cols, figsize=figsize, squeeze=False)
for task, ax in zip(tasks, axes.flatten()):
relevant = [r for r in runs if r.task == task]
plot(task, ax, relevant, methods, args)
for name, ax in zip(args.add, axes.flatten()[len(tasks):]):
ax.set_facecolor((0.9, 0.9, 0.9))
if name == 'median':
plot_combined(
'combined_median', ax, runs, methods, args,
agg=lambda x: np.nanmedian(x, -1))
elif name == 'mean':
plot_combined(
'combined_mean', ax, runs, methods, args,
agg=lambda x: np.nanmean(x, -1))
elif name == 'gamer_median':
plot_combined(
'combined_gamer_median', ax, runs, methods, args,
lo='random', hi='human_gamer',
agg=lambda x: np.nanmedian(x, -1))
elif name == 'gamer_mean':
plot_combined(
'combined_gamer_mean', ax, runs, methods, args,
lo='random', hi='human_gamer',
agg=lambda x: np.nanmean(x, -1))
elif name == 'record_mean':
plot_combined(
'combined_record_mean', ax, runs, methods, args,
lo='random', hi='record',
agg=lambda x: np.nanmean(x, -1))
elif name == 'clip_record_mean':
plot_combined(
'combined_clipped_record_mean', ax, runs, methods, args,
lo='random', hi='record', clip=True,
agg=lambda x: np.nanmean(x, -1))
elif name == 'seeds':
plot_combined(
'combined_seeds', ax, runs, methods, args,
agg=lambda x: np.isfinite(x).sum(-1))
elif name == 'human_above':
plot_combined(
'combined_above_human_gamer', ax, runs, methods, args,
agg=lambda y: (y >= 1.0).astype(float).sum(-1))
elif name == 'human_below':
plot_combined(
'combined_below_human_gamer', ax, runs, methods, args,
agg=lambda y: (y <= 1.0).astype(float).sum(-1))
else:
raise NotImplementedError(name)
if args.xlim:
for ax in axes[:-1].flatten():
ax.xaxis.get_offset_text().set_visible(False)
if args.xlabel:
for ax in axes[-1]:
ax.set_xlabel(args.xlabel)
if args.ylabel:
for ax in axes[:, 0]:
ax.set_ylabel(args.ylabel)
for ax in axes.flatten()[len(tasks) + len(args.add):]:
ax.axis('off')
legend(fig, args.labels, ncol=args.legendcols, **LEGEND)
return fig
def plot(task, ax, runs, methods, args):
assert runs
try:
title = task.split('_', 1)[1].replace('_', ' ').title()
except IndexError:
title = task.title()
ax.set_title(title)
xlim = [+np.inf, -np.inf]
for index, method in enumerate(methods):
relevant = [r for r in runs if r.method == method]
if not relevant:
continue
if any(r.xs is None for r in relevant):
baseline(index, method, ax, relevant, args)
else:
if args.agg == 'none':
xs, ys = curve_lines(index, task, method, ax, relevant, args)
else:
xs, ys = curve_area(index, task, method, ax, relevant, args)
if len(xs) == len(ys) == 0:
print(f'Skipping empty: {task} {method}')
continue
xlim = [min(xlim[0], np.nanmin(xs)), max(xlim[1], np.nanmax(xs))]
ax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))
steps = [1, 2, 2.5, 5, 10]
ax.xaxis.set_major_locator(ticker.MaxNLocator(args.xticks, steps=steps))
ax.yaxis.set_major_locator(ticker.MaxNLocator(args.yticks, steps=steps))
if np.isfinite(xlim).all():
ax.set_xlim(args.xlim or xlim)
if args.xlim:
ticks = sorted({*ax.get_xticks(), *args.xlim})
ticks = [x for x in ticks if args.xlim[0] <= x <= args.xlim[1]]
ax.set_xticks(ticks)
if args.ylim:
ax.set_ylim(args.ylim)
if args.ylimticks:
ticks = sorted({*ax.get_yticks(), *args.ylim})
ticks = [x for x in ticks if args.ylim[0] <= x <= args.ylim[1]]
ax.set_yticks(ticks)
def plot_combined(
name, ax, runs, methods, args, agg, lo=None, hi=None, clip=False):
tasks = sorted(set(run.task for run in runs if run.xs is not None))
seeds = list(set(run.seed for run in runs))
runs = [r for r in runs if r.task in tasks] # Discard unused baselines.
# Bin all runs onto the same X steps.
borders = sorted(
[r.xs for r in runs if r.xs is not None],
key=lambda x: np.nanmax(x))[-1]
for index, run in enumerate(runs):
if run.xs is None:
continue
xs, ys = bin_scores(run.xs, run.ys, borders, fill='last')
runs[index] = run._replace(xs=xs, ys=ys)
# Per-task normalization by low and high baseline.
if lo or hi:
mins = collections.defaultdict(list)
maxs = collections.defaultdict(list)
[mins[r.task].append(r.ys) for r in load_baselines([re.compile(lo)])]
[maxs[r.task].append(r.ys) for r in load_baselines([re.compile(hi)])]
mins = {task: min(ys) for task, ys in mins.items() if task in tasks}
maxs = {task: max(ys) for task, ys in maxs.items() if task in tasks}
missing_baselines = []
for task in tasks:
if task not in mins or task not in maxs:
missing_baselines.append(task)
if set(missing_baselines) == set(tasks):
print(f'No baselines found to normalize any tasks in {name} plot.')
else:
for task in missing_baselines:
print(f'No baselines found to normalize {task} in {name} plot.')
for index, run in enumerate(runs):
if run.task not in mins or run.task not in maxs:
continue
ys = (run.ys - mins[run.task]) / (maxs[run.task] - mins[run.task])
if clip:
ys = np.minimum(ys, 1.0)
runs[index] = run._replace(ys=ys)
# Aggregate across tasks but not methods or seeds.
combined = []
for method, seed in itertools.product(methods, seeds):
relevant = [r for r in runs if r.method == method and r.seed == seed]
if not relevant:
continue
if relevant[0].xs is None:
xs, ys = None, np.array([r.ys for r in relevant])
else:
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in relevant]))
with warnings.catch_warnings(): # Ignore empty slice warnings.
warnings.simplefilter('ignore', category=RuntimeWarning)
combined.append(Run('combined', method, seed, xs, agg(ys)))
plot(name, ax, combined, methods, args)
def curve_lines(index, task, method, ax, runs, args):
zorder = 10000 - 10 * index - 1
for run in runs:
color = args.colors[method]
ax.plot(run.xs, run.ys, label=method, color=color, zorder=zorder)
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs]))
return xs, ys
def curve_area(index, task, method, ax, runs, args):
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs]))
with warnings.catch_warnings(): # NaN buckets remain NaN.
warnings.simplefilter('ignore', category=RuntimeWarning)
if args.agg == 'std1':
mean, std = np.nanmean(ys, -1), np.nanstd(ys, -1)
lo, mi, hi = mean - std, mean, mean + std
elif args.agg == 'per0':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (0, 50, 100)]
elif args.agg == 'per5':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (5, 50, 95)]
elif args.agg == 'per25':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (25, 50, 75)]
else:
raise NotImplementedError(args.agg)
color = args.colors[method]
kw = dict(color=color, zorder=1000 - 10 * index, alpha=0.1, linewidths=0)
mask = ~np.isnan(mi)
xs, lo, mi, hi = xs[mask], lo[mask], mi[mask], hi[mask]
ax.fill_between(xs, lo, hi, **kw)
ax.plot(xs, mi, label=method, color=color, zorder=10000 - 10 * index - 1)
return xs, mi
def baseline(index, method, ax, runs, args):
assert all(run.xs is None for run in runs)
ys = np.array([run.ys for run in runs])
mean, std = ys.mean(), ys.std()
color = args.colors[method]
kw = dict(color=color, zorder=500 - 20 * index - 1, alpha=0.1, linewidths=0)
ax.fill_between([-np.inf, np.inf], [mean - std] * 2, [mean + std] * 2, **kw)
kw = dict(ls='--', color=color, zorder=5000 - 10 * index - 1)
ax.axhline(mean, label=method, **kw)
def legend(fig, mapping=None, **kwargs):
entries = {}
for ax in fig.axes:
for handle, label in zip(*ax.get_legend_handles_labels()):
if mapping and label in mapping:
label = mapping[label]
entries[label] = handle
leg = fig.legend(entries.values(), entries.keys(), **kwargs)
leg.get_frame().set_edgecolor('white')
extent = leg.get_window_extent(fig.canvas.get_renderer())
extent = extent.transformed(fig.transFigure.inverted())
yloc, xloc = kwargs['loc'].split()
y0 = dict(lower=extent.y1, center=0, upper=0)[yloc]
y1 = dict(lower=1, center=1, upper=extent.y0)[yloc]
x0 = dict(left=extent.x1, center=0, right=0)[xloc]
x1 = dict(left=1, center=1, right=extent.x0)[xloc]
fig.tight_layout(rect=[x0, y0, x1, y1], h_pad=0.5, w_pad=0.5)
def save(fig, args):
args.outdir.mkdir(parents=True, exist_ok=True)
filename = args.outdir / 'curves.png'
fig.savefig(filename, dpi=args.dpi)
print('Saved to', filename)
filename = args.outdir / 'curves.pdf'
fig.savefig(filename)
try:
subprocess.call(['pdfcrop', str(filename), str(filename)])
except FileNotFoundError:
print('Install texlive-extra-utils to crop PDF outputs.')
def bin_scores(xs, ys, borders, reducer=np.nanmean, fill='nan'):
order = np.argsort(xs)
xs, ys = xs[order], ys[order]
binned = []
with warnings.catch_warnings(): # Empty buckets become NaN.
warnings.simplefilter('ignore', category=RuntimeWarning)
for start, stop in zip(borders[:-1], borders[1:]):
left = (xs <= start).sum()
right = (xs <= stop).sum()
if left < right:
value = reducer(ys[left:right])
elif binned:
value = {'nan': np.nan, 'last': binned[-1]}[fill]
else:
value = np.nan
binned.append(value)
return borders[1:], np.array(binned)
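# Illustrative example (values made up): with xs=[1, 2, 3, 4], ys=[1., 2., 3., 4.]
# and borders=[0, 2, 4], the buckets are (0, 2] and (2, 4], so bin_scores returns
# xs=[2, 4] and ys=[1.5, 3.5] (the per-bucket means).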
def stack_scores(multiple_xs, multiple_ys, fill='last'):
longest_xs = sorted(multiple_xs, key=lambda x: len(x))[-1]
multiple_padded_ys = []
for xs, ys in zip(multiple_xs, multiple_ys):
assert (longest_xs[:len(xs)] == xs).all(), (list(xs), list(longest_xs))
value = {'nan': np.nan, 'last': ys[-1]}[fill]
padding = [value] * (len(longest_xs) - len(xs))
padded_ys = np.concatenate([ys, padding])
multiple_padded_ys.append(padded_ys)
stacked_ys = np.stack(multiple_padded_ys, -1)
return longest_xs, stacked_ys
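# Illustrative example: stacking runs with xs=[1, 2, 3] / ys=[5., 6., 7.] and
# xs=[1, 2] / ys=[8., 9.] pads the shorter run with its last value and returns
# xs=[1, 2, 3] and ys=[[5., 8.], [6., 9.], [7., 9.]] of shape (steps, runs).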
def load_jsonl(filename):
try:
with filename.open() as f:
lines = list(f.readlines())
records = []
for index, line in enumerate(lines):
try:
records.append(json.loads(line))
except Exception:
if index == len(lines) - 1:
continue # Silently skip last line if it is incomplete.
          raise ValueError(
              f'Skipping invalid JSON line ({index+1}/{len(lines)}) in '
              f'{filename}: {line}')
return pd.DataFrame(records)
except ValueError as e:
print('Invalid', filename, e)
return None
def save_runs(runs, filename):
filename.parent.mkdir(parents=True, exist_ok=True)
records = []
for run in runs:
if run.xs is None:
continue
records.append(dict(
task=run.task, method=run.method, seed=run.seed,
xs=run.xs.tolist(), ys=run.ys.tolist()))
runs = json.dumps(records)
filename.write_text(runs)
print('Saved', filename)
def main(args):
find_keys(args)
runs = load_runs(args)
save_runs(runs, args.outdir / 'runs.json')
baselines = load_baselines(args.baselines, args.prefix)
stats(runs, baselines)
methods = order_methods(runs, baselines, args)
if not runs:
    print('Nothing to plot.')
return
# Adjust options based on loaded runs.
tasks = set(r.task for r in runs)
if 'auto' in args.add:
index = args.add.index('auto')
del args.add[index]
atari = any(run.task.startswith('atari_') for run in runs)
if len(tasks) < 2:
pass
elif atari:
args.add[index:index] = [
'gamer_median', 'gamer_mean', 'record_mean', 'clip_record_mean',
]
else:
args.add[index:index] = ['mean', 'median']
args.cols = min(args.cols, len(tasks) + len(args.add))
args.legendcols = min(args.legendcols, args.cols)
print('Plotting...')
fig = figure(runs + baselines, methods, args)
save(fig, args)
def parse_args():
boolean = lambda x: bool(['False', 'True'].index(x))
parser = argparse.ArgumentParser()
parser.add_argument('--indir', nargs='+', type=pathlib.Path, required=True)
parser.add_argument('--indir-prefix', type=pathlib.Path)
parser.add_argument('--outdir', type=pathlib.Path, required=True)
parser.add_argument('--subdir', type=boolean, default=True)
parser.add_argument('--xaxis', type=str, default='step')
parser.add_argument('--yaxis', type=str, default='eval_return')
parser.add_argument('--tasks', nargs='+', default=[r'.*'])
parser.add_argument('--methods', nargs='+', default=[r'.*'])
parser.add_argument('--baselines', nargs='+', default=DEFAULT_BASELINES)
  parser.add_argument('--prefix', type=boolean, default=None)
parser.add_argument('--bins', type=float, default=-1)
parser.add_argument('--agg', type=str, default='std1')
parser.add_argument('--size', nargs=2, type=float, default=[2.5, 2.3])
parser.add_argument('--dpi', type=int, default=80)
parser.add_argument('--cols', type=int, default=6)
parser.add_argument('--xlim', nargs=2, type=float, default=None)
parser.add_argument('--ylim', nargs=2, type=float, default=None)
parser.add_argument('--ylimticks', type=boolean, default=True)
parser.add_argument('--xlabel', type=str, default=None)
parser.add_argument('--ylabel', type=str, default=None)
parser.add_argument('--xticks', type=int, default=6)
parser.add_argument('--yticks', type=int, default=5)
parser.add_argument('--xmult', type=float, default=1)
parser.add_argument('--labels', nargs='+', default=None)
parser.add_argument('--palette', nargs='+', default=['contrast'])
parser.add_argument('--legendcols', type=int, default=4)
parser.add_argument('--colors', nargs='+', default={})
parser.add_argument('--maxval', type=float, default=0)
parser.add_argument('--add', nargs='+', type=str, default=['auto', 'seeds'])
args = parser.parse_args()
if args.subdir:
args.outdir /= args.indir[0].stem
if args.indir_prefix:
args.indir = [args.indir_prefix / indir for indir in args.indir]
args.indir = [d.expanduser() for d in args.indir]
args.outdir = args.outdir.expanduser()
if args.labels:
assert len(args.labels) % 2 == 0
    args.labels = dict(zip(args.labels[::2], args.labels[1::2]))
if args.colors:
assert len(args.colors) % 2 == 0
    args.colors = dict(zip(args.colors[::2], args.colors[1::2]))
args.tasks = [re.compile(p) for p in args.tasks]
args.methods = [re.compile(p) for p in args.methods]
args.baselines = [re.compile(p) for p in args.baselines]
if 'return' not in args.yaxis:
args.baselines = []
if args.prefix is None:
args.prefix = len(args.indir) > 1
if len(args.palette) == 1 and args.palette[0] in PALETTES:
args.palette = 10 * PALETTES[args.palette[0]]
if len(args.add) == 1 and args.add[0] == 'none':
args.add = []
return args
if __name__ == '__main__':
main(parse_args())
# ===== end of file: dreamerv2/common/plot.py (repo: cascade-main) =====
import json
import pathlib
import re
class Config(dict):
SEP = '.'
IS_PATTERN = re.compile(r'.*[^A-Za-z0-9_.-].*')
def __init__(self, *args, **kwargs):
mapping = dict(*args, **kwargs)
mapping = self._flatten(mapping)
mapping = self._ensure_keys(mapping)
mapping = self._ensure_values(mapping)
self._flat = mapping
self._nested = self._nest(mapping)
# Need to assign the values to the base class dictionary so that
# conversion to dict does not lose the content.
super().__init__(self._nested)
@property
def flat(self):
return self._flat.copy()
def save(self, filename):
filename = pathlib.Path(filename)
if filename.suffix == '.json':
filename.write_text(json.dumps(dict(self)))
elif filename.suffix in ('.yml', '.yaml'):
import ruamel.yaml as yaml
with filename.open('w') as f:
yaml.safe_dump(dict(self), f)
else:
raise NotImplementedError(filename.suffix)
@classmethod
def load(cls, filename):
filename = pathlib.Path(filename)
if filename.suffix == '.json':
return cls(json.loads(filename.read_text()))
elif filename.suffix in ('.yml', '.yaml'):
import ruamel.yaml as yaml
return cls(yaml.safe_load(filename.read_text()))
else:
raise NotImplementedError(filename.suffix)
def parse_flags(self, argv=None, known_only=False, help_exists=None):
from . import flags
return flags.Flags(self).parse(argv, known_only, help_exists)
def __contains__(self, name):
try:
self[name]
return True
except KeyError:
return False
def __getattr__(self, name):
if name.startswith('_'):
return super().__getattr__(name)
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, name):
result = self._nested
for part in name.split(self.SEP):
result = result[part]
if isinstance(result, dict):
result = type(self)(result)
return result
def __setattr__(self, key, value):
if key.startswith('_'):
return super().__setattr__(key, value)
message = f"Tried to set key '{key}' on immutable config. Use update()."
raise AttributeError(message)
def __setitem__(self, key, value):
if key.startswith('_'):
return super().__setitem__(key, value)
message = f"Tried to set key '{key}' on immutable config. Use update()."
raise AttributeError(message)
def __reduce__(self):
return (type(self), (dict(self),))
def __str__(self):
lines = ['\nConfig:']
keys, vals, typs = [], [], []
for key, val in self.flat.items():
keys.append(key + ':')
vals.append(self._format_value(val))
typs.append(self._format_type(val))
max_key = max(len(k) for k in keys) if keys else 0
max_val = max(len(v) for v in vals) if vals else 0
for key, val, typ in zip(keys, vals, typs):
key = key.ljust(max_key)
val = val.ljust(max_val)
lines.append(f'{key} {val} ({typ})')
return '\n'.join(lines)
def update(self, *args, **kwargs):
result = self._flat.copy()
inputs = self._flatten(dict(*args, **kwargs))
for key, new in inputs.items():
if self.IS_PATTERN.match(key):
pattern = re.compile(key)
keys = {k for k in result if pattern.match(k)}
else:
keys = [key]
if not keys:
raise KeyError(f'Unknown key or pattern {key}.')
for key in keys:
old = result[key]
try:
if isinstance(old, int) and isinstance(new, float):
if float(int(new)) != new:
message = f"Cannot convert fractional float {new} to int."
raise ValueError(message)
result[key] = type(old)(new)
except (ValueError, TypeError):
raise TypeError(
f"Cannot convert '{new}' to type '{type(old).__name__}' " +
f"of value '{old}' for key '{key}'.")
return type(self)(result)
def _flatten(self, mapping):
result = {}
for key, value in mapping.items():
if isinstance(value, dict):
for k, v in self._flatten(value).items():
if self.IS_PATTERN.match(key) or self.IS_PATTERN.match(k):
combined = f'{key}\\{self.SEP}{k}'
else:
combined = f'{key}{self.SEP}{k}'
result[combined] = v
else:
result[key] = value
return result
def _nest(self, mapping):
result = {}
for key, value in mapping.items():
parts = key.split(self.SEP)
node = result
for part in parts[:-1]:
if part not in node:
node[part] = {}
node = node[part]
node[parts[-1]] = value
return result
def _ensure_keys(self, mapping):
for key in mapping:
assert not self.IS_PATTERN.match(key), key
return mapping
def _ensure_values(self, mapping):
result = json.loads(json.dumps(mapping))
for key, value in result.items():
if isinstance(value, list):
value = tuple(value)
if isinstance(value, tuple):
if len(value) == 0:
message = 'Empty lists are disallowed because their type is unclear.'
raise TypeError(message)
if not isinstance(value[0], (str, float, int, bool)):
message = 'Lists can only contain strings, floats, ints, bools'
message += f' but not {type(value[0])}'
raise TypeError(message)
if not all(isinstance(x, type(value[0])) for x in value[1:]):
message = 'Elements of a list must all be of the same type.'
raise TypeError(message)
result[key] = value
return result
def _format_value(self, value):
if isinstance(value, (list, tuple)):
return '[' + ', '.join(self._format_value(x) for x in value) + ']'
return str(value)
def _format_type(self, value):
if isinstance(value, (list, tuple)):
assert len(value) > 0, value
return self._format_type(value[0]) + 's'
return str(type(value).__name__)
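# Usage sketch (illustrative values): Config flattens nested dicts into
# dot-separated keys and is immutable; update() returns a new instance and
# also accepts regex key patterns for bulk updates.
#   config = Config({'model': {'lr': 1e-3, 'layers': 4}})
#   config.model.lr       # 0.001 via dotted attribute access
#   config['model.lr']    # 0.001 via flat key access
#   config = config.update({'model.lr': 3e-4})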
# ===== end of file: dreamerv2/common/config.py (repo: cascade-main) =====
import pathlib
import pickle
import re
import numpy as np
import tensorflow as tf
from tensorflow.keras import mixed_precision as prec
try:
from tensorflow.python.distribute import values
except Exception:
from google3.third_party.tensorflow.python.distribute import values
tf.tensor = tf.convert_to_tensor
for base in (tf.Tensor, tf.Variable, values.PerReplica):
base.mean = tf.math.reduce_mean
base.std = tf.math.reduce_std
base.var = tf.math.reduce_variance
base.sum = tf.math.reduce_sum
base.any = tf.math.reduce_any
base.all = tf.math.reduce_all
base.min = tf.math.reduce_min
base.max = tf.math.reduce_max
base.abs = tf.math.abs
base.logsumexp = tf.math.reduce_logsumexp
base.transpose = tf.transpose
base.reshape = tf.reshape
base.astype = tf.cast
# values.PerReplica.dtype = property(lambda self: self.values[0].dtype)
# tf.TensorHandle.__repr__ = lambda x: '<tensor>'
# tf.TensorHandle.__str__ = lambda x: '<tensor>'
# np.set_printoptions(threshold=5, edgeitems=0)
class Module(tf.Module):
def save(self, filename):
values = tf.nest.map_structure(lambda x: x.numpy(), self.variables)
amount = len(tf.nest.flatten(values))
count = int(sum(np.prod(x.shape) for x in tf.nest.flatten(values)))
print(f'Save checkpoint with {amount} tensors and {count} parameters.')
with pathlib.Path(filename).open('wb') as f:
pickle.dump(values, f)
def load(self, filename):
with pathlib.Path(filename).open('rb') as f:
values = pickle.load(f)
amount = len(tf.nest.flatten(values))
count = int(sum(np.prod(x.shape) for x in tf.nest.flatten(values)))
print(f'Load checkpoint with {amount} tensors and {count} parameters.')
amount_agent = len(tf.nest.flatten(self.variables))
count_agent = int(sum(np.prod(x.shape) for x in tf.nest.flatten(self.variables)))
print(f'Agent checkpoint has {amount_agent} tensors and {count_agent} parameters.')
tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values)
def get(self, name, ctor, *args, **kwargs):
# Create or get layer by name to avoid mentioning it in the constructor.
if not hasattr(self, '_modules'):
self._modules = {}
if name not in self._modules:
self._modules[name] = ctor(*args, **kwargs)
return self._modules[name]
class Optimizer(tf.Module):
def __init__(
self, name, lr, eps=1e-4, clip=None, wd=None,
opt='adam', wd_pattern=r'.*'):
    assert wd is None or 0 <= wd < 1
assert not clip or 1 <= clip
self._name = name
self._clip = clip
self._wd = wd
self._wd_pattern = wd_pattern
self._opt = {
'adam': lambda: tf.optimizers.Adam(lr, epsilon=eps),
'nadam': lambda: tf.optimizers.Nadam(lr, epsilon=eps),
'adamax': lambda: tf.optimizers.Adamax(lr, epsilon=eps),
'sgd': lambda: tf.optimizers.SGD(lr),
'momentum': lambda: tf.optimizers.SGD(lr, 0.9),
}[opt]()
self._mixed = (prec.global_policy().compute_dtype == tf.float16)
if self._mixed:
self._opt = prec.LossScaleOptimizer(self._opt, dynamic=True)
self._once = True
@property
def variables(self):
return self._opt.variables()
def __call__(self, tape, loss, modules):
assert loss.dtype is tf.float32, (self._name, loss.dtype)
assert len(loss.shape) == 0, (self._name, loss.shape)
metrics = {}
# Find variables.
modules = modules if hasattr(modules, '__len__') else (modules,)
varibs = tf.nest.flatten([module.variables for module in modules])
count = sum(np.prod(x.shape) for x in varibs)
if self._once:
print(f'Found {count} {self._name} parameters.')
self._once = False
# Check loss.
tf.debugging.check_numerics(loss, self._name + '_loss')
metrics[f'{self._name}_loss'] = loss
# Compute scaled gradient.
if self._mixed:
with tape:
loss = self._opt.get_scaled_loss(loss)
grads = tape.gradient(loss, varibs)
if self._mixed:
grads = self._opt.get_unscaled_gradients(grads)
if self._mixed:
metrics[f'{self._name}_loss_scale'] = self._opt.loss_scale
# Distributed sync.
context = tf.distribute.get_replica_context()
if context:
grads = context.all_reduce('mean', grads)
# Gradient clipping.
norm = tf.linalg.global_norm(grads)
if not self._mixed:
tf.debugging.check_numerics(norm, self._name + '_norm')
if self._clip:
grads, _ = tf.clip_by_global_norm(grads, self._clip, norm)
metrics[f'{self._name}_grad_norm'] = norm
# Weight decay.
if self._wd:
self._apply_weight_decay(varibs)
# Apply gradients.
self._opt.apply_gradients(
zip(grads, varibs),
experimental_aggregate_gradients=False)
return metrics
def _apply_weight_decay(self, varibs):
nontrivial = (self._wd_pattern != r'.*')
if nontrivial:
print('Applied weight decay to variables:')
for var in varibs:
if re.search(self._wd_pattern, self._name + '/' + var.name):
if nontrivial:
print('- ' + self._name + '/' + var.name)
var.assign((1 - self._wd) * var)
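# Usage sketch (hypothetical module and loss names): the optimizer expects a
# scalar float32 loss recorded under a GradientTape and a module (or list of
# modules) exposing .variables.
#   opt = Optimizer('model', lr=1e-4, clip=100)
#   with tf.GradientTape() as tape:
#     loss = compute_loss(batch)  # hypothetical loss function
#   metrics = opt(tape, loss, [encoder, decoder])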
# ===== end of file: dreamerv2/common/tfutils.py (repo: cascade-main) =====
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
# Patch to ignore seed to avoid synchronization across GPUs.
_orig_random_categorical = tf.random.categorical
def random_categorical(*args, **kwargs):
kwargs['seed'] = None
return _orig_random_categorical(*args, **kwargs)
tf.random.categorical = random_categorical
# Patch to ignore seed to avoid synchronization across GPUs.
_orig_random_normal = tf.random.normal
def random_normal(*args, **kwargs):
kwargs['seed'] = None
return _orig_random_normal(*args, **kwargs)
tf.random.normal = random_normal
class SampleDist:
def __init__(self, dist, samples=100):
self._dist = dist
self._samples = samples
@property
def name(self):
return 'SampleDist'
def __getattr__(self, name):
return getattr(self._dist, name)
def mean(self):
samples = self._dist.sample(self._samples)
return samples.mean(0)
def mode(self):
sample = self._dist.sample(self._samples)
logprob = self._dist.log_prob(sample)
return tf.gather(sample, tf.argmax(logprob))[0]
def entropy(self):
sample = self._dist.sample(self._samples)
logprob = self.log_prob(sample)
return -logprob.mean(0)
class OneHotDist(tfd.OneHotCategorical):
def __init__(self, logits=None, probs=None, dtype=None):
self._sample_dtype = dtype or tf.float32
super().__init__(logits=logits, probs=probs)
def mode(self):
return tf.cast(super().mode(), self._sample_dtype)
def sample(self, sample_shape=(), seed=None):
# Straight through biased gradient estimator.
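    # The forward pass returns the discrete one-hot sample; adding
    # probs - stop_gradient(probs) contributes zero to the value but routes
    # gradients through the category probabilities.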
sample = tf.cast(super().sample(sample_shape, seed), self._sample_dtype)
probs = self._pad(super().probs_parameter(), sample.shape)
sample += tf.cast(probs - tf.stop_gradient(probs), self._sample_dtype)
return sample
  def _pad(self, tensor, shape):
    while len(tensor.shape) < len(shape):
      tensor = tensor[None]
    return tensor
class TruncNormalDist(tfd.TruncatedNormal):
def __init__(self, loc, scale, low, high, clip=1e-6, mult=1):
super().__init__(loc, scale, low, high)
self._clip = clip
self._mult = mult
def sample(self, *args, **kwargs):
event = super().sample(*args, **kwargs)
if self._clip:
clipped = tf.clip_by_value(
event, self.low + self._clip, self.high - self._clip)
event = event - tf.stop_gradient(event) + tf.stop_gradient(clipped)
if self._mult:
event *= self._mult
return event
class TanhBijector(tfp.bijectors.Bijector):
def __init__(self, validate_args=False, name='tanh'):
super().__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return tf.nn.tanh(x)
def _inverse(self, y):
dtype = y.dtype
y = tf.cast(y, tf.float32)
y = tf.where(
tf.less_equal(tf.abs(y), 1.),
tf.clip_by_value(y, -0.99999997, 0.99999997), y)
y = tf.atanh(y)
y = tf.cast(y, dtype)
return y
def _forward_log_det_jacobian(self, x):
log2 = tf.math.log(tf.constant(2.0, dtype=x.dtype))
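    # log|d tanh(x)/dx| = log(1 - tanh(x)^2) = 2 * (log 2 - x - softplus(-2x)),
    # a numerically stable rewrite that avoids evaluating tanh(x) directly.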
return 2.0 * (log2 - x - tf.nn.softplus(-2.0 * x))
# ===== end of file: dreamerv2/common/dists.py (repo: cascade-main) =====
import re
import sys
class Flags:
def __init__(self, *args, **kwargs):
from .config import Config
self._config = Config(*args, **kwargs)
def parse(self, argv=None, known_only=False, help_exists=None):
if help_exists is None:
help_exists = not known_only
if argv is None:
argv = sys.argv[1:]
if '--help' in argv:
print('\nHelp:')
lines = str(self._config).split('\n')[2:]
print('\n'.join('--' + re.sub(r'[:,\[\]]', '', x) for x in lines))
help_exists and sys.exit()
parsed = {}
remaining = []
key = None
vals = None
for arg in argv:
if arg.startswith('--'):
if key:
self._submit_entry(key, vals, parsed, remaining)
if '=' in arg:
key, val = arg.split('=', 1)
vals = [val]
else:
key, vals = arg, []
else:
if key:
vals.append(arg)
else:
remaining.append(arg)
self._submit_entry(key, vals, parsed, remaining)
parsed = self._config.update(parsed)
if known_only:
return parsed, remaining
else:
for flag in remaining:
if flag.startswith('--'):
raise ValueError(f"Flag '{flag}' did not match any config keys.")
assert not remaining, remaining
return parsed
def _submit_entry(self, key, vals, parsed, remaining):
if not key and not vals:
return
if not key:
vals = ', '.join(f"'{x}'" for x in vals)
raise ValueError(f"Values {vals} were not preceeded by any flag.")
name = key[len('--'):]
if '=' in name:
remaining.extend([key] + vals)
return
if self._config.IS_PATTERN.match(name):
pattern = re.compile(name)
keys = {k for k in self._config.flat if pattern.match(k)}
elif name in self._config:
keys = [name]
else:
keys = []
if not keys:
remaining.extend([key] + vals)
return
if not vals:
raise ValueError(f"Flag '{key}' was not followed by any values.")
for key in keys:
parsed[key] = self._parse_flag_value(self._config[key], vals, key)
def _parse_flag_value(self, default, value, key):
value = value if isinstance(value, (tuple, list)) else (value,)
if isinstance(default, (tuple, list)):
if len(value) == 1 and ',' in value[0]:
value = value[0].split(',')
return tuple(self._parse_flag_value(default[0], [x], key) for x in value)
assert len(value) == 1, value
value = str(value[0])
if default is None:
return value
if isinstance(default, bool):
try:
return bool(['False', 'True'].index(value))
except ValueError:
message = f"Expected bool but got '{value}' for key '{key}'."
raise TypeError(message)
if isinstance(default, int):
value = float(value) # Allow scientific notation for integers.
if float(int(value)) != value:
message = f"Expected int but got float '{value}' for key '{key}'."
raise TypeError(message)
return int(value)
return type(default)(value)
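# Usage sketch (illustrative defaults): values are coerced to the type of the
# default, and both '--key value' and '--key=value' forms are accepted.
#   defaults = Config({'lr': 1e-3, 'task': 'dmc_walker_walk'})
#   config = Flags(defaults).parse(['--lr', '3e-4'])
#   config.lr  # 0.0003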
# ===== end of file: dreamerv2/common/flags.py (repo: cascade-main) =====
import datetime
import json
import pathlib
import imageio
import numpy as np
class Recorder:
def __init__(
self, env, directory, save_stats=True, save_video=True,
save_episode=True, video_size=(512, 512)):
if directory and save_stats:
env = StatsRecorder(env, directory)
if directory and save_video:
env = VideoRecorder(env, directory, video_size)
if directory and save_episode:
env = EpisodeRecorder(env, directory)
if not directory:
env = NoopRecorder(env)
self._env = env
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
class NoopRecorder:
def __init__(self, env):
self._env = env
def reset(self):
obs = self._env.reset()
return obs
def step(self, action, policy_idx=0):
return self._env.step(action)
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
class StatsRecorder:
def __init__(self, env, directory):
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._file = (self._directory / 'stats.jsonl').open('a')
self._length = None
self._reward = None
self._unlocked = None
self._stats = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._length = 0
self._reward = 0
self._unlocked = None
self._stats = None
return obs
def step(self, action, policy_idx=0):
obs, reward, done, info = self._env.step(action)
self._length += 1
self._reward += info['reward']
if done:
self._stats = {'length': self._length, 'reward': round(self._reward, 1), 'policy_idx': policy_idx}
for key, value in info['achievements'].items():
self._stats[f'achievement_{key}'] = value
self._save()
return obs, reward, done, info
def _save(self):
self._file.write(json.dumps(self._stats) + '\n')
self._file.flush()
class VideoRecorder:
def __init__(self, env, directory, size=(512, 512)):
if not hasattr(env, 'episode_name'):
env = EpisodeName(env)
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._size = size
self._frames = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._frames = [self._env.render(self._size)]
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._frames.append(self._env.render(self._size))
if done:
self._save()
return obs, reward, done, info
def _save(self):
filename = str(self._directory / (self._env.episode_name + '.mp4'))
imageio.mimsave(filename, self._frames)
class EpisodeRecorder:
def __init__(self, env, directory):
if not hasattr(env, 'episode_name'):
env = EpisodeName(env)
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._episode = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._episode = [{'image': obs}]
return obs
def step(self, action):
# Transitions are defined from the environment perspective, meaning that a
# transition contains the action and the resulting reward and next
# observation produced by the environment in response to said action.
obs, reward, done, info = self._env.step(action)
transition = {
'action': action, 'image': obs, 'reward': reward, 'done': done,
}
for key, value in info.items():
if key in ('inventory', 'achievements'):
continue
transition[key] = value
for key, value in info['achievements'].items():
transition[f'achievement_{key}'] = value
for key, value in info['inventory'].items():
transition[f'ainventory_{key}'] = value
self._episode.append(transition)
if done:
self._save()
return obs, reward, done, info
def _save(self):
filename = str(self._directory / (self._env.episode_name + '.npz'))
# Fill in zeros for keys missing at the first time step.
for key, value in self._episode[1].items():
if key not in self._episode[0]:
self._episode[0][key] = np.zeros_like(value)
episode = {
k: np.array([step[k] for step in self._episode])
for k in self._episode[0]}
np.savez_compressed(filename, **episode)
class EpisodeName:
def __init__(self, env):
self._env = env
self._timestamp = None
self._unlocked = None
self._length = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._timestamp = None
self._unlocked = None
self._length = 0
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._length += 1
if done:
self._timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
self._unlocked = sum(int(v >= 1) for v in info['achievements'].values())
return obs, reward, done, info
@property
def episode_name(self):
return f'{self._timestamp}-ach{self._unlocked}-len{self._length}'
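# Usage sketch: Recorder(env, 'logdir') chains StatsRecorder, VideoRecorder,
# and EpisodeRecorder (assuming crafter-style info dicts with 'achievements'
# and 'inventory' keys), so every finished episode appends a line to
# stats.jsonl and writes an .mp4 and .npz named after the episode.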
# ===== end of file: dreamerv2/common/recorder.py (repo: cascade-main) =====
# General tools.
from .config import *
from .counter import *
from .flags import *
from .logger import *
from .when import *
from .eval import *
from .cdmc import *
# RL tools.
from .other import *
from .driver import *
from .envs import *
from .replay import *
# TensorFlow tools.
from .tfutils import *
from .dists import *
from .nets import *
# ===== end of file: dreamerv2/common/__init__.py (repo: cascade-main) =====
import collections
import contextlib
import re
import time
import numpy as np
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from . import dists
from . import tfutils
class RandomAgent:
def __init__(self, act_space, logprob=False):
self.act_space = act_space['action']
self.logprob = logprob
if hasattr(self.act_space, 'n'):
self._dist = dists.OneHotDist(tf.zeros(self.act_space.n))
else:
dist = tfd.Uniform(self.act_space.low, self.act_space.high)
self._dist = tfd.Independent(dist, 1)
def __call__(self, obs, state=None, mode=None):
action = self._dist.sample(len(obs['is_first']))
output = {'action': action}
if self.logprob:
output['logprob'] = self._dist.log_prob(action)
return output, None
def static_scan(fn, inputs, start, reverse=False):
last = start
outputs = [[] for _ in tf.nest.flatten(start)]
indices = range(tf.nest.flatten(inputs)[0].shape[0])
if reverse:
indices = reversed(indices)
for index in indices:
inp = tf.nest.map_structure(lambda x: x[index], inputs)
last = fn(last, inp)
[o.append(l) for o, l in zip(outputs, tf.nest.flatten(last))]
if reverse:
outputs = [list(reversed(x)) for x in outputs]
outputs = [tf.stack(x, 0) for x in outputs]
return tf.nest.pack_sequence_as(start, outputs)
def schedule(string, step):
try:
return float(string)
except ValueError:
step = tf.cast(step, tf.float32)
match = re.match(r'linear\((.+),(.+),(.+)\)', string)
if match:
initial, final, duration = [float(group) for group in match.groups()]
mix = tf.clip_by_value(step / duration, 0, 1)
return (1 - mix) * initial + mix * final
match = re.match(r'warmup\((.+),(.+)\)', string)
if match:
warmup, value = [float(group) for group in match.groups()]
scale = tf.clip_by_value(step / warmup, 0, 1)
return scale * value
match = re.match(r'exp\((.+),(.+),(.+)\)', string)
if match:
initial, final, halflife = [float(group) for group in match.groups()]
return (initial - final) * 0.5 ** (step / halflife) + final
match = re.match(r'horizon\((.+),(.+),(.+)\)', string)
if match:
initial, final, duration = [float(group) for group in match.groups()]
mix = tf.clip_by_value(step / duration, 0, 1)
horizon = (1 - mix) * initial + mix * final
return 1 - 1 / horizon
raise NotImplementedError(string)
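# Illustrative schedule strings (formats defined above):
#   '1e-4'                  constant value 1e-4
#   'linear(1.0,0.1,1e6)'   anneals from 1.0 to 0.1 over the first 1e6 steps
#   'warmup(1e4,1e-3)'      ramps from 0 to 1e-3 over the first 1e4 steps
#   'exp(1.0,0.0,5e5)'      decays from 1.0 toward 0.0 with half-life 5e5 steps
#   'horizon(10,100,1e6)'   returns 1 - 1/h with the horizon h annealed 10 -> 100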
def lambda_return(
reward, value, pcont, bootstrap, lambda_, axis):
# Setting lambda=1 gives a discounted Monte Carlo return.
# Setting lambda=0 gives a fixed 1-step return.
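  # Computed backwards from the bootstrap, the recursion is
  #   R_t = r_t + pcont_t * ((1 - lambda_) * V_{t+1} + lambda_ * R_{t+1}).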
assert reward.shape.ndims == value.shape.ndims, (reward.shape, value.shape)
if isinstance(pcont, (int, float)):
pcont = pcont * tf.ones_like(reward)
dims = list(range(reward.shape.ndims))
dims = [axis] + dims[1:axis] + [0] + dims[axis + 1:]
if axis != 0:
reward = tf.transpose(reward, dims)
value = tf.transpose(value, dims)
pcont = tf.transpose(pcont, dims)
if bootstrap is None:
bootstrap = tf.zeros_like(value[-1])
next_values = tf.concat([value[1:], bootstrap[None]], 0)
inputs = reward + pcont * next_values * (1 - lambda_)
returns = static_scan(
lambda agg, cur: cur[0] + cur[1] * lambda_ * agg,
(inputs, pcont), bootstrap, reverse=True)
if axis != 0:
returns = tf.transpose(returns, dims)
return returns
def action_noise(action, amount, act_space):
if amount == 0:
return action
amount = tf.cast(amount, action.dtype)
if hasattr(act_space, 'n'):
probs = amount / action.shape[-1] + (1 - amount) * action
return dists.OneHotDist(probs=probs).sample()
else:
return tf.clip_by_value(tfd.Normal(action, amount).sample(), -1, 1)
class StreamNorm(tfutils.Module):
def __init__(self, shape=(), momentum=0.99, scale=1.0, eps=1e-8):
# Momentum of 0 normalizes only based on the current batch.
# Momentum of 1 disables normalization.
self._shape = tuple(shape)
self._momentum = momentum
self._scale = scale
self._eps = eps
self.mag = tf.Variable(tf.ones(shape, tf.float64), False)
def __call__(self, inputs):
metrics = {}
self.update(inputs)
metrics['mean'] = inputs.mean()
metrics['std'] = inputs.std()
outputs = self.transform(inputs)
metrics['normed_mean'] = outputs.mean()
metrics['normed_std'] = outputs.std()
return outputs, metrics
def reset(self):
self.mag.assign(tf.ones_like(self.mag))
def update(self, inputs):
batch = inputs.reshape((-1,) + self._shape)
mag = tf.abs(batch).mean(0).astype(tf.float64)
self.mag.assign(self._momentum * self.mag + (1 - self._momentum) * mag)
def transform(self, inputs):
values = inputs.reshape((-1,) + self._shape)
values /= self.mag.astype(inputs.dtype)[None] + self._eps
values *= self._scale
return values.reshape(inputs.shape)
class Timer:
def __init__(self):
self._indurs = collections.defaultdict(list)
self._outdurs = collections.defaultdict(list)
self._start_times = {}
self._end_times = {}
@contextlib.contextmanager
def section(self, name):
self.start(name)
yield
self.end(name)
def wrap(self, function, name):
def wrapped(*args, **kwargs):
with self.section(name):
return function(*args, **kwargs)
return wrapped
def start(self, name):
now = time.time()
self._start_times[name] = now
if name in self._end_times:
last = self._end_times[name]
self._outdurs[name].append(now - last)
def end(self, name):
now = time.time()
self._end_times[name] = now
self._indurs[name].append(now - self._start_times[name])
def result(self):
metrics = {}
for key in self._indurs:
indurs = self._indurs[key]
outdurs = self._outdurs[key]
metrics[f'timer_count_{key}'] = len(indurs)
metrics[f'timer_inside_{key}'] = np.sum(indurs)
metrics[f'timer_outside_{key}'] = np.sum(outdurs)
indurs.clear()
outdurs.clear()
return metrics
class CarryOverState:
def __init__(self, fn):
self._fn = fn
self._state = None
def __call__(self, *args):
self._state, out = self._fn(*args, self._state)
return out
# ===== end of file: dreamerv2/common/other.py (repo: cascade-main) =====
import json
import os
import pathlib
import time
import numpy as np
class Logger:
def __init__(self, step, outputs, multiplier=1):
self._step = step
self._outputs = outputs
self._multiplier = multiplier
self._last_step = None
self._last_time = None
self._metrics = []
def add(self, mapping, prefix=None):
step = int(self._step) * self._multiplier
for name, value in dict(mapping).items():
name = f'{prefix}_{name}' if prefix else name
value = np.array(value)
if len(value.shape) not in (0, 2, 3, 4):
raise ValueError(
f"Shape {value.shape} for name '{name}' cannot be "
"interpreted as scalar, image, or video.")
self._metrics.append((step, name, value))
def scalar(self, name, value):
self.add({name: value})
def image(self, name, value):
self.add({name: value})
def video(self, name, value):
self.add({name: value})
def write(self, fps=False):
fps and self.scalar('fps', self._compute_fps())
if not self._metrics:
return
for output in self._outputs:
output(self._metrics)
self._metrics.clear()
def _compute_fps(self):
step = int(self._step) * self._multiplier
if self._last_step is None:
self._last_time = time.time()
self._last_step = step
return 0
steps = step - self._last_step
duration = time.time() - self._last_time
self._last_time += duration
self._last_step = step
return steps / duration
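# Usage sketch (assumed wiring): step is an int-convertible counter and every
# output is a callable taking a list of (step, name, np.array) tuples.
#   logger = Logger(step, [TerminalOutput(), JSONLOutput('~/logdir')])
#   logger.scalar('loss', 0.5)
#   logger.write(fps=True)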
class TerminalOutput:
def __call__(self, summaries):
    step = max(s for s, _, _ in summaries)
scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0}
formatted = {k: self._format_value(v) for k, v in scalars.items()}
print(f'[{step}]', ' / '.join(f'{k} {v}' for k, v in formatted.items()))
def _format_value(self, value):
if value == 0:
return '0'
elif 0.01 < abs(value) < 10000:
value = f'{value:.2f}'
      value = value.rstrip('0')
value = value.rstrip('.')
return value
else:
value = f'{value:.1e}'
value = value.replace('.0e', 'e')
value = value.replace('+0', '')
value = value.replace('+', '')
value = value.replace('-0', '-')
return value
class JSONLOutput:
def __init__(self, logdir):
self._logdir = pathlib.Path(logdir).expanduser()
def __call__(self, summaries):
scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0}
    step = max(s for s, _, _ in summaries)
with (self._logdir / 'metrics.jsonl').open('a') as f:
f.write(json.dumps({'step': step, **scalars}) + '\n')
class TensorBoardOutput:
def __init__(self, logdir, fps=20):
# The TensorFlow summary writer supports file protocols like gs://. We use
# os.path over pathlib here to preserve those prefixes.
self._logdir = os.path.expanduser(logdir)
self._writer = None
self._fps = fps
def __call__(self, summaries):
import tensorflow as tf
self._ensure_writer()
self._writer.set_as_default()
for step, name, value in summaries:
if len(value.shape) == 0:
tf.summary.scalar('scalars/' + name, value, step)
elif len(value.shape) == 2:
tf.summary.image(name, value, step)
elif len(value.shape) == 3:
tf.summary.image(name, value, step)
elif len(value.shape) == 4:
self._video_summary(name, value, step)
self._writer.flush()
def _ensure_writer(self):
if not self._writer:
import tensorflow as tf
self._writer = tf.summary.create_file_writer(
self._logdir, max_queue=1000)
def _video_summary(self, name, video, step):
import tensorflow as tf
import tensorflow.compat.v1 as tf1
name = name if isinstance(name, str) else name.decode('utf-8')
if np.issubdtype(video.dtype, np.floating):
video = np.clip(255 * video, 0, 255).astype(np.uint8)
try:
T, H, W, C = video.shape
summary = tf1.Summary()
image = tf1.Summary.Image(height=H, width=W, colorspace=C)
image.encoded_image_string = encode_gif(video, self._fps)
summary.value.add(tag=name, image=image)
tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
except (IOError, OSError) as e:
print('GIF summaries require ffmpeg in $PATH.', e)
tf.summary.image(name, video, step)
def encode_gif(frames, fps):
from subprocess import Popen, PIPE
h, w, c = frames[0].shape
pxfmt = {1: 'gray', 3: 'rgb24'}[c]
cmd = ' '.join([
'ffmpeg -y -f rawvideo -vcodec rawvideo',
f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
f'-r {fps:.02f} -f gif -'])
proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in frames:
proc.stdin.write(image.tobytes())
out, err = proc.communicate()
if proc.returncode:
    raise IOError('\n'.join([cmd, err.decode('utf8')]))
del proc
return out
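# encode_gif expects a sequence of HxWxC uint8 frames with C of 1 or 3 and
# returns the encoded GIF as bytes; it requires the ffmpeg binary on $PATH.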
# ===== end of file: dreamerv2/common/logger.py (repo: cascade-main) =====
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import datetime
import io
import pathlib
import uuid
import numpy as np
import tensorflow as tf
class Replay:
def __init__(
self, directory, capacity=0, offline_init=False, ongoing=False, minlen=1, maxlen=0,
prioritize_ends=False, multi_reward=False, offline_directory=None):
self._capacity = capacity
self._ongoing = ongoing
self._minlen = minlen
self._maxlen = maxlen
self._prioritize_ends = prioritize_ends
self._random = np.random.RandomState()
self._eval_score = 0
self.achievements = collections.defaultdict(list)
self._solved_levels = 0
self._multi_reward = multi_reward
self._max_scores = 0
self.rewards = []
self._mean_scores = 0
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(parents=True, exist_ok=True)
if offline_init:
self._total_episodes = 0
self._total_steps = 0
self._loaded_episodes = 0
self._loaded_steps = 0
self._complete_eps = {}
if type(offline_directory) is not list:
offline_directory = [offline_directory]
for d in offline_directory:
print(f"\nloading...{d}")
path = pathlib.Path(d).expanduser()
complete_eps, t_steps, t_eps = self.load_episodes(path, capacity, minlen)
saved_eps = save_episodes(self._directory, complete_eps)
self._complete_eps.update(saved_eps)
self._enforce_limit()
self._loaded_episodes += len(complete_eps)
self._loaded_steps += sum(eplen(x) for x in complete_eps.values())
# filename -> key -> value_sequence
self._complete_eps, _, _ = self.load_episodes(self._directory, capacity, minlen)
# worker -> key -> value_sequence
self._total_episodes, self._total_steps = count_episodes(directory)
self._loaded_episodes = len(self._complete_eps)
self._loaded_steps = sum(eplen(x) for x in self._complete_eps.values())
self._ongoing_eps = collections.defaultdict(lambda: collections.defaultdict(list))
@property
def stats(self):
return {
'total_steps': self._total_steps,
'total_episodes': self._total_episodes,
'loaded_steps': self._loaded_steps,
'loaded_episodes': self._loaded_episodes,
'running_score': self._eval_score,
'solved_levels': self._solved_levels,
'max_scores': self._max_scores,
'mean_scores': self._mean_scores
}
def add_step(self, transition, worker=0):
episode = self._ongoing_eps[worker]
for key, value in transition.items():
episode[key].append(value)
if transition['is_last']:
self.add_episode(episode)
episode.clear()
def add_episode(self, episode):
length = eplen(episode)
if 'log_achievement_collect_diamond' in episode.keys():
self.update_crafter_score(episode)
if self._multi_reward:
pass # in case we need to do something here
elif 'reward' in episode.keys() and sum(episode['reward']) > 0:
rew = sum(episode['reward'])
self._solved_levels += 1
self._max_scores = max(self._max_scores, rew)
self.rewards.append(rew)
self._mean_scores = np.mean(self.rewards)
if length < self._minlen:
print(f'Skipping short episode of length {length}.')
return
self._total_steps += length
self._loaded_steps += length
self._total_episodes += 1
self._loaded_episodes += 1
episode = {key: convert(value) for key, value in episode.items()}
if self._multi_reward:
episode['reward'] = reshape_rewards_dmc(episode)
filename = save_episode(self._directory, episode)
self._complete_eps[str(filename)] = episode
self._enforce_limit()
def dataset(self, batch, length):
example = next(iter(self._generate_chunks(length)))
dataset = tf.data.Dataset.from_generator(
lambda: self._generate_chunks(length),
{k: v.dtype for k, v in example.items()},
{k: v.shape for k, v in example.items()})
dataset = dataset.batch(batch, drop_remainder=True)
dataset = dataset.prefetch(5)
return dataset
def _generate_chunks(self, length):
sequence = self._sample_sequence()
while True:
chunk = collections.defaultdict(list)
added = 0
while added < length:
needed = length - added
adding = {k: v[:needed] for k, v in sequence.items()}
sequence = {k: v[needed:] for k, v in sequence.items()}
for key, value in adding.items():
chunk[key].append(value)
added += len(adding['action'])
if len(sequence['action']) < 1:
sequence = self._sample_sequence()
chunk = {k: np.concatenate(v) for k, v in chunk.items()}
yield chunk
def _sample_sequence(self):
episodes = list(self._complete_eps.values())
if self._ongoing:
episodes += [
x for x in self._ongoing_eps.values()
if eplen(x) >= self._minlen]
episode = self._random.choice(episodes)
total = len(episode['action'])
length = total
if self._maxlen:
length = min(length, self._maxlen)
# Randomize length to avoid all chunks ending at the same time in case the
# episodes are all of the same length.
length -= np.random.randint(self._minlen)
length = max(self._minlen, length)
upper = total - length + 1
if self._prioritize_ends:
upper += self._minlen
index = min(self._random.randint(upper), total - length)
sequence = {
k: convert(v[index: index + length])
for k, v in episode.items() if not k.startswith('log_')}
    sequence['is_first'] = np.zeros(len(sequence['action']), bool)
sequence['is_first'][0] = True
if self._maxlen:
assert self._minlen <= len(sequence['action']) <= self._maxlen
return sequence
def _enforce_limit(self):
if not self._capacity:
return
while self._loaded_episodes > 1 and self._loaded_steps > self._capacity:
# Relying on Python preserving the insertion order of dicts.
oldest, episode = next(iter(self._complete_eps.items()))
self._loaded_steps -= eplen(episode)
self._loaded_episodes -= 1
del self._complete_eps[oldest]
def update_crafter_score(self, episode):
for key, val in episode.items():
if 'log_achievement' in key:
self.achievements[key] += [int(any([x.item() for x in episode[key]]))]
means = [np.mean(vals)*100 for vals in self.achievements.values()]
self._eval_score = (np.exp(np.nanmean(np.log(1 + np.array(means)), -1)) - 1)
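    # This matches the Crafter benchmark score: a geometric mean of the
    # per-achievement success rates s_i in percent, exp(mean(log(1 + s_i))) - 1.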
def load_episodes(self, directory, capacity=None, minlen=1):
    # The returned dictionary from filenames to episodes is guaranteed to be
    # in temporally sorted order.
filenames = sorted(directory.glob('*.npz'))
if capacity:
num_steps = 0
num_episodes = 0
for filename in reversed(filenames):
length = int(str(filename).split('-')[-1][:-4])
num_steps += length
num_episodes += 1
if num_steps >= capacity:
break
filenames = filenames[-num_episodes:]
episodes = {}
num_steps = 0
num_episodes = 0
for filename in filenames:
try:
with filename.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
for key, val in episode.items():
if 'log_achievement' in key:
self.achievements[key] += [int(any([x.item() for x in episode[key]]))]
if not self._multi_reward:
if 'reward' in episode.keys() and sum(episode['reward']) > 0:
rew = sum(episode['reward'])
self._solved_levels += 1
self._max_scores = max(self._max_scores, rew)
self.rewards.append(rew)
self._mean_scores = np.mean(self.rewards)
num_steps += 1
num_episodes += 1
except Exception as e:
print(f'Could not load episode {str(filename)}: {e}')
continue
if 'is_terminal' not in episode:
episode['is_terminal'] = episode['discount'] == 0
episodes[str(filename)] = episode
return episodes, num_steps, num_episodes
def count_episodes(directory):
filenames = list(directory.glob('*.npz'))
num_episodes = len(filenames)
num_steps = sum(int(str(n).split('-')[-1][:-4]) - 1 for n in filenames)
return num_episodes, num_steps
def save_episode(directory, episode):
timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
identifier = str(uuid.uuid4().hex)
length = eplen(episode)
filename = directory / f'{timestamp}-{identifier}-{length}.npz'
with io.BytesIO() as f1:
np.savez_compressed(f1, **episode)
f1.seek(0)
with filename.open('wb') as f2:
f2.write(f1.read())
return filename
def save_episodes(directory, episodes):
saved_eps = {}
for _, ep in episodes.items():
filename = save_episode(directory, ep)
saved_eps[str(filename)] = ep
return saved_eps
def convert(value):
value = np.array(value)
if np.issubdtype(value.dtype, np.floating):
return value.astype(np.float32)
elif np.issubdtype(value.dtype, np.signedinteger):
return value.astype(np.int32)
elif np.issubdtype(value.dtype, np.uint8):
return value.astype(np.uint8)
return value
def reshape_rewards_dmc(episode):
rew = np.concatenate([r.reshape(1, -1) for r in episode['reward'][1:]], 0)
rew = np.concatenate((np.zeros(rew.shape[1]).reshape(1, rew.shape[1]), rew))
return rew
def eplen(episode):
return len(episode['action']) - 1
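# --- Illustrative sketch (not part of the original file): how `eplen` and
# `convert` behave on a toy episode. Assumes the convention used above, where
# an episode stores one more entry than the number of environment steps.
if __name__ == '__main__':
  toy_episode = {
      'action': np.zeros((11, 3)),                    # reset step + 10 steps
      'reward': np.zeros(11, np.float64),
      'image': np.zeros((11, 64, 64, 3), np.uint8),
  }
  assert eplen(toy_episode) == 10                     # excludes the reset step
  assert convert(toy_episode['reward']).dtype == np.float32
  assert convert(toy_episode['image']).dtype == np.uint8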
|
cascade-main
|
dreamerv2/common/replay.py
|
"""In gym, the RAM is represented as an 128-element array, where each element in the array can range from 0 to 255
The atari_dict below is organized as so:
key: the name of the game
value: the game dictionary
Game dictionary is organized as:
key: state variable name
value: the element in the RAM array where the value of that state variable is stored
e.g. the value of the x coordinate of the player in asteroids is stored in the 73rd (counting up from 0)
element of the RAM array (when the player in asteroids moves horizontally, ram_array[73] should change
in value correspondingly)
"""
""" MZR player_direction values:
72: facing left,
40: facing left, climbing down ladder/rope
24: facing left, climbing up ladder/rope
128: facing right
32: facing right, climbing down ladder/rope
16: facing right climbing up ladder/rope """
atari_dict = {
"asteroids": dict(enemy_asteroids_y=[3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19],
enemy_asteroids_x=[21, 22, 23, 24, 25, 26, 27, 30, 31, 32, 33, 34, 35, 36, 37],
player_x=73,
player_y=74,
num_lives_direction=60,
player_score_high=61,
player_score_low=62,
player_missile_x1=83,
player_missile_x2=84,
player_missile_y1=86,
player_missile_y2=87,
player_missile1_direction=89,
player_missile2_direction=90),
"battlezone": dict( # red_enemy_x=75,
blue_tank_facing_direction=46, # 17 left 21 forward 29 right
blue_tank_size_y=47, # tank gets larger as it gets closer
blue_tank_x=48,
blue_tank2_facing_direction=52,
blue_tank2_size_y=53,
blue_tank2_x=54,
num_lives=58,
missile_y=105,
compass_needles_angle=84,
angle_of_tank=4, # as shown by what the mountains look like
left_tread_position=59, # got to mod this number by 8 to get unique values
right_tread_position=60, # got to mod this number by 8 to get unique values
crosshairs_color=108, # 0 if black 46 if yellow
score=29),
"berzerk": dict(player_x=19,
player_y=11,
player_direction=14,
player_missile_x=22,
player_missile_y=23,
player_missile_direction=21,
robot_missile_direction=26,
robot_missile_x=29,
robot_missile_y=30,
num_lives=90,
robots_killed_count=91,
game_level=92,
enemy_evilOtto_x=46,
enemy_evilOtto_y=89,
enemy_robots_x=range(65, 73),
enemy_robots_y=range(56, 65),
player_score=range(93, 96)),
"bowling": dict(ball_x=30,
ball_y=41,
player_x=29,
player_y=40,
frame_number_display=36,
pin_existence=range(57, 67),
score=33),
"boxing": dict(player_x=32,
player_y=34,
enemy_x=33,
enemy_y=35,
enemy_score=19,
clock=17,
player_score=18),
"breakout": dict(ball_x=99,
ball_y=101,
player_x=72,
blocks_hit_count=77,
block_bit_map=range(30), # see breakout bitmaps tab
score=84), # 5 for each hit
"demonattack": dict(level=62,
player_x=22,
enemy_x1=17,
enemy_x2=18,
enemy_x3=19,
missile_y=21,
enemy_y1=69,
enemy_y2=70,
enemy_y3=71,
num_lives=114),
"freeway": dict(player_y=14,
score=103,
enemy_car_x=range(108, 118)), # which lane the car collided with player
"frostbite": dict(
top_row_iceflow_x=34,
second_row_iceflow_x=33,
third_row_iceflow_x=32,
fourth_row_iceflow_x=31,
enemy_bear_x=104,
num_lives=76,
        igloo_blocks_count=77,  # 255 is none and 15 is all
enemy_x=range(84, 88), # 84 bottom row - 87 top row
player_x=102,
player_y=100,
player_direction=4,
score=[72, 73, 74]),
"hero": dict(player_x=27,
player_y=31,
power_meter=43,
room_number=28,
level_number=117,
dynamite_count=50,
score=[56, 57]),
"montezumarevenge": dict(room_number=3,
player_x=42,
player_y=43,
                            player_direction=52,  # see the MZR player_direction values in the docstring above
enemy_skull_x=47,
enemy_skull_y=46,
key_monster_x=44,
key_monster_y=45,
level=57,
num_lives=58,
items_in_inventory_count=61,
room_state=62,
score_0=19,
score_1=20,
score_2=21),
"mspacman": dict(enemy_sue_x=6,
enemy_inky_x=7,
enemy_pinky_x=8,
enemy_blinky_x=9,
enemy_sue_y=12,
enemy_inky_y=13,
enemy_pinky_y=14,
enemy_blinky_y=15,
player_x=10,
player_y=16,
fruit_x=11,
fruit_y=17,
ghosts_count=19,
player_direction=56,
dots_eaten_count=119,
player_score=120,
num_lives=123),
"pitfall": dict(player_x=97, # 8-148
player_y=105, # 21-86 except for when respawning then 0-255 with confusing wraparound
enemy_logs_x=98, # 0-160
enemy_scorpion_x=99,
# player_y_on_ladder= 108, # 0-20
# player_collided_with_rope= 5, #yes if bit 6 is 1
bottom_of_rope_y=18, # 0-20 varies even when you can't see rope
clock_sec=89,
clock_min=88
),
"pong": dict(player_y=51,
player_x=46,
enemy_y=50,
enemy_x=45,
ball_x=49,
ball_y=54,
enemy_score=13,
player_score=14),
"privateeye": dict(player_x=63,
player_y=86,
room_number=92,
clock=[67, 69],
player_direction=58,
score=[73, 74],
dove_x=48,
dove_y=39),
"qbert": dict(player_x=43,
player_y=67,
player_column=35,
red_enemy_column=69,
green_enemy_column=105,
score=[89, 90, 91], # binary coded decimal score
tile_color=[ 21, # row of 1
52, 54, # row of 2
83, 85, 87, # row of 3
98, 100, 102, 104, # row of 4
1, 3, 5, 7, 9, # row of 5
32, 34, 36, 38, 40, 42]), # row of 6
"riverraid": dict(player_x=51,
missile_x=117,
missile_y=50,
fuel_meter_high=55, # high value displayed
fuel_meter_low=56 # low value
),
"seaquest": dict(enemy_obstacle_x=range(30, 34),
player_x=70,
player_y=97,
diver_or_enemy_missile_x=range(71, 75),
player_direction=86,
player_missile_direction=87,
oxygen_meter_value=102,
player_missile_x=103,
score=[57, 58],
num_lives=59,
divers_collected_count=62),
"skiing": dict(player_x=25,
clock_m=104,
clock_s=105,
clock_ms=106,
score=107,
object_y=range(87, 94)), # object_y_1 is y position of whatever topmost object on the screen is
"spaceinvaders": dict(invaders_left_count=17,
player_score=104,
num_lives=73,
player_x=28,
enemies_x=26,
missiles_y=9,
enemies_y=24),
"tennis": dict(enemy_x=27,
enemy_y=25,
enemy_score=70,
ball_x=16,
ball_y=17,
player_x=26,
player_y=24,
player_score=69),
"venture": dict(sprite0_y=20,
sprite1_y=21,
sprite2_y=22,
sprite3_y=23,
sprite4_y=24,
sprite5_y=25,
sprite0_x=79,
sprite1_x=80,
sprite2_x=81,
sprite3_x=82,
sprite4_x=83,
sprite5_x=84,
player_x=85,
player_y=26,
               current_room=90,  # the number of the room the player is currently in, 0 to 9
num_lives=70,
score_1_2=71,
score_3_4=72),
"videopinball": dict(ball_x=67,
ball_y=68,
player_left_paddle_y=98,
player_right_paddle_y=102,
score_1=48,
score_2=50),
"yarsrevenge": dict(player_x=32,
player_y=31,
player_missile_x=38,
player_missile_y=37,
enemy_x=43,
enemy_y=42,
enemy_missile_x=47,
enemy_missile_y=46)
}
# break up any lists (e.g. dict(clock=[67, 69]) -> dict(clock_0=67, clock_1=69) )
update_dict = {k: {} for k in atari_dict.keys()}
remove_dict = {k: [] for k in atari_dict.keys()}
for game, d in atari_dict.items():
for k, v in d.items():
        if isinstance(v, (range, list)):
for i, vi in enumerate(v):
update_dict[game]["%s_%i" % (k, i)] = vi
remove_dict[game].append(k)
for k in atari_dict.keys():
atari_dict[k].update(update_dict[k])
for rk in remove_dict[k]:
atari_dict[k].pop(rk)
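# --- Illustrative sketch (not part of the original file): reading the labeled
# RAM values for one game. Assumes a Gym Atari environment whose unwrapped env
# exposes `ale.getRAM()`, as ALE-based gym environments do.
if __name__ == '__main__':
    import gym
    env = gym.make('MsPacmanNoFrameskip-v4')
    env.reset()
    ram = env.unwrapped.ale.getRAM()  # 128-element uint8 array
    labels = {name: ram[idx] for name, idx in atari_dict['mspacman'].items()}
    print(labels['player_x'], labels['player_y'], labels['num_lives'])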
|
cascade-main
|
dreamerv2/common/ram_annotations.py
|
class Every:
def __init__(self, every):
self._every = every
self._last = None
def __call__(self, step):
step = int(step)
if not self._every:
return False
if self._last is None:
self._last = step
return True
if step >= self._last + self._every:
self._last += self._every
return True
return False
class Once:
def __init__(self):
self._once = True
def __call__(self):
if self._once:
self._once = False
return True
return False
class Until:
def __init__(self, until):
self._until = until
def __call__(self, step):
step = int(step)
if not self._until:
return True
return step < self._until
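# --- Illustrative sketch (not part of the original file): typical usage of the
# schedule helpers above inside a training loop.
if __name__ == '__main__':
  should_log = Every(100)      # True at steps 0, 100, 200, ...
  should_pretrain = Once()     # True exactly once
  should_explore = Until(300)  # True while step < 300
  for step in range(0, 500, 50):
    if should_log(step):
      print('log at step', step)
    if should_pretrain():
      print('pretrain once at step', step)
    if not should_explore(step):
      print('switch to exploitation at step', step)
      break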
|
cascade-main
|
dreamerv2/common/when.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from .cdmc import DMC_TASK_IDS
import numpy as np
from scipy.stats import gmean
def get_stats_at_idx(driver, task, idx):
"""
Get the train / eval stats from driver from the idx env.
"""
prefix = "eval_"
eps = driver._eps[idx]
eval_data = defaultdict(list)
if task == 'crafter_noreward':
for ep in eps:
for key, val in ep.items():
if 'log_achievement_' in key:
eval_data[prefix + 'rew_'+key.split('log_achievement_')[1]].append(val.item())
eval_data[prefix + 'sr_'+key.split('log_achievement_')[1]].append(1 if val.item() > 0 else 0)
eval_data['reward'].append(ep['log_reward'].item())
eval_data = {key: np.mean(val) for key, val in eval_data.items()}
eval_data[prefix + 'crafter_score'] = gmean([val for key, val in eval_data.items() if 'eval_sr' in key])
elif task in DMC_TASK_IDS:
rewards = [ep['reward'] for ep in eps[1:]]
    for goal_idx, goal in enumerate(DMC_TASK_IDS[task]):
      eval_data[prefix + 'reward_' + goal] = np.sum([r[goal_idx] for r in rewards])
else:
eval_data[prefix + 'reward'] = np.sum([ep['reward'] for ep in eps])
return eval_data
def get_stats(driver, task):
per_env_data = defaultdict(list)
num_envs = len(driver._envs)
for i in range(num_envs):
stat = get_stats_at_idx(driver, task, i)
for k, v in stat.items():
per_env_data[k].append(v)
data = {}
for k, v in per_env_data.items():
data[k] = np.mean(v)
return data
def eval(driver, config, expl_policies, logdir):
  # Average evaluation statistics across the population of exploration agents.
mets = {}
mean_pop = {}
for idx in range(config.num_agents):
policy = expl_policies[idx]
driver(policy, episodes=config.eval_eps, policy_idx=idx)
data = get_stats(driver, task=config.task)
    if idx == 0:
      for key, val in data.items():
        mean_pop[key] = np.mean(val)
    else:
      for key, val in data.items():
        mean_pop[key] += np.mean(val)
  # Divide the accumulated sums by the population size to report means.
  mets.update({key: val / config.num_agents for key, val in mean_pop.items()})
return mets
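# --- Illustrative sketch (not part of the original file): the Crafter score
# aggregated above is a geometric mean of per-achievement success rates; the
# replay-buffer variant uses exp(mean(log(1 + s))) - 1 on rates in percent,
# which stays defined when some rates are zero.
if __name__ == '__main__':
  success_rates = np.array([90.0, 50.0, 0.0, 10.0])  # hypothetical, in percent
  score = np.exp(np.nanmean(np.log(1 + success_rates))) - 1
  print('crafter score:', score)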
|
cascade-main
|
dreamerv2/common/eval.py
|
import numpy as np
class Driver:
def __init__(self, envs, **kwargs):
self._envs = envs
self._kwargs = kwargs
self._on_steps = []
self._on_resets = []
self._on_episodes = []
self._act_spaces = [env.act_space for env in envs]
self.reset()
def on_step(self, callback):
self._on_steps.append(callback)
def on_reset(self, callback):
self._on_resets.append(callback)
def on_episode(self, callback):
self._on_episodes.append(callback)
def reset(self):
self._obs = [None] * len(self._envs)
self._eps = [None] * len(self._envs)
self._state = None
def __call__(self, policy, steps=0, episodes=0, policy_idx=0, save_img=False):
step, episode = 0, 0
while step < steps or episode < episodes:
obs = {
i: self._envs[i].reset()
for i, ob in enumerate(self._obs) if ob is None or ob['is_last']}
for i, ob in obs.items():
        self._obs[i] = ob() if callable(ob) else ob
        act = {k: np.zeros(v.shape) for k, v in self._act_spaces[i].items()}
        tran = {k: self._convert(v) for k, v in {**self._obs[i], **act}.items()}
[fn(tran, worker=i, **self._kwargs) for fn in self._on_resets]
self._eps[i] = [tran]
obs = {k: np.stack([o[k] for o in self._obs]) for k in self._obs[0]}
actions, self._state = policy(obs, self._state, **self._kwargs)
actions = [
{k: np.array(actions[k][i]) for k in actions}
for i in range(len(self._envs))]
assert len(actions) == len(self._envs)
      should_save_img = save_img
obs = [e.step(a) for e, a in zip(self._envs, actions)]
obs = [ob() if callable(ob) else ob for ob in obs]
for i, (act, ob) in enumerate(zip(actions, obs)):
tran = {k: self._convert(v) for k, v in {**ob, **act}.items()}
[fn(tran, worker=i, **self._kwargs) for fn in self._on_steps]
self._eps[i].append(tran)
step += 1
if ob['is_last']:
ep = self._eps[i]
ep = {k: self._convert([t[k] for t in ep]) for k in ep[0]}
[fn(ep, **self._kwargs) for fn in self._on_episodes]
episode += 1
self._obs = obs
def _convert(self, value):
value = np.array(value)
if np.issubdtype(value.dtype, np.floating):
return value.astype(np.float32)
elif np.issubdtype(value.dtype, np.signedinteger):
return value.astype(np.int32)
elif np.issubdtype(value.dtype, np.uint8):
return value.astype(np.uint8)
return value
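# --- Illustrative sketch (not part of the original file): wiring up a Driver.
# Assumes `envs` return dict observations containing an 'is_last' key, expose
# `act_space`, and that `policy(obs, state)` returns (actions, state).
if __name__ == '__main__':
  def log_step(tran, worker=0, **kwargs):
    pass  # e.g. add the transition to a replay buffer
  def log_episode(ep, **kwargs):
    print('episode return:', float(np.sum(ep['reward'])))
  # driver = Driver(envs)
  # driver.on_step(log_step)
  # driver.on_episode(log_episode)
  # driver(policy, episodes=10)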
|
cascade-main
|
dreamerv2/common/driver.py
|
import functools
@functools.total_ordering
class Counter:
def __init__(self, initial=0):
self.value = initial
def __int__(self):
return int(self.value)
def __eq__(self, other):
return int(self) == other
def __ne__(self, other):
return int(self) != other
def __lt__(self, other):
return int(self) < other
def __add__(self, other):
return int(self) + other
def increment(self, amount=1):
self.value += amount
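# --- Illustrative sketch (not part of the original file): Counter compares
# against plain ints via functools.total_ordering.
if __name__ == '__main__':
  step = Counter()
  step.increment(10)
  assert step == 10 and step < 11 and step + 5 == 15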
|
cascade-main
|
dreamerv2/common/counter.py
|
import re
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers as tfkl
from tensorflow_probability import distributions as tfd
from tensorflow.keras.mixed_precision import experimental as prec
import common
class EnsembleRSSM(common.Module):
def __init__(
self, ensemble=5, stoch=30, deter=200, hidden=200, discrete=False,
act='elu', norm='none', std_act='softplus', min_std=0.1):
super().__init__()
self._ensemble = ensemble
self._stoch = stoch
self._deter = deter
self._hidden = hidden
self._discrete = discrete
self._act = get_act(act)
self._norm = norm
self._std_act = std_act
self._min_std = min_std
self._cell = GRUCell(self._deter, norm=True)
self._cast = lambda x: tf.cast(x, prec.global_policy().compute_dtype)
def initial(self, batch_size):
dtype = prec.global_policy().compute_dtype
if self._discrete:
state = dict(
logit=tf.zeros([batch_size, self._stoch, self._discrete], dtype),
stoch=tf.zeros([batch_size, self._stoch, self._discrete], dtype),
deter=self._cell.get_initial_state(None, batch_size, dtype))
else:
state = dict(
mean=tf.zeros([batch_size, self._stoch], dtype),
std=tf.zeros([batch_size, self._stoch], dtype),
stoch=tf.zeros([batch_size, self._stoch], dtype),
deter=self._cell.get_initial_state(None, batch_size, dtype))
return state
@tf.function
def observe(self, embed, action, is_first, state=None):
swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape))))
if state is None:
state = self.initial(tf.shape(action)[0])
post, prior = common.static_scan(
lambda prev, inputs: self.obs_step(prev[0], *inputs),
(swap(action), swap(embed), swap(is_first)), (state, state))
post = {k: swap(v) for k, v in post.items()}
prior = {k: swap(v) for k, v in prior.items()}
return post, prior
@tf.function
def imagine(self, action, state=None):
swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape))))
if state is None:
state = self.initial(tf.shape(action)[0])
assert isinstance(state, dict), state
action = swap(action)
prior = common.static_scan(self.img_step, action, state)
prior = {k: swap(v) for k, v in prior.items()}
return prior
def get_feat(self, state):
stoch = self._cast(state['stoch'])
if self._discrete:
shape = stoch.shape[:-2] + [self._stoch * self._discrete]
stoch = tf.reshape(stoch, shape)
return tf.concat([stoch, state['deter']], -1)
def get_dist(self, state, ensemble=False):
if ensemble:
state = self._suff_stats_ensemble(state['deter'])
if self._discrete:
logit = state['logit']
logit = tf.cast(logit, tf.float32)
dist = tfd.Independent(common.OneHotDist(logit), 1)
else:
mean, std = state['mean'], state['std']
mean = tf.cast(mean, tf.float32)
std = tf.cast(std, tf.float32)
dist = tfd.MultivariateNormalDiag(mean, std)
return dist
@tf.function
def obs_step(self, prev_state, prev_action, embed, is_first, sample=True):
    # Zero out the previous state and action wherever a new episode begins.
prev_state, prev_action = tf.nest.map_structure(
lambda x: tf.einsum(
'b,b...->b...', 1.0 - is_first.astype(x.dtype), x),
(prev_state, prev_action))
prior = self.img_step(prev_state, prev_action, sample)
x = tf.concat([prior['deter'], embed], -1)
x = self.get('obs_out', tfkl.Dense, self._hidden)(x)
x = self.get('obs_out_norm', NormLayer, self._norm)(x)
x = self._act(x)
stats = self._suff_stats_layer('obs_dist', x)
dist = self.get_dist(stats)
stoch = dist.sample() if sample else dist.mode()
post = {'stoch': stoch, 'deter': prior['deter'], **stats}
return post, prior
@tf.function
def img_step(self, prev_state, prev_action, sample=True):
prev_stoch = self._cast(prev_state['stoch'])
prev_action = self._cast(prev_action)
if self._discrete:
shape = prev_stoch.shape[:-2] + [self._stoch * self._discrete]
prev_stoch = tf.reshape(prev_stoch, shape)
x = tf.concat([prev_stoch, prev_action], -1)
x = self.get('img_in', tfkl.Dense, self._hidden)(x)
x = self.get('img_in_norm', NormLayer, self._norm)(x)
x = self._act(x)
deter = prev_state['deter']
x, deter = self._cell(x, [deter])
deter = deter[0] # Keras wraps the state in a list.
stats = self._suff_stats_ensemble(x)
index = tf.random.uniform((), 0, self._ensemble, tf.int32)
stats = {k: v[index] for k, v in stats.items()}
dist = self.get_dist(stats)
stoch = dist.sample() if sample else dist.mode()
prior = {'stoch': stoch, 'deter': deter, **stats}
return prior
def _suff_stats_ensemble(self, inp):
bs = list(inp.shape[:-1])
inp = inp.reshape([-1, inp.shape[-1]])
stats = []
for k in range(self._ensemble):
x = self.get(f'img_out_{k}', tfkl.Dense, self._hidden)(inp)
x = self.get(f'img_out_norm_{k}', NormLayer, self._norm)(x)
x = self._act(x)
stats.append(self._suff_stats_layer(f'img_dist_{k}', x))
    stats = {
        k: tf.stack([x[k] for x in stats], 0)
        for k in stats[0]}
stats = {
k: v.reshape([v.shape[0]] + bs + list(v.shape[2:]))
for k, v in stats.items()}
return stats
def _suff_stats_layer(self, name, x):
if self._discrete:
x = self.get(name, tfkl.Dense, self._stoch * self._discrete, None)(x)
logit = tf.reshape(x, x.shape[:-1] + [self._stoch, self._discrete])
return {'logit': logit}
else:
x = self.get(name, tfkl.Dense, 2 * self._stoch, None)(x)
mean, std = tf.split(x, 2, -1)
std = {
'softplus': lambda: tf.nn.softplus(std),
'sigmoid': lambda: tf.nn.sigmoid(std),
'sigmoid2': lambda: 2 * tf.nn.sigmoid(std / 2),
}[self._std_act]()
std = std + self._min_std
return {'mean': mean, 'std': std}
def kl_loss(self, post, prior, forward, balance, free, free_avg):
kld = tfd.kl_divergence
sg = lambda x: tf.nest.map_structure(tf.stop_gradient, x)
lhs, rhs = (prior, post) if forward else (post, prior)
mix = balance if forward else (1 - balance)
if balance == 0.5:
value = kld(self.get_dist(lhs), self.get_dist(rhs))
loss = tf.maximum(value, free).mean()
else:
value_lhs = value = kld(self.get_dist(lhs), self.get_dist(sg(rhs)))
value_rhs = kld(self.get_dist(sg(lhs)), self.get_dist(rhs))
if free_avg:
loss_lhs = tf.maximum(value_lhs.mean(), free)
loss_rhs = tf.maximum(value_rhs.mean(), free)
else:
loss_lhs = tf.maximum(value_lhs, free).mean()
loss_rhs = tf.maximum(value_rhs, free).mean()
loss = mix * loss_lhs + (1 - mix) * loss_rhs
return loss, value
class Encoder(common.Module):
def __init__(
self, shapes, cnn_keys=r'.*', mlp_keys=r'.*', act='elu', norm='none',
cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]):
self.shapes = shapes
self.cnn_keys = [
k for k, v in shapes.items() if re.match(cnn_keys, k) and len(v) == 3]
self.mlp_keys = [
k for k, v in shapes.items() if re.match(mlp_keys, k) and len(v) == 1]
print('Encoder CNN inputs:', list(self.cnn_keys))
print('Encoder MLP inputs:', list(self.mlp_keys))
self._act = get_act(act)
self._norm = norm
self._cnn_depth = cnn_depth
self._cnn_kernels = cnn_kernels
self._mlp_layers = mlp_layers
@tf.function
def __call__(self, data):
key, shape = list(self.shapes.items())[0]
batch_dims = data[key].shape[:-len(shape)]
data = {
k: tf.reshape(v, (-1,) + tuple(v.shape)[len(batch_dims):])
for k, v in data.items()}
outputs = []
if self.cnn_keys:
outputs.append(self._cnn({k: data[k] for k in self.cnn_keys}))
if self.mlp_keys:
outputs.append(self._mlp({k: data[k] for k in self.mlp_keys}))
output = tf.concat(outputs, -1)
return output.reshape(batch_dims + output.shape[1:])
def _cnn(self, data):
x = tf.concat(list(data.values()), -1)
x = x.astype(prec.global_policy().compute_dtype)
for i, kernel in enumerate(self._cnn_kernels):
depth = 2 ** i * self._cnn_depth
x = self.get(f'conv{i}', tfkl.Conv2D, depth, kernel, 2)(x)
x = self.get(f'convnorm{i}', NormLayer, self._norm)(x)
x = self._act(x)
return x.reshape(tuple(x.shape[:-3]) + (-1,))
def _mlp(self, data):
x = tf.concat(list(data.values()), -1)
x = x.astype(prec.global_policy().compute_dtype)
for i, width in enumerate(self._mlp_layers):
x = self.get(f'dense{i}', tfkl.Dense, width)(x)
x = self.get(f'densenorm{i}', NormLayer, self._norm)(x)
x = self._act(x)
return x
class Decoder(common.Module):
def __init__(
self, shapes, cnn_keys=r'.*', mlp_keys=r'.*', act='elu', norm='none',
cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]):
self._shapes = shapes
self.cnn_keys = [
k for k, v in shapes.items() if re.match(cnn_keys, k) and len(v) == 3]
self.mlp_keys = [
k for k, v in shapes.items() if re.match(mlp_keys, k) and len(v) == 1]
print('Decoder CNN outputs:', list(self.cnn_keys))
print('Decoder MLP outputs:', list(self.mlp_keys))
self._act = get_act(act)
self._norm = norm
self._cnn_depth = cnn_depth
self._cnn_kernels = cnn_kernels
self._mlp_layers = mlp_layers
def __call__(self, features):
features = tf.cast(features, prec.global_policy().compute_dtype)
outputs = {}
if self.cnn_keys:
outputs.update(self._cnn(features))
if self.mlp_keys:
outputs.update(self._mlp(features))
return outputs
def _cnn(self, features):
channels = {k: self._shapes[k][-1] for k in self.cnn_keys}
ConvT = tfkl.Conv2DTranspose
x = self.get('convin', tfkl.Dense, 32 * self._cnn_depth)(features)
x = tf.reshape(x, [-1, 1, 1, 32 * self._cnn_depth])
for i, kernel in enumerate(self._cnn_kernels):
depth = 2 ** (len(self._cnn_kernels) - i - 2) * self._cnn_depth
act, norm = self._act, self._norm
if i == len(self._cnn_kernels) - 1:
depth, act, norm = sum(channels.values()), tf.identity, 'none'
x = self.get(f'conv{i}', ConvT, depth, kernel, 2)(x)
x = self.get(f'convnorm{i}', NormLayer, norm)(x)
x = act(x)
x = x.reshape(features.shape[:-1] + x.shape[1:])
means = tf.split(x, list(channels.values()), -1)
dists = {
key: tfd.Independent(tfd.Normal(mean, 1), 3)
for (key, shape), mean in zip(channels.items(), means)}
return dists
def _mlp(self, features):
shapes = {k: self._shapes[k] for k in self.mlp_keys}
x = features
for i, width in enumerate(self._mlp_layers):
x = self.get(f'dense{i}', tfkl.Dense, width)(x)
x = self.get(f'densenorm{i}', NormLayer, self._norm)(x)
x = self._act(x)
dists = {}
for key, shape in shapes.items():
dists[key] = self.get(f'dense_{key}', DistLayer, shape)(x)
return dists
class MLP(common.Module):
def __init__(self, shape, layers, units, act='elu', norm='none', **out):
self._shape = (shape,) if isinstance(shape, int) else shape
self._layers = layers
self._units = units
self._norm = norm
self._act = get_act(act)
self._out = out
def __call__(self, features):
x = tf.cast(features, prec.global_policy().compute_dtype)
x = x.reshape([-1, x.shape[-1]])
for index in range(self._layers):
x = self.get(f'dense{index}', tfkl.Dense, self._units)(x)
x = self.get(f'norm{index}', NormLayer, self._norm)(x)
x = self._act(x)
x = x.reshape(features.shape[:-1] + [x.shape[-1]])
return self.get('out', DistLayer, self._shape, **self._out)(x)
class MultiMLP(common.Module):
  # MLP with shared feature-extraction layers and per-agent output heads.
def __init__(self, shape, layers, units, act='elu', norm='none', **out):
self._shape = (shape,) if isinstance(shape, int) else shape
self._layers = layers
self._units = units
self._norm = norm
self._act = get_act(act)
self._out = out
def __call__(self, features, idx=0):
x = tf.cast(features, prec.global_policy().compute_dtype)
x = x.reshape([-1, x.shape[-1]])
for index in range(self._layers):
x = self.get(f'dense{index}', tfkl.Dense, self._units)(x)
x = self.get(f'norm{index}', NormLayer, self._norm)(x)
x = self._act(x)
x = x.reshape(features.shape[:-1] + [x.shape[-1]])
    # Pass the agent index through to the MultiDistLayer.
return self.get('out', MultiDistLayer, self._shape, **self._out)(x, idx)
class GRUCell(tf.keras.layers.AbstractRNNCell):
def __init__(self, size, norm=False, act='tanh', update_bias=-1, **kwargs):
super().__init__()
self._size = size
self._act = get_act(act)
self._norm = norm
self._update_bias = update_bias
self._layer = tfkl.Dense(3 * size, use_bias=norm is not None, **kwargs)
if norm:
self._norm = tfkl.LayerNormalization(dtype=tf.float32)
@property
def state_size(self):
return self._size
@tf.function
def call(self, inputs, state):
state = state[0] # Keras wraps the state in a list.
parts = self._layer(tf.concat([inputs, state], -1))
if self._norm:
dtype = parts.dtype
parts = tf.cast(parts, tf.float32)
parts = self._norm(parts)
parts = tf.cast(parts, dtype)
reset, cand, update = tf.split(parts, 3, -1)
reset = tf.nn.sigmoid(reset)
cand = self._act(reset * cand)
update = tf.nn.sigmoid(update + self._update_bias)
output = update * cand + (1 - update) * state
return output, [output]
class DistLayer(common.Module):
def __init__(
self, shape, dist='mse', min_std=0.1, init_std=0.0):
self._shape = shape
self._dist = dist
self._min_std = min_std
self._init_std = init_std
def __call__(self, inputs):
out = self.get('out', tfkl.Dense, np.prod(self._shape))(inputs)
out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
out = tf.cast(out, tf.float32)
if self._dist in ('normal', 'tanh_normal', 'trunc_normal'):
std = self.get('std', tfkl.Dense, np.prod(self._shape))(inputs)
std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
std = tf.cast(std, tf.float32)
if self._dist == 'mse':
dist = tfd.Normal(out, 1.0)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'normal':
dist = tfd.Normal(out, std)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'binary':
dist = tfd.Bernoulli(out)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'tanh_normal':
mean = 5 * tf.tanh(out / 5)
std = tf.nn.softplus(std + self._init_std) + self._min_std
dist = tfd.Normal(mean, std)
dist = tfd.TransformedDistribution(dist, common.TanhBijector())
dist = tfd.Independent(dist, len(self._shape))
return common.SampleDist(dist)
if self._dist == 'trunc_normal':
std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std
dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1)
return tfd.Independent(dist, 1)
if self._dist == 'onehot':
return common.OneHotDist(out)
raise NotImplementedError(self._dist)
class MultiDistLayer(common.Module):
def __init__(
self, shape, dist='mse', min_std=0.1, init_std=0.0):
self._shape = shape
self._dist = dist
self._min_std = min_std
self._init_std = init_std
def __call__(self, inputs, idx=0):
out = self.get(f'out{idx}', tfkl.Dense, np.prod(self._shape))(inputs)
out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
out = tf.cast(out, tf.float32)
if self._dist in ('normal', 'tanh_normal', 'trunc_normal'):
std = self.get(f'std{idx}', tfkl.Dense, np.prod(self._shape))(inputs)
std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
std = tf.cast(std, tf.float32)
if self._dist == 'mse':
dist = tfd.Normal(out, 1.0)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'normal':
dist = tfd.Normal(out, std)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'binary':
dist = tfd.Bernoulli(out)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'tanh_normal':
mean = 5 * tf.tanh(out / 5)
std = tf.nn.softplus(std + self._init_std) + self._min_std
dist = tfd.Normal(mean, std)
dist = tfd.TransformedDistribution(dist, common.TanhBijector())
dist = tfd.Independent(dist, len(self._shape))
return common.SampleDist(dist)
if self._dist == 'trunc_normal':
std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std
dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1)
return tfd.Independent(dist, 1)
if self._dist == 'onehot':
return common.OneHotDist(out)
raise NotImplementedError(self._dist)
class NormLayer(common.Module):
def __init__(self, name):
if name == 'none':
self._layer = None
elif name == 'layer':
self._layer = tfkl.LayerNormalization()
else:
raise NotImplementedError(name)
def __call__(self, features):
if not self._layer:
return features
return self._layer(features)
def get_act(name):
if name == 'none':
return tf.identity
if name == 'mish':
return lambda x: x * tf.math.tanh(tf.nn.softplus(x))
elif hasattr(tf.nn, name):
return getattr(tf.nn, name)
elif hasattr(tf, name):
return getattr(tf, name)
else:
raise NotImplementedError(name)
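# --- Illustrative sketch (not part of the original file): a numpy analogue of
# the KL balancing in `EnsembleRSSM.kl_loss` above, using the closed-form KL
# between two hypothetical diagonal Gaussians. With forward=False, the prior
# term receives weight `balance` and the posterior term `1 - balance`.
if __name__ == '__main__':
  def gauss_kl(m1, s1, m2, s2):
    return np.log(s2 / s1) + (s1 ** 2 + (m1 - m2) ** 2) / (2 * s2 ** 2) - 0.5
  post, prior = (0.0, 1.0), (0.5, 1.2)  # hypothetical (mean, std) pairs
  balance, free = 0.8, 1.0
  value_lhs = gauss_kl(*post, *prior)  # trains the posterior (prior detached)
  value_rhs = gauss_kl(*post, *prior)  # trains the prior (posterior detached)
  mix = 1 - balance
  loss = mix * max(value_lhs, free) + (1 - mix) * max(value_rhs, free)
  print('balanced KL loss:', loss)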
|
cascade-main
|
dreamerv2/common/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import os
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
from dm_control import suite
_DEFAULT_TIME_LIMIT = 25
_CONTROL_TIMESTEP = .025
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 1.2
# Horizontal speeds (meters/second) above which move reward is 1.
_WALK_SPEED = 1
_RUN_SPEED = 8
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make_walker(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(os.path.join(root_dir, 'cdmc',
'walker.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def flip(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = PlanarWalker(move_speed=_RUN_SPEED,
forward=True,
flip=True,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add('benchmarking')
def all(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = PlanarWalker(move_speed=_RUN_SPEED,
forward=True,
flip=True,
all=True,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Walker domain."""
def torso_upright(self):
"""Returns projection from z-axes of torso to the z-axes of world."""
return self.named.data.xmat['torso', 'zz']
def torso_height(self):
"""Returns the height of the torso."""
return self.named.data.xpos['torso', 'z']
def horizontal_velocity(self):
"""Returns the horizontal velocity of the center-of-mass."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def orientations(self):
"""Returns planar orientations of all bodies."""
return self.named.data.xmat[1:, ['xx', 'xz']].ravel()
def angmomentum(self):
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class PlanarWalker(base.Task):
"""A planar walker task."""
def __init__(self, move_speed, forward=True, flip=False, random=None, all=False):
"""Initializes an instance of `PlanarWalker`.
Args:
move_speed: A float. If this value is zero, reward is given simply for
standing up. Otherwise this specifies a target horizontal velocity for
the walking task.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._move_speed = move_speed
self._forward = 1 if forward else -1
self._flip = flip
self._all = all
super(PlanarWalker, self).__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode.
In 'standing' mode, use initial orientation and small velocities.
In 'random' mode, randomize joint angles and let fall to the floor.
Args:
physics: An instance of `Physics`.
"""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
super(PlanarWalker, self).initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of body orientations, height and velocites."""
obs = collections.OrderedDict()
obs['orientations'] = physics.orientations()
obs['height'] = physics.torso_height()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics):
"""Returns a reward to the agent."""
standing = rewards.tolerance(physics.torso_height(),
bounds=(_STAND_HEIGHT, float('inf')),
margin=_STAND_HEIGHT / 2)
upright = (1 + physics.torso_upright()) / 2
stand_reward = (3 * standing + upright) / 4
if self._flip:
move_reward = rewards.tolerance(self._forward *
physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
move_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(self._move_speed, float('inf')),
margin=self._move_speed / 2,
value_at_margin=0.5,
sigmoid='linear')
if self._all:
walk_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(_WALK_SPEED, float('inf')),
margin=_WALK_SPEED / 2,
value_at_margin=0.5,
sigmoid='linear')
run_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED / 2,
value_at_margin=0.5,
sigmoid='linear')
flip_reward = rewards.tolerance(self._forward *
physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
reward_dict = {
'stand': stand_reward,
'walk': stand_reward * (5*walk_reward + 1) / 6,
'run': stand_reward * (5*run_reward + 1) / 6,
'flip': flip_reward
}
return reward_dict
else:
return stand_reward * (5 * move_reward + 1) / 6
|
cascade-main
|
dreamerv2/common/cdmc/walker.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .walker import make_walker
from .cheetah import make_cheetah
def make_dmc_all(domain, task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
if domain == 'walker':
return make_walker(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
    elif domain == 'cheetah':
        return make_cheetah(task,
                            task_kwargs=task_kwargs,
                            environment_kwargs=environment_kwargs,
                            visualize_reward=visualize_reward)
    else:
        raise ValueError(f'Unknown domain: {domain}')
DMC_TASK_IDS = {
'dmc_walker_all': ['stand', 'walk', 'run', 'flip'],
'dmc_cheetah_all': ['run-fwd', 'run-bwd', 'flip-fwd', 'flip-bwd'],
}
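# --- Illustrative sketch (not part of the original file): constructing one of
# the multi-reward environments. Requires a working dm_control installation;
# with task 'all', get_reward returns a dict keyed like DMC_TASK_IDS.
if __name__ == '__main__':
    env = make_dmc_all('walker', 'all')
    env.reset()
    print(DMC_TASK_IDS['dmc_walker_all'])  # ['stand', 'walk', 'run', 'flip']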
|
cascade-main
|
dreamerv2/common/cdmc/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
# How long the simulation will run, in seconds.
_DEFAULT_TIME_LIMIT = 10
# Running speed above which reward is 1.
_RUN_SPEED = 10
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make_cheetah(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
return common.read_model('cheetah.xml'), common.ASSETS
@SUITE.add('benchmarking')
def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=True,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def run_back(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=False,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_forward(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=False,flip=True,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_backward(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=True,flip=True,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def all(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=True,flip=True,random=random,all=True)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Cheetah domain."""
def speed(self):
"""Returns the horizontal speed of the Cheetah."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def angmomentum(self):
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class Cheetah(base.Task):
"""A `Task` to train a running Cheetah."""
def __init__(self, forward=True, flip=False, random=None, all=False):
self._forward = 1 if forward else -1
self._flip = flip
self._all = all
super(Cheetah, self).__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode."""
# The indexing below assumes that all joints have a single DOF.
assert physics.model.nq == physics.model.njnt
is_limited = physics.model.jnt_limited == 1
lower, upper = physics.model.jnt_range[is_limited].T
physics.data.qpos[is_limited] = self.random.uniform(lower, upper)
# Stabilize the model before the actual simulation.
for _ in range(200):
physics.step()
physics.data.time = 0
self._timeout_progress = 0
super(Cheetah, self).initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of the state, ignoring horizontal position."""
obs = collections.OrderedDict()
# Ignores horizontal position to maintain translational invariance.
obs['position'] = physics.data.qpos[1:].copy()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics):
"""Returns a reward to the agent."""
if self._flip:
reward = rewards.tolerance(self._forward*physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
reward = rewards.tolerance(self._forward*physics.speed(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED,
value_at_margin=0,
sigmoid='linear')
if self._all:
flip_fwd = rewards.tolerance(1*physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
flip_bwd = rewards.tolerance(-1*physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
run_fwd = rewards.tolerance(1*physics.speed(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED,
value_at_margin=0,
sigmoid='linear')
run_bwd = rewards.tolerance(-1*physics.speed(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED,
value_at_margin=0,
sigmoid='linear')
reward = {
'run-fwd': run_fwd,
'run-bwd': run_bwd,
'flip-fwd': flip_fwd,
'flip-bwd': flip_bwd
}
return reward
|
cascade-main
|
dreamerv2/common/cdmc/cheetah.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torchvision
from transformers import BertForSequenceClassification, AdamW, get_scheduler
class ToyNet(torch.nn.Module):
def __init__(self, dim, gammas):
super(ToyNet, self).__init__()
        # gammas is a list of three values: the first determines how fast the
        # spurious feature is learned, the second how fast the core feature is
        # learned, and the third how fast the noise features are learned.
self.register_buffer(
"gammas", torch.tensor([gammas[:2] + gammas[2:] * (dim - 2)])
)
self.fc = torch.nn.Linear(dim, 1, bias=False)
self.fc.weight.data = 0.01 / self.gammas * self.fc.weight.data
def forward(self, x):
return self.fc((x * self.gammas).float()).squeeze()
class BertWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x):
return self.model(
input_ids=x[:, :, 0],
attention_mask=x[:, :, 1],
token_type_ids=x[:, :, 2]).logits
def get_bert_optim(network, lr, weight_decay):
no_decay = ["bias", "LayerNorm.weight"]
    decay_params = []
    nodecay_params = []
    for n, p in network.named_parameters():
        if any(nd in n for nd in no_decay):
            # Biases and LayerNorm weights are excluded from weight decay.
            nodecay_params.append(p)
        else:
            decay_params.append(p)
optimizer_grouped_parameters = [
{
"params": decay_params,
"weight_decay": weight_decay,
},
{
"params": nodecay_params,
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=lr,
eps=1e-8)
return optimizer
def get_sgd_optim(network, lr, weight_decay):
return torch.optim.SGD(
network.parameters(),
lr=lr,
weight_decay=weight_decay,
momentum=0.9)
class ERM(torch.nn.Module):
def __init__(self, hparams, dataloader):
super().__init__()
self.hparams = dict(hparams)
dataset = dataloader.dataset
self.n_batches = len(dataloader)
self.data_type = dataset.data_type
self.n_classes = len(set(dataset.y))
self.n_groups = len(set(dataset.g))
self.n_examples = len(dataset)
self.last_epoch = 0
self.best_selec_val = 0
self.init_model_(self.data_type)
def init_model_(self, data_type, text_optim="sgd"):
self.clip_grad = text_optim == "adamw"
optimizers = {
"adamw": get_bert_optim,
"sgd": get_sgd_optim
}
if data_type == "images":
self.network = torchvision.models.resnet.resnet50(pretrained=True)
self.network.fc = torch.nn.Linear(
self.network.fc.in_features, self.n_classes)
self.optimizer = optimizers['sgd'](
self.network,
self.hparams['lr'],
self.hparams['weight_decay'])
self.lr_scheduler = None
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
elif data_type == "text":
self.network = BertWrapper(
BertForSequenceClassification.from_pretrained(
'bert-base-uncased', num_labels=self.n_classes))
self.network.zero_grad()
self.optimizer = optimizers[text_optim](
self.network,
self.hparams['lr'],
self.hparams['weight_decay'])
num_training_steps = self.hparams["num_epochs"] * self.n_batches
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps)
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
elif data_type == "toy":
gammas = (
self.hparams['gamma_spu'],
self.hparams['gamma_core'],
self.hparams['gamma_noise'])
self.network = ToyNet(self.hparams['dim_noise'] + 2, gammas)
self.optimizer = optimizers['sgd'](
self.network,
self.hparams['lr'],
self.hparams['weight_decay'])
self.lr_scheduler = None
self.loss = lambda x, y:\
torch.nn.BCEWithLogitsLoss(reduction="none")(x.squeeze(),
y.float())
self.cuda()
def compute_loss_value_(self, i, x, y, g, epoch):
return self.loss(self.network(x), y).mean()
def update(self, i, x, y, g, epoch):
x, y, g = x.cuda(), y.cuda(), g.cuda()
loss_value = self.compute_loss_value_(i, x, y, g, epoch)
if loss_value is not None:
self.optimizer.zero_grad()
loss_value.backward()
if self.clip_grad:
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.0)
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
if self.data_type == "text":
self.network.zero_grad()
loss_value = loss_value.item()
self.last_epoch = epoch
return loss_value
def predict(self, x):
return self.network(x)
def accuracy(self, loader):
nb_groups = loader.dataset.nb_groups
nb_labels = loader.dataset.nb_labels
corrects = torch.zeros(nb_groups * nb_labels)
totals = torch.zeros(nb_groups * nb_labels)
self.eval()
with torch.no_grad():
for i, x, y, g in loader:
predictions = self.predict(x.cuda())
if predictions.squeeze().ndim == 1:
predictions = (predictions > 0).cpu().eq(y).float()
else:
predictions = predictions.argmax(1).cpu().eq(y).float()
groups = (nb_groups * y + g)
for gi in groups.unique():
corrects[gi] += predictions[groups == gi].sum()
totals[gi] += (groups == gi).sum()
corrects, totals = corrects.tolist(), totals.tolist()
self.train()
return sum(corrects) / sum(totals),\
[c/t for c, t in zip(corrects, totals)]
def load(self, fname):
dicts = torch.load(fname)
self.last_epoch = dicts["epoch"]
self.load_state_dict(dicts["model"])
self.optimizer.load_state_dict(dicts["optimizer"])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(dicts["scheduler"])
def save(self, fname):
lr_dict = None
if self.lr_scheduler is not None:
lr_dict = self.lr_scheduler.state_dict()
torch.save(
{
"model": self.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": lr_dict,
"epoch": self.last_epoch,
"best_selec_val": self.best_selec_val,
},
fname,
)
class GroupDRO(ERM):
    def __init__(self, hparams, dataloader):
        super(GroupDRO, self).__init__(hparams, dataloader)
self.register_buffer(
"q", torch.ones(self.n_classes * self.n_groups).cuda())
def groups_(self, y, g):
idx_g, idx_b = [], []
all_g = y * self.n_groups + g
        for group in all_g.unique():
            idx_g.append(group)
            idx_b.append(all_g == group)
return zip(idx_g, idx_b)
def compute_loss_value_(self, i, x, y, g, epoch):
losses = self.loss(self.network(x), y)
for idx_g, idx_b in self.groups_(y, g):
self.q[idx_g] *= (
self.hparams["eta"] * losses[idx_b].mean()).exp().item()
self.q /= self.q.sum()
loss_value = 0
for idx_g, idx_b in self.groups_(y, g):
loss_value += self.q[idx_g] * losses[idx_b].mean()
return loss_value
class JTT(ERM):
    def __init__(self, hparams, dataloader):
        super(JTT, self).__init__(hparams, dataloader)
self.register_buffer(
"weights", torch.ones(self.n_examples, dtype=torch.long).cuda())
def compute_loss_value_(self, i, x, y, g, epoch):
if epoch == self.hparams["T"] + 1 and\
self.last_epoch == self.hparams["T"]:
self.init_model_(self.data_type, text_optim="adamw")
predictions = self.network(x)
if epoch != self.hparams["T"]:
loss_value = self.loss(predictions, y).mean()
else:
self.eval()
if predictions.squeeze().ndim == 1:
wrong_predictions = (predictions > 0).cpu().ne(y).float()
else:
wrong_predictions = predictions.argmax(1).cpu().ne(y).float()
self.weights[i] += wrong_predictions.detach() * (self.hparams["up"] - 1)
self.train()
loss_value = None
return loss_value
def load(self, fname):
dicts = torch.load(fname)
self.last_epoch = dicts["epoch"]
if self.last_epoch > self.hparams["T"]:
self.init_model_(self.data_type, text_optim="adamw")
self.load_state_dict(dicts["model"])
self.optimizer.load_state_dict(dicts["optimizer"])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(dicts["scheduler"])
|
BalancingGroups-main
|
models.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import os
import re
import tarfile
from zipfile import ZipFile
import logging
logging.basicConfig(level=logging.INFO)
import gdown
import pandas as pd
def download_and_extract(url, dst, remove=True):
gdown.download(url, dst, quiet=False)
if dst.endswith(".tar.gz"):
tar = tarfile.open(dst, "r:gz")
tar.extractall(os.path.dirname(dst))
tar.close()
if dst.endswith(".tar"):
tar = tarfile.open(dst, "r:")
tar.extractall(os.path.dirname(dst))
tar.close()
if dst.endswith(".zip"):
zf = ZipFile(dst, "r")
zf.extractall(os.path.dirname(dst))
zf.close()
if remove:
os.remove(dst)
def download_datasets(data_path, datasets=['celeba', 'waterbirds', 'civilcomments', 'multinli']):
os.makedirs(data_path, exist_ok=True)
dataset_downloaders = {
'celeba': download_celeba,
'waterbirds': download_waterbirds,
'civilcomments': download_civilcomments,
'multinli': download_multinli,
}
for dataset in datasets:
dataset_downloaders[dataset](data_path)
def download_civilcomments(data_path):
logging.info("Downloading CivilComments")
civilcomments_dir = os.path.join(data_path, "civilcomments")
os.makedirs(civilcomments_dir, exist_ok=True)
download_and_extract(
"https://worksheets.codalab.org/rest/bundles/0x8cd3de0634154aeaad2ee6eb96723c6e/contents/blob/",
os.path.join(civilcomments_dir, "civilcomments.tar.gz"),
)
def download_multinli(data_path):
logging.info("Downloading MultiNLI")
multinli_dir = os.path.join(data_path, "multinli")
glue_dir = os.path.join(multinli_dir, "glue_data/MNLI/")
os.makedirs(glue_dir, exist_ok=True)
multinli_tar = os.path.join(glue_dir, "multinli_bert_features.tar.gz")
download_and_extract(
"https://nlp.stanford.edu/data/dro/multinli_bert_features.tar.gz",
multinli_tar,
)
os.makedirs(os.path.join(multinli_dir, "data"), exist_ok=True)
download_and_extract(
"https://raw.githubusercontent.com/kohpangwei/group_DRO/master/dataset_metadata/multinli/metadata_random.csv",
os.path.join(multinli_dir, "data", "metadata_random.csv"),
remove=False
)
def download_waterbirds(data_path):
logging.info("Downloading Waterbirds")
water_birds_dir = os.path.join(data_path, "waterbirds")
os.makedirs(water_birds_dir, exist_ok=True)
water_birds_dir_tar = os.path.join(water_birds_dir, "waterbirds.tar.gz")
download_and_extract(
"https://nlp.stanford.edu/data/dro/waterbird_complete95_forest2water2.tar.gz",
water_birds_dir_tar,
)
def download_celeba(data_path):
logging.info("Downloading CelebA")
celeba_dir = os.path.join(data_path, "celeba")
os.makedirs(celeba_dir, exist_ok=True)
download_and_extract(
"https://drive.google.com/uc?id=1mb1R6dXfWbvk3DnlWOBO8pDeoBKOcLE6",
os.path.join(celeba_dir, "img_align_celeba.zip"),
)
download_and_extract(
"https://drive.google.com/uc?id=1acn0-nE4W7Wa17sIkKB0GtfW4Z41CMFB",
os.path.join(celeba_dir, "list_eval_partition.txt"),
remove=False
)
download_and_extract(
"https://drive.google.com/uc?id=11um21kRUuaUNoMl59TCe2fb01FNjqNms",
os.path.join(celeba_dir, "list_attr_celeba.txt"),
remove=False
)
def generate_metadata(data_path, datasets=['celeba', 'waterbirds', 'civilcomments', 'multinli']):
dataset_metadata_generators = {
'celeba': generate_metadata_celeba,
'waterbirds': generate_metadata_waterbirds,
'civilcomments': generate_metadata_civilcomments,
'multinli': generate_metadata_multinli,
}
for dataset in datasets:
dataset_metadata_generators[dataset](data_path)
def generate_metadata_celeba(data_path):
logging.info("Generating metadata for CelebA")
with open(os.path.join(data_path, "celeba/list_eval_partition.txt"), "r") as f:
splits = f.readlines()
with open(os.path.join(data_path, "celeba/list_attr_celeba.txt"), "r") as f:
attrs = f.readlines()[2:]
    with open(os.path.join(data_path, "metadata_celeba.csv"), "w") as f:
        f.write("id,filename,split,y,a\n")
        for i, (split, attr) in enumerate(zip(splits, attrs)):
            fi, si = split.strip().split()
            ai = attr.strip().split()[1:]
            yi = 1 if ai[9] == "1" else 0
            gi = 1 if ai[20] == "1" else 0
            f.write("{},{},{},{},{}\n".format(i + 1, fi, si, yi, gi))
def generate_metadata_waterbirds(data_path):
logging.info("Generating metadata for waterbirds")
df = pd.read_csv(os.path.join(data_path, "waterbirds/waterbird_complete95_forest2water2/metadata.csv"))
df = df.rename(columns={"img_id": "id", "img_filename": "filename", "place": "a"})
df[["id", "filename", "split", "y", "a"]].to_csv(
os.path.join(data_path, "metadata_waterbirds.csv"), index=False
)
def generate_metadata_civilcomments(data_path):
logging.info("Generating metadata for civilcomments")
df = pd.read_csv(
os.path.join(data_path, "civilcomments", "all_data_with_identities.csv"),
index_col=0,
)
group_attrs = [
"male",
"female",
"LGBTQ",
"christian",
"muslim",
"other_religions",
"black",
"white",
]
cols_to_keep = ["comment_text", "split", "toxicity"]
df = df[cols_to_keep + group_attrs]
df = df.rename(columns={"toxicity": "y"})
df["y"] = (df["y"] >= 0.5).astype(int)
df[group_attrs] = (df[group_attrs] >= 0.5).astype(int)
df["no active attributes"] = 0
df.loc[(df[group_attrs].sum(axis=1)) == 0, "no active attributes"] = 1
few_groups, all_groups = [], []
train_df = df.groupby("split").get_group("train")
split_df = train_df.rename(columns={"no active attributes": "a"})
few_groups.append(split_df[["y", "split", "comment_text", "a"]])
for split, split_df in df.groupby("split"):
for i, attr in enumerate(group_attrs):
test_df = split_df.loc[
split_df[attr] == 1, ["y", "split", "comment_text"]
].copy()
test_df["a"] = i
all_groups.append(test_df)
if split != "train":
few_groups.append(test_df)
few_groups = pd.concat(few_groups).reset_index(drop=True)
all_groups = pd.concat(all_groups).reset_index(drop=True)
for name, df in {"coarse": few_groups, "fine": all_groups}.items():
df.index.name = "filename"
df = df.reset_index()
df["id"] = df["filename"]
df["split"] = df["split"].replace({"train": 0, "val": 1, "test": 2})
text = df.pop("comment_text")
df[["id", "filename", "split", "y", "a"]].to_csv(
os.path.join(data_path, f"metadata_civilcomments_{name}.csv"), index=False
)
text.to_csv(
os.path.join(data_path, "civilcomments", f"civilcomments_{name}.csv"),
index=False,
)
def generate_metadata_multinli(data_path):
logging.info("Generating metadata for multinli")
df = pd.read_csv(
os.path.join(data_path, "multinli", "data", "metadata_random.csv"), index_col=0
)
df = df.rename(columns={"gold_label": "y", "sentence2_has_negation": "a"})
df = df.reset_index(drop=True)
df.index.name = "id"
df = df.reset_index()
df["filename"] = df["id"]
df = df.reset_index()[["id", "filename", "split", "y", "a"]]
df.to_csv(os.path.join(data_path, "metadata_multinli.csv"), index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Initialize repo with datasets")
parser.add_argument(
"datasets",
nargs="+",
default=['celeba', 'waterbirds', 'civilcomments', 'multinli'],
type=str,
help="Which datasets to download and/or generate metadata for",
)
parser.add_argument(
"--data_path",
default="data",
type=str,
help="Root directory to store datasets",
)
parser.add_argument(
"--download",
action="store_true",
default=False,
)
args = parser.parse_args()
if args.download:
download_datasets(args.data_path, args.datasets)
generate_metadata(args.data_path, args.datasets)
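# --- Illustrative usage (not part of the original file):
#   python setup_datasets.py waterbirds celeba --data_path data --download
# downloads the named datasets into ./data and writes metadata_*.csv files;
# without --download, only the metadata files are (re)generated.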
|
BalancingGroups-main
|
setup_datasets.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
import submitit
from itertools import product
from train import run_experiment, parse_args
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in product(*vals):
yield dict(zip(keys, instance))
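# A minimal sketch of what product_dict yields (values are illustrative):
#   >>> list(product_dict(lr=[1e-5, 1e-4], method=["erm", "rwg"]))
#   [{'lr': 1e-05, 'method': 'erm'}, {'lr': 1e-05, 'method': 'rwg'},
#    {'lr': 0.0001, 'method': 'erm'}, {'lr': 0.0001, 'method': 'rwg'}]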
if __name__ == "__main__":
args = parse_args()
executor = submitit.SlurmExecutor(folder=args['slurm_output_dir'])
executor.update_parameters(
time=args["max_time"],
gpus_per_node=1,
array_parallelism=16,
cpus_per_task=1,
        partition=args["slurm_partition"])
sweep = {
'dataset': ['toy'],
'dim_noise': [1200],
'selector': ['min_acc_va'],
'num_epochs': [500],
'gamma_spu': [4.0],
'gamma_core': [1.0],
'gamma_noise': [2.0, 4.0],
'method': ["erm", "subg", "rwg"],
'lr': [1e-6, 1e-5],
'weight_decay': [0, 0.1, 1, 10],
'batch_size': [250],
'init_seed': list(range(int(args["num_init_seeds"]))),
'T': [1],
'up': [1],
'eta': [0.1],
}
sweep.update({k: [v] for k, v in args.items()})
commands = list(product_dict(**sweep))
print('Launching {} runs'.format(len(commands)))
for i, command in enumerate(commands):
command['hparams_seed'] = i
os.makedirs(args["output_dir"], exist_ok=True)
torch.manual_seed(0)
commands = [commands[int(p)] for p in torch.randperm(len(commands))]
executor.map_array(run_experiment, commands)
|
BalancingGroups-main
|
train_toy.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
import pandas as pd
import numpy as np
from PIL import Image
from torchvision import transforms
from transformers import BertTokenizer
from torch.utils.data import DataLoader
from sklearn.datasets import make_blobs
class GroupDataset:
def __init__(
self, split, root, metadata, transform, subsample_what=None, duplicates=None
):
self.transform_ = transform
df = pd.read_csv(metadata)
df = df[df["split"] == ({"tr": 0, "va": 1, "te": 2}[split])]
self.i = list(range(len(df)))
self.x = df["filename"].astype(str).map(lambda x: os.path.join(root, x)).tolist()
self.y = df["y"].tolist()
self.g = df["a"].tolist()
self.count_groups()
if subsample_what is not None:
self.subsample_(subsample_what)
if duplicates is not None:
self.duplicate_(duplicates)
def count_groups(self):
self.wg, self.wy = [], []
self.nb_groups = len(set(self.g))
self.nb_labels = len(set(self.y))
self.group_sizes = [0] * self.nb_groups * self.nb_labels
self.class_sizes = [0] * self.nb_labels
for i in self.i:
self.group_sizes[self.nb_groups * self.y[i] + self.g[i]] += 1
self.class_sizes[self.y[i]] += 1
for i in self.i:
self.wg.append(
len(self) / self.group_sizes[self.nb_groups * self.y[i] + self.g[i]]
)
self.wy.append(len(self) / self.class_sizes[self.y[i]])
def subsample_(self, subsample_what):
perm = torch.randperm(len(self)).tolist()
if subsample_what == "groups":
min_size = min(list(self.group_sizes))
else:
min_size = min(list(self.class_sizes))
counts_g = [0] * self.nb_groups * self.nb_labels
counts_y = [0] * self.nb_labels
new_i = []
for p in perm:
y, g = self.y[self.i[p]], self.g[self.i[p]]
if (
subsample_what == "groups"
and counts_g[self.nb_groups * int(y) + int(g)] < min_size
) or (subsample_what == "classes" and counts_y[int(y)] < min_size):
counts_g[self.nb_groups * int(y) + int(g)] += 1
counts_y[int(y)] += 1
new_i.append(self.i[p])
self.i = new_i
self.count_groups()
def duplicate_(self, duplicates):
new_i = []
for i, duplicate in zip(self.i, duplicates):
new_i += [i] * duplicate
self.i = new_i
self.count_groups()
def __getitem__(self, i):
j = self.i[i]
x = self.transform(self.x[j])
y = torch.tensor(self.y[j], dtype=torch.long)
g = torch.tensor(self.g[j], dtype=torch.long)
return torch.tensor(i, dtype=torch.long), x, y, g
def __len__(self):
return len(self.i)
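# Reading sketch of the weights computed in count_groups above: wg and wy are
# inverse group/class frequencies (len(self) / size), so a WeightedRandomSampler
# fed with wg visits every (label, group) cell equally often in expectation;
# get_loaders below uses exactly this for the "rwg" and "dro" methods.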
class Waterbirds(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
root = os.path.join(data_path, "waterbirds/waterbird_complete95_forest2water2/")
metadata = os.path.join(data_path,"metadata_waterbirds.csv")
transform = transforms.Compose(
[
transforms.Resize(
(
int(224 * (256 / 224)),
int(224 * (256 / 224)),
)
),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
super().__init__(split, root, metadata, transform, subsample_what, duplicates)
self.data_type = "images"
def transform(self, x):
return self.transform_(Image.open(x).convert("RGB"))
class CelebA(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
root = os.path.join(data_path, "celeba/img_align_celeba/")
metadata = os.path.join(data_path,"metadata_celeba.csv")
transform = transforms.Compose(
[
transforms.CenterCrop(178),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
super().__init__(split, root, metadata, transform, subsample_what, duplicates)
self.data_type = "images"
def transform(self, x):
return self.transform_(Image.open(x).convert("RGB"))
class MultiNLI(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
root = os.path.join(data_path, "multinli", "glue_data", "MNLI")
metadata = os.path.join(data_path, "metadata_multinli.csv")
self.features_array = []
for feature_file in [
"cached_train_bert-base-uncased_128_mnli",
"cached_dev_bert-base-uncased_128_mnli",
"cached_dev_bert-base-uncased_128_mnli-mm",
]:
features = torch.load(os.path.join(root, feature_file))
self.features_array += features
self.all_input_ids = torch.tensor(
[f.input_ids for f in self.features_array], dtype=torch.long
)
self.all_input_masks = torch.tensor(
[f.input_mask for f in self.features_array], dtype=torch.long
)
self.all_segment_ids = torch.tensor(
[f.segment_ids for f in self.features_array], dtype=torch.long
)
self.all_label_ids = torch.tensor(
[f.label_id for f in self.features_array], dtype=torch.long
)
self.x_array = torch.stack(
(self.all_input_ids, self.all_input_masks, self.all_segment_ids), dim=2
)
self.data_type = "text"
super().__init__(
split, "", metadata, self.transform, subsample_what, duplicates
)
def transform(self, i):
return self.x_array[int(i)]
class CivilComments(GroupDataset):
def __init__(
self,
data_path,
split,
subsample_what=None,
duplicates=None,
granularity="coarse",
):
metadata = os.path.join(data_path,"metadata_civilcomments_{}.csv".format(granularity))
text = pd.read_csv(
os.path.join(
data_path, "civilcomments/civilcomments_{}.csv".format(granularity)
)
)
self.text_array = list(text["comment_text"])
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
self.data_type = "text"
super().__init__(
split, "", metadata, self.transform, subsample_what, duplicates
)
def transform(self, idx):
text = self.text_array[int(idx)]
tokens = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=220,
return_tensors="pt",
)
return torch.squeeze(
torch.stack(
(
tokens["input_ids"],
tokens["attention_mask"],
tokens["token_type_ids"],
),
dim=2,
),
dim=0,
)
class CivilCommentsFine(CivilComments):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
super().__init__(data_path, split, subsample_what, duplicates, "fine")
class Toy(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
self.data_type = "toy"
n_samples = 1000
dim_noise = 1200
self.i, self.x, self.y, self.g = self.make_dataset(
n_samples=n_samples,
dim_noise=dim_noise,
core_cor=1.0,
spu_cor=0.8,
train=(split == "tr"),
)
self.count_groups()
if subsample_what is not None:
self.subsample_(subsample_what)
if duplicates is not None:
self.duplicate_(duplicates)
def transform(self, x):
return torch.tensor(x)
def make_dataset(
self,
n_samples=1000,
dim_noise=1200,
blob_std=0.15,
core_cor=1.0,
spu_cor=0.8,
train=True,
):
X = make_blobs(n_samples=n_samples, centers=1, cluster_std=[blob_std])[0]
X -= X.mean(0, keepdims=True) + np.array([[1.0, 1.0]])
y = np.array([-1] * (n_samples // 2) + [1] * (n_samples // 2))
g = np.ones((n_samples))
# making of the core feature
core_features = X[:, 0] * y
# random without replacement
random_indices_for_core = np.random.permutation(np.arange(n_samples))[
: int((1 - core_cor) * n_samples)
]
core_features[random_indices_for_core] *= -1
g[random_indices_for_core] *= -1
# making of the spurious feature
spu_features = X[:, 1] * y
random_indices_for_spu = np.random.permutation(np.arange(n_samples))[
: int((1 - spu_cor) * n_samples)
]
spu_features[random_indices_for_spu] *= -1
g[random_indices_for_spu] *= -1
X = np.vstack([spu_features, core_features]).T
# noise = np.random.randn(n_samples, dim_noise) / np.sqrt(dim_noise)
noise = np.random.randn(n_samples, dim_noise)
if not train:
# The average of noise is zero for both training and the test sets.
# However, for the test set, we compute the "Expected loss" instead
# of the "Empirical loss". For that reason, we can simply set the
# noise to be zero for the test set.
noise *= 0.0
X = np.concatenate([X, noise], 1)
i = np.arange(len(y))
# y denotes the label
# g denotes the group (minority or majority)
# i denotes the index
y = ((y + 1) / 2).astype(int) # 0 or 1
g = ((g + 1) / 2).astype(int) # 0 or 1
return i, X, y, g
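# Reading sketch of the group encoding above (not part of the original code):
# g starts at +1 and is multiplied by -1 once per corrupted feature, so after
# the (g + 1) / 2 remap, g == 0 marks examples where exactly one of the two
# features was flipped (the minority groups) and g == 1 marks examples with
# zero (or both) flips.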
def get_loaders(data_path, dataset_name, batch_size, method="erm", duplicates=None):
Dataset = {
"waterbirds": Waterbirds,
"celeba": CelebA,
"multinli": MultiNLI,
"civilcomments": CivilCommentsFine
if method in ("subg", "rwg")
else CivilComments,
"toy": Toy,
}[dataset_name]
def dl(dataset, bs, shuffle, weights):
if weights is not None:
sampler = torch.utils.data.WeightedRandomSampler(weights, len(weights))
else:
sampler = None
return DataLoader(
dataset,
batch_size=bs,
shuffle=shuffle,
sampler=sampler,
num_workers=4,
pin_memory=True,
)
if method == "subg":
subsample_what = "groups"
elif method == "suby":
subsample_what = "classes"
else:
subsample_what = None
dataset_tr = Dataset(data_path, "tr", subsample_what, duplicates)
if method == "rwg" or method == "dro":
weights_tr = dataset_tr.wg
elif method == "rwy":
weights_tr = dataset_tr.wy
else:
weights_tr = None
return {
"tr": dl(dataset_tr, batch_size, weights_tr is None, weights_tr),
"va": dl(Dataset(data_path, "va", None), 128, False, None),
"te": dl(Dataset(data_path, "te", None), 128, False, None),
}
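# Hypothetical usage sketch (path and batch size are assumptions):
#   loaders = get_loaders("data", "waterbirds", batch_size=32, method="subg")
#   for i, x, y, g in loaders["tr"]:
#       ...  # i: dataset indices, x: inputs, y: labels, g: group ids
# With method="subg", the training split is subsampled so that every
# (label, group) cell shrinks to the size of the smallest one.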
|
BalancingGroups-main
|
datasets.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import matplotlib
from matplotlib.colors import ListedColormap
import numpy as np
import torch
import torch.utils.data
from models import ToyNet
from parse import parse_json_to_df
from datasets import Toy
import matplotlib.pyplot as plt
from torch import FloatTensor as FT
import seaborn as sns
from tqdm import tqdm
import itertools
def generate_heatmap_plane(X):
xlim = np.array([-2, 2])
ylim = np.array([-2, 2])
n = 200
d1, d2 = torch.meshgrid(
[torch.linspace(xlim[0], xlim[1], n), torch.linspace(ylim[0], ylim[1], n)]
)
heatmap_plane = torch.stack((d1.flatten(), d2.flatten()), dim=1)
    # Below, we compute the squared distance of each grid point to the training
    # datapoints. If it is below 1e-3, the grid point reuses the noise
    # dimensions of the closest training point.
    # dists has shape (n * n, n_samples)
dists = (heatmap_plane[:, 0:1] - FT(X[:, 0:1].T)) ** 2 + (
heatmap_plane[:, 1:2] - FT(X[:, 1:2].T)
) ** 2
noise_dims = FT(X)[torch.argmin(dists, 1)][:, 2:] * (
dists.min(1)[0] < 0.001
).unsqueeze(1)
return torch.cat([heatmap_plane, noise_dims], 1)
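# Shape sketch (assuming the defaults above): the returned tensor is
# (n * n, 2 + dim_noise) = (40000, 1202). The first two columns are the grid
# coordinates; the remaining columns copy the noise dimensions of the nearest
# training point whenever its squared distance is below 1e-3, and are zero
# otherwise.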
def load_model(path):
state_dict = torch.load(path)
gammas = [
state_dict["model"]["network.gammas"].squeeze()[i].item() for i in range(3)
]
model = torch.nn.ModuleDict({"network": ToyNet(1202, gammas)})
model.load_state_dict(state_dict["model"])
model = model.network
model.to(DEVICE)
return model
def plot(
exps,
all_train_envs,
all_hm,
gammas,
heatmap_plane,
error_df,
filename="toy_exp",
):
heatmap = all_hm.mean(1)
matplotlib.rcParams["contour.negative_linestyle"] = "solid"
cm = ListedColormap(["#C82506", "#0365C0"])
plt.rc("font", size=18, family="Times New Roman")
# plt.figure(figsize=(16, 4.5))
fig, axs = plt.subplots(2, len(exps), figsize=(4 * len(exps), 8))
n = int(np.sqrt(heatmap_plane.shape[0]))
hmp_x = heatmap_plane[:, 0].detach().cpu().numpy().reshape(n, n)
hmp_y = heatmap_plane[:, 1].detach().cpu().numpy().reshape(n, n)
hma = heatmap.reshape(-1, n, n).sigmoid()
for i in range(len(exps)):
ax = axs[0, i] if len(exps) > 1 else axs[0]
        vmin, vmax = hma[i, -1, -1], hma[i, 1, 1]
        delta = vmax - vmin
        vmin, vmax = vmin - 0.25 * delta, vmax + 0.25 * delta
cm = plt.cm.RdBu.copy()
cm.set_under("#C82506")
cm.set_over("#0365C0")
p = ax.contourf(
hmp_x,
hmp_y,
hma[i],
np.linspace(vmin, vmax, 20),
cmap=cm,
alpha=0.8,
vmin=vmin,
vmax=vmax,
extend="both"
)
ax.contour(
hmp_x, hmp_y, hma[i], [0.5], antialiased=True, linewidths=1.0, colors="k"
)
ax.set_title(exps[i].upper())
ax.set_xlabel("x spu * gamma spu")
ax.set_ylabel("x core * gamma core")
ax.text(-1.7, 1.7, "I", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.text(1.7, 1.7, "II", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.text(-1.7, -1.7, "III", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.text(1.7, -1.7, "IV", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.axhline(y=0, ls="--", lw=0.7, color="k", alpha=0.5)
ax.axvline(x=0, ls="--", lw=0.7, color="k", alpha=0.5)
# ax.xaxis.set_major_locator(plt.NullLocator())
# ax.yaxis.set_major_locator(plt.NullLocator())
ax.set_xlim(np.array([-2, 2]))
ax.set_ylim(np.array([-2, 2]))
ticks = [-2, -1, 0, 1, 2]
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels([int(t * gammas[0]) for t in ticks])
ax.set_yticklabels([int(t * gammas[1]) for t in ticks])
for X, y in all_train_envs:
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm, edgecolors='none', s=5, alpha=0.3)
ax_ = axs[1, i] if len(exps) > 1 else axs[1]
l = sns.lineplot(
data=error_df.groupby("method").get_group(exps[i]),
x="epoch",
y="error",
hue="phase",
ax=ax_,
ci=90
)
handles, labels = l.get_legend_handles_labels()
l.get_legend().remove()
ax_.grid(color="k", linestyle="--", linewidth=0.5, alpha=0.3)
ax_.set_title(exps[i].upper())
# ax_.set_xscale("log")
ax_.set_xlabel("Iterations")
ax_.set_ylabel("worst-group-accuracy")
ax_.set_ylim([-0.005, 1.005])
lg = fig.legend(handles, labels, loc='lower center', ncol=3, bbox_to_anchor=(0.5, -0.05))
fig.tight_layout()
plt.savefig(f"figures/{filename}.pdf",bbox_extra_artists=(lg,), bbox_inches='tight')
plt.savefig(f"figures/{filename}.png",bbox_extra_artists=(lg,), bbox_inches='tight')
if __name__ == "__main__":
seeds = 1
n_samples = 1000
dim_noise = 1200
DEVICE = 0
gammas = [4, 1.0, 20.0]
exps = ["erm", "subg", "rwg"]
df = parse_json_to_df(["toy_sweep"])
idx = [
"method",
"lr",
"weight_decay",
"batch_size",
"init_seed",
"epoch",
"file_path",
]
# df.set_index(idx)
    def get_plotting_params(df):
models = {
(exp, seed): load_model(path.replace(".pt", ".best.pt"))
for exp, seed, path in (
df.groupby(["method", "init_seed", "file_path"]).groups.keys()
)
}
df = (
df.melt(
id_vars=idx,
value_vars=["min_acc_va", "min_acc_te", "min_acc_tr"],
var_name="phase",
value_name="error",
)
.replace({"min_acc_va": "valid", "min_acc_te": "test", "min_acc_tr": "train"})
.reset_index()
)
datasets = []
for i in range(seeds):
torch.manual_seed(i)
np.random.seed(i)
            d = Toy(None, "tr")  # Toy ignores data_path, which would otherwise be missing here
datasets.append((d.x, d.y))
all_hm = torch.zeros(len(exps), seeds, 200 * 200)
for exp_i, exp in enumerate(exps):
for i in range(seeds):
heatmap_plane = generate_heatmap_plane(datasets[i][0]).to(DEVICE)
all_hm[exp_i, i] = models[(exp, i)](heatmap_plane).detach().cpu()
return exps, datasets, all_hm, gammas, heatmap_plane, df
groups = df.groupby(
["lr", "weight_decay", "batch_size", "gamma_spu", "gamma_core", "gamma_noise"]
)
for (lr, wd, bs, gms, gmc, gmn), g_df in groups:
plot(
            *get_plotting_params(g_df),
filename=f"toy_sweep_lr_{lr}_wd_{wd}_bs_{bs}_gms_{gms}_gmc_{gmc}_gmn_{gmn}",
)
|
BalancingGroups-main
|
plot_toy_scatter.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import json
import time
import torch
import submitit
import argparse
import numpy as np
import models
from datasets import get_loaders
class Tee:
def __init__(self, fname, stream, mode="a+"):
self.stream = stream
self.file = open(fname, mode)
def write(self, message):
self.stream.write(message)
self.file.write(message)
self.flush()
def flush(self):
self.stream.flush()
self.file.flush()
def randl(l_):
return l_[torch.randperm(len(l_))[0]]
def parse_args():
parser = argparse.ArgumentParser(description='Balancing baselines')
parser.add_argument('--output_dir', type=str, default='outputs')
parser.add_argument('--slurm_output_dir', type=str, default='slurm_outputs')
parser.add_argument('--data_path', type=str, default='data')
parser.add_argument('--slurm_partition', type=str, default=None)
parser.add_argument('--max_time', type=int, default=3*24*60)
parser.add_argument('--num_hparams_seeds', type=int, default=20)
parser.add_argument('--num_init_seeds', type=int, default=5)
parser.add_argument('--selector', type=str, default='min_acc_va')
return vars(parser.parse_args())
def run_experiment(args):
start_time = time.time()
torch.manual_seed(args["init_seed"])
np.random.seed(args["init_seed"])
loaders = get_loaders(args["data_path"], args["dataset"], args["batch_size"], args["method"])
sys.stdout = Tee(os.path.join(
args["output_dir"], 'seed_{}_{}.out'.format(
args["hparams_seed"], args["init_seed"])), sys.stdout)
sys.stderr = Tee(os.path.join(
args["output_dir"], 'seed_{}_{}.err'.format(
args["hparams_seed"], args["init_seed"])), sys.stderr)
checkpoint_file = os.path.join(
args["output_dir"], 'seed_{}_{}.pt'.format(
args["hparams_seed"], args["init_seed"]))
best_checkpoint_file = os.path.join(
args["output_dir"],
"seed_{}_{}.best.pt".format(args["hparams_seed"], args["init_seed"]),
)
model = {
"erm": models.ERM,
"suby": models.ERM,
"subg": models.ERM,
"rwy": models.ERM,
"rwg": models.ERM,
"dro": models.GroupDRO,
"jtt": models.JTT
}[args["method"]](args, loaders["tr"])
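    # Note (added reading): the subsampling (sub*) and reweighting (rw*)
    # baselines share the plain ERM objective; they differ only in how
    # get_loaders builds the training data (subsampling or weighted sampling).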
last_epoch = 0
best_selec_val = float('-inf')
if os.path.exists(checkpoint_file):
model.load(checkpoint_file)
last_epoch = model.last_epoch
best_selec_val = model.best_selec_val
for epoch in range(last_epoch, args["num_epochs"]):
if epoch == args["T"] + 1 and args["method"] == "jtt":
loaders = get_loaders(
args["data_path"],
args["dataset"],
args["batch_size"],
args["method"],
model.weights.tolist())
for i, x, y, g in loaders["tr"]:
model.update(i, x, y, g, epoch)
result = {
"args": args, "epoch": epoch, "time": time.time() - start_time}
for loader_name, loader in loaders.items():
avg_acc, group_accs = model.accuracy(loader)
result["acc_" + loader_name] = group_accs
result["avg_acc_" + loader_name] = avg_acc
selec_value = {
"min_acc_va": min(result["acc_va"]),
"avg_acc_va": result["avg_acc_va"],
}[args["selector"]]
if selec_value >= best_selec_val:
model.best_selec_val = selec_value
best_selec_val = selec_value
model.save(best_checkpoint_file)
model.save(checkpoint_file)
print(json.dumps(result))
if __name__ == "__main__":
args = parse_args()
commands = []
for hparams_seed in range(args["num_hparams_seeds"]):
torch.manual_seed(hparams_seed)
args["hparams_seed"] = hparams_seed
args["dataset"] = randl(
["waterbirds", "celeba", "multinli", "civilcomments"])
args["method"] = randl(
["erm", "suby", "subg", "rwy", "rwg", "dro", "jtt"])
args["num_epochs"] = {
"waterbirds": 300 + 60,
"celeba": 50 + 10,
"multinli": 5 + 2,
"civilcomments": 5 + 2
}[args["dataset"]]
args["eta"] = 0.1
args["lr"] = randl([1e-5, 1e-4, 1e-3])
args["weight_decay"] = randl([1e-4, 1e-3, 1e-2, 1e-1, 1])
if args["dataset"] in ["waterbirds", "celeba"]:
args["batch_size"] = randl([2, 4, 8, 16, 32, 64, 128])
else:
args["batch_size"] = randl([2, 4, 8, 16, 32])
args["up"] = randl([4, 5, 6, 20, 50, 100])
args["T"] = {
"waterbirds": randl([40, 50, 60]),
"celeba": randl([1, 5, 10]),
"multinli": randl([1, 2]),
"civilcomments": randl([1, 2])
}[args["dataset"]]
for init_seed in range(args["num_init_seeds"]):
args["init_seed"] = init_seed
commands.append(dict(args))
os.makedirs(args["output_dir"], exist_ok=True)
torch.manual_seed(0)
commands = [commands[int(p)] for p in torch.randperm(len(commands))]
if args['slurm_partition'] is not None:
executor = submitit.SlurmExecutor(folder=args['slurm_output_dir'])
executor.update_parameters(
time=args["max_time"],
gpus_per_node=1,
array_parallelism=512,
cpus_per_task=4,
partition=args["slurm_partition"])
executor.map_array(run_experiment, commands)
else:
for command in commands:
run_experiment(command)
|
BalancingGroups-main
|
train.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import glob
import json
import argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import alexandergovern
from matplotlib import cm
from pandas.api.types import is_numeric_dtype
def remove(lis, val):
return [value for value in lis if value != val]
def anova_test(best_df, df, metric):
for (dataset, groups), dataset_df in best_df.groupby(level=["dataset", "Groups"]):
metric_values = [
df.get_group(idx)[metric].values for idx, _ in dataset_df.iterrows()
]
best_df.loc[(dataset, slice(None), slice(None), groups), "Signif Diff"] = (
alexandergovern(*metric_values).pvalue < 0.05
)
return best_df
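# Reading sketch: for each (dataset, Groups) cell, the Alexander-Govern test
# compares the metric across the selected runs (one group of seeds per method),
# and "Signif Diff" is set to True when they differ at the 5% level.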
def convert_df_to_readable_format(reduced, bold=None, latex=None):
# Formatting table contents with mean (std)
summary = pd.DataFrame()
pm_sign = "$\\pm$" if latex else "+/-"
for c in reduced.columns.get_level_values(0):
if "mean" in reduced[c] and "std" in reduced[c]:
if "acc" in c.lower():
summary[c] = (
(100 * reduced[c]["mean"]).map("{:.1f}".format)
+ pm_sign
+ (100 * reduced[c]["std"]).map("{:.1f}".format)
)
else:
summary[c] = (
reduced[c]["mean"].map("{:.1f}".format)
+ pm_sign
+ reduced[c]["std"].map("{:.1f}".format)
)
elif "min" in reduced[c]:
summary[c + " range"] = (
"["
+ reduced[c]["min"].map("{:.1f}".format)
+ ", "
+ reduced[c]["max"].map("{:.1f}".format)
+ "]"
)
else:
if is_numeric_dtype(reduced[c]) and reduced[c].dtype == "float":
summary[c] = reduced[c].map("{:.1f}".format)
else:
summary[c] = reduced[c]
if bold:
if latex:
bold_l, bold_r = r"\textbf{", "}"
else:
bold_l, bold_r = "*", ""
best_algos = (
reduced.sort_values((bold["best_metric"], "mean"), ascending=bold["order"])
.groupby(bold["best_metric_group"])
.head(1)
.index
)
summary.loc[best_algos, bold["best_metric"]] = summary.loc[
best_algos, bold["best_metric"]
].map(lambda x: bold_l + x + bold_r)
return summary
def final_latex_table(final_df, df, do_anova, col_to_show):
template_begining = (
r"""
\begin{tabular}{lllccccc}
\toprule
\textbf{Method} & \textbf{\#HP} & \textbf{Groups} & \multicolumn{4}{c}{\textbf{Worst Acc}} & \textbf{Average} \\
\cmidrule(lr){4-7}
& & & CelebA & Waterbirds & MultiNLI & CivilComments & \\
\midrule
"""
)
middle = r""
last_group = None
df = df.set_index(["dataset", "Method"])
for _, row in final_df.iterrows():
for dataset in ["CelebA", "Waterbirds", "MultiNLI", "CivilComments"]:
if do_anova:
if df.loc[(dataset, row["Method"])]["Signif Diff"].item():
                    row[dataset] = r"\cellcolor{blue!7}" + str(row[dataset])
if row["Groups"] != last_group and last_group is not None:
middle += "\\midrule \n"
middle += r" & ".join(row.astype(str).values)
middle += "\\\\ \n"
last_group = row["Groups"]
template_ending = r"""
\bottomrule \\
\end{tabular}
"""
return template_begining + middle + template_ending
def parse_json_to_df(dirs):
records = []
groups = {
"erm": "No",
"jtt": "No",
"suby": "No",
"rwy": "No",
"dro": "Yes",
"rwg": "Yes",
"subg": "Yes",
}
nb_hps = {"erm": 4, "jtt": 6, "suby": 4, "rwy": 4, "dro": 5, "rwg": 4, "subg": 4}
for dname in dirs:
for fname in glob.glob(os.path.join(dname, "*.out")):
with open(fname, "r") as f:
lines = f.readlines()
for line in lines:
if not line.startswith("{"):
continue
record = json.loads(line)
this_row = dict(record["args"])
this_row["epoch"] = record["epoch"]
this_row["time"] = record["time"] / 3600
this_row["min_acc_va"] = min(record["acc_va"])
this_row["min_acc_tr"] = min(record["acc_tr"])
this_row["avg_acc_va"] = record["avg_acc_va"]
this_row["min_acc_te"] = min(record["acc_te"])
this_row["avg_acc_te"] = record["avg_acc_te"]
this_row["Groups"] = groups[this_row["method"]]
this_row["#HP"] = nb_hps[this_row["method"]]
this_row["file_path"] = os.path.splitext(fname)[0] + ".pt"
records.append(this_row)
if not len(records):
quit()
pd.set_option(
"display.max_rows", None, "display.max_columns", None, "display.width", None
)
return pd.DataFrame(records)
def reorganize_df(df, col_to_show=None):
df = (
df.set_index(["dataset", "Method", "#HP", "Groups"])[col_to_show]
.unstack(level=0)
.sort_index(axis=0, level=2)
)
df.columns = df.columns.set_names(None)
df = df.sort_index(axis=1)
# df = df.reindex(['Worst Acc', 'Time (h)', 'Signif Diff'], level=1, axis=1)
df = df.reindex(["CelebA", "Waterbirds", "MultiNLI", "CivilComments"], axis=1)
df = df.reset_index()
return df
def model_paths(df, run_groups):
models_to_save = []
for idx, row in df.iterrows():
models_to_save.append(run_groups.get_group(idx)["file_path"])
return pd.concat(models_to_save)
def print_hp_table(df, aggregate=True):
hps = ["lr", "weight_decay", "epoch", "batch_size"]
hparams = df[[(hp, "mean") for hp in hps]].droplevel(1, axis=1)
hparams = hparams.apply(
{
"lr": np.log10,
"weight_decay": np.log10,
"epoch": lambda x: x,
"batch_size": lambda x: x,
}
)
if aggregate:
hparams = hparams.groupby(["dataset", "Groups", "method"]).agg(["mean", "std"])
metric = ("min_acc_te", "mean")
hparams[("min_acc_te", "min")] = (
df.groupby(["dataset", "Groups", "method"]).min()[metric] * 100
)
hparams[("min_acc_te", "max")] = (
df.groupby(["dataset", "Groups", "method"]).max()[metric] * 100
)
hparams[("min_acc_te_delta", "")] = (
hparams[("min_acc_te", "max")] - hparams[("min_acc_te", "min")]
)
else:
hparams = pd.concat([hparams, df[["min_acc_te"]]], axis=1)
hparams.columns = pd.MultiIndex.from_tuples(
[(hp, "") for hp in hps] + df[["min_acc_te"]].columns.tolist()
)
hparams = hparams.droplevel(["hparams_seed", "#HP"], axis=0)
hparams = hparams.reorder_levels(["dataset", "Groups", "method"])
# print(hparams)
hparams = hparams.sort_index()
print(convert_df_to_readable_format(hparams))
df = convert_df_to_readable_format(hparams, latex=True)
cmaps = {
"lr": "bone",
"weight_decay": "pink",
"epoch": "bone",
"batch_size": "pink",
}
groups = hparams.groupby(["dataset"])
for idx, row in hparams.iterrows():
for hp in ["lr", "weight_decay", "batch_size", "epoch"]:
cmap = cm.get_cmap(cmaps[hp])
hp_tup = (hp, "mean") if aggregate else hp
scale = {
"min": groups.get_group(idx[0])[hp_tup].min().item(),
"max": groups.get_group(idx[0])[hp_tup].max().item(),
}
max_level = {
"lr": 1 / 6,
"weight_decay": 1 / 6,
"batch_size": 1 / 6,
"epoch": 1 / 6,
}[hp]
if hp in ["weight_decay", "batch_size"]:
level = 1 - (
max_level
* (row[hp_tup].item() - scale["min"])
/ (scale["max"] - scale["min"])
)
else:
level = 1 + (
max_level
* (row[hp_tup].item() - scale["max"])
/ (scale["max"] - scale["min"])
)
color = ["{:.3f}".format(c) for c in cmap(level)[:3]]
            df.loc[idx, hp] = (
                r"\cellcolor[rgb]{" + ",".join(color) + "}" + str(df.loc[idx, hp])
            )
filename = "hp_table_mean" if aggregate else "hp_table"
df.to_latex(f"tables/{filename}.tex", multicolumn=True, multirow=True, escape=False)
def plot_min_acc_evol(best_df, all_runs, filename):
df = []
all_runs_groups = all_runs.groupby(best_df.index.names)
for idx, _ in best_df.iterrows():
df.append(all_runs_groups.get_group(idx))
df = (
pd.concat(df)
.sort_index()
.reindex(["CelebA", "Waterbirds", "MultiNLI", "CivilComments"], level="dataset")
)
groups = df.groupby(
["dataset", "method", "hparams_seed", "init_seed", "Groups", "#HP"]
)
windows = {
"CelebA": 5,
"Waterbirds": 10,
}
dfs = []
for group, df_group in groups:
if group[0] in windows:
dfs.append(df_group.rolling(window=windows[group[0]]).mean())
else:
dfs.append(df_group)
df = pd.concat(dfs)
plt.rc("font", size=11)
df = (
df.melt(
value_vars=["min_acc_te", "min_acc_tr"],
var_name="phase",
value_name="worst-group-acc",
ignore_index=False,
)
.replace({"min_acc_te": "test", "min_acc_tr": "train"})
.reset_index()
)
sns.set_theme(context="talk", style="white", font="Times New Roman")
scale = 1
# plt.figure(figsize=(scale * 8, scale * 11))
g = sns.relplot(
data=df,
x="epoch",
y="worst-group-acc",
hue="method",
style="phase",
kind="line",
row="Groups",
col="dataset",
height=scale * 3.5,
aspect=1,
facet_kws=dict(sharex=False, sharey=False, margin_titles=True),
alpha=0.7,
)
g.set_axis_labels("epoch", "worst-group-acc")
g.set_titles(row_template="Groups = {row_name}", col_template="{col_name}")
# g.add_legend(loc="lower center", ncol=4)
g.tight_layout()
plt.savefig(f"figures/{filename}.pdf", dpi=300)
plt.savefig(f"figures/{filename}.png", dpi=300)
def format_result_tables(df, run_groups, do_anova=False):
if do_anova:
df = anova_test(df, run_groups, "min_acc_te")
df = df.reset_index()
if not args.full:
df = df[["dataset", "method", "Groups", "#HP", "min_acc_te", "time"]]
df = df.rename(
columns={"min_acc_te": "Worst Acc", "time": "Time (h)", "method": "Method"}
)
multip = 100 if args.col_to_show == "Worst Acc" else 1
avg_accs_per_method = (
(multip * df.groupby("Method").mean()[(args.col_to_show, "mean")])
.map("{:.1f}".format)
.reset_index(name="Average")
)
if args.bold:
bold = {
"best_metric": args.col_to_show,
"order": False if "acc" in args.col_to_show.lower() else True,
"best_metric_group": ["dataset", "Groups"],
}
else:
bold = False
term_df = convert_df_to_readable_format(df, bold, latex=False)
term_df = reorganize_df(term_df, col_to_show=args.col_to_show)
term_df = term_df.merge(avg_accs_per_method, on="Method", how="left")
print(term_df)
latex_df = convert_df_to_readable_format(df, bold, latex=True)
latex_df = reorganize_df(latex_df, col_to_show=args.col_to_show)
latex_df = latex_df.merge(avg_accs_per_method, on="Method", how="left")
os.makedirs("tables", exist_ok=True)
    table_name = args.col_to_show.replace(" ", "_").replace("(", "").replace(")", "").lower()
    with open(f"tables/result_{table_name}_1.tex", "w") as f:
        f.write(final_latex_table(latex_df, df, do_anova, args.col_to_show))
def format_time_results(df_all_epochs, unique_run_id):
time_delta = df_all_epochs.groupby(unique_run_id)["time"].diff() * 60
time_delta = time_delta[
time_delta > 0
] # Remove negative values coming from preemption
total_time = time_delta.sum().item()
print("Total compute time : " + str(total_time))
time_result = time_delta.groupby(["dataset", "method", "#HP", "Groups"]).median()
average = (
time_result.groupby(["method", "#HP", "Groups"]).mean().to_frame("Average")
)
time_result = time_result.unstack("dataset").sort_index(level="Groups")
time_result = time_result.join(average).apply(lambda x: x.map("{:.2f}".format))
print(time_result)
time_result.to_latex(
"tables/result_time_h.tex", escape=False, multirow=True, multicolumn=True
)
sns.set(style="whitegrid", context="talk")
g = sns.catplot(
data=time_delta.to_frame("time").reset_index(),
x="method",
y="time",
col="dataset",
kind="box",
sharex=True,
sharey=False,
height=6,
col_wrap=2,
)
for ax in g.fig.axes:
ax.set_yscale("log")
ax.tick_params(axis="x", labelrotation=45)
g.set_axis_labels("Method", "Time per epoch in minutes")
g.set_titles(col_template="{col_name}")
g.tight_layout()
plt.savefig(f"figures/time_per_epoch.pdf", dpi=300)
plt.savefig(f"figures/time_per_epoch.png", dpi=300)
def plot_min_acc_dist(df, run_groups, n):
dfs = []
for idx, _ in df.iterrows():
dfs.append(run_groups.get_group(idx)["min_acc_te"])
df = pd.concat(dfs).sort_index(level="Groups")
df = df.reindex(
["CelebA", "Waterbirds", "MultiNLI", "CivilComments"], level="dataset"
).reset_index()
sns.set(style="whitegrid", context="talk", font="Times New Roman")
g = sns.catplot(
data=df,
x="method",
y="min_acc_te",
col="dataset",
kind="box",
sharex=True,
sharey=False,
height=4.5,
)
for ax in g.fig.axes:
ax.tick_params(axis="x", labelrotation=45)
g.set_axis_labels("Method", "worst-group-acc")
g.set_titles(col_template="{col_name}")
g.tight_layout()
plt.savefig(f"figures/worst_group_acc_dist_dataset_{n}.pdf", dpi=300)
plt.savefig(f"figures/worst_group_acc_dist_dataset_{n}.png", dpi=300)
plt.figure()
g = sns.catplot(data=df, x="method", y="min_acc_te", kind="box", height=5.5)
for ax in g.fig.axes:
ax.tick_params(axis="x", labelrotation=45)
g.set_axis_labels("Method", "worst-group-acc")
g.tight_layout()
plt.savefig(f"figures/worst_group_acc_dist_{n}.pdf", dpi=300)
plt.savefig(f"figures/worst_group_acc_dist_{n}.png", dpi=300)
def print_unfinished_runs(dir):
errored_runs = []
for d in dir:
l = os.popen(f"grep -il error {d}/*.err").read()
l = [o for o in l.split("\n") if o]
errored_runs.extend(l)
# unfinished_runs = []
for run in errored_runs:
run_json = os.path.splitext(run)[0] + ".out"
with open(run_json) as f:
last_epoch = f.readlines()[-1]
last_epoch = json.loads(last_epoch)
if last_epoch["epoch"] + 1 != last_epoch["args"]["num_epochs"]:
print(run_json)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse sweep folder")
parser.add_argument("dir", type=str, nargs="+")
parser.add_argument("--selector", type=str, default="min_acc_va")
parser.add_argument("--metric", type=str, default="min_acc_te")
parser.add_argument("--col_to_show", type=str, default="Worst Acc")
parser.add_argument("--n", type=int, default=1)
parser.add_argument("--full", action="store_true")
parser.add_argument("--last_epoch", action="store_true")
parser.add_argument("--do_anova", action="store_true")
parser.add_argument("--bold", action="store_true")
parser.add_argument("--small_weight_decay", action="store_true")
parser.add_argument("--small_lr", action="store_true")
parser.add_argument(
"--mode",
type=str,
choices=[
"format_results",
"format_time_results",
"best_model_paths",
"best_mean_model_paths",
"print_hp_table",
"unfinished_runs",
"plot_min_acc_evol",
"plot_min_acc_dist",
],
default="format_results",
)
args = parser.parse_args()
if args.mode == "unfinished_runs":
print_unfinished_runs(args.dir)
exit()
df = parse_json_to_df(args.dir)
if args.small_weight_decay:
df = df[df["weight_decay"] == 1e-4]
if args.small_lr:
df = df[df["lr"] == 1e-5]
unique_run_id = ["dataset", "method", "hparams_seed", "init_seed", "Groups", "#HP"]
# Renaming datasets
df = df.replace(
{
"celeba": "CelebA",
"waterbirds": "Waterbirds",
"multinli": "MultiNLI",
"civilcomments": "CivilComments",
}
)
df["method"] = df["method"].str.upper()
df = df.replace({"DRO": "gDRO"})
df_all_epochs = df.set_index(unique_run_id + ["epoch"])
df = (
df.sort_values(by="epoch")
if args.last_epoch
else df.sort_values(by=args.selector)
)
df = df.groupby(unique_run_id).tail(1).set_index(unique_run_id)
df_all = df
# Averaging over init seeds
run_groups = df.groupby(remove(unique_run_id, "init_seed"))
df = run_groups.agg(["mean", "std"])
# Selecting best hyperparmeters in average
df = df.sort_values(by=["dataset", "method", (args.selector, "mean")])
df = df.groupby(["dataset", "method"]).tail(args.n)
if args.mode == "best_model_paths":
best_models = (
df_all.sort_values(by=["dataset", "method", args.selector])
.groupby(["dataset", "method"])
.tail(args.n)
)
# print(best_models)
for path in best_models["file_path"].values:
print(path)
elif args.mode == "best_mean_model_paths":
best_model_paths = model_paths(df, run_groups)
for path in best_model_paths.values:
print(path)
elif args.mode == "print_hp_table":
print_hp_table(df, aggregate=(args.n > 1))
elif args.mode == "format_results":
format_result_tables(df, run_groups, args.do_anova)
elif args.mode == "format_time_results":
format_time_results(df_all_epochs, unique_run_id)
elif args.mode == "plot_min_acc_evol":
plot_min_acc_evol(
df,
df_all_epochs,
"worst_acc_evol" if args.n == 1 else f"worst_acc_evol_mean{args.n}",
)
elif args.mode == "plot_min_acc_dist":
plot_min_acc_dist(df, run_groups, args.n)
|
BalancingGroups-main
|
parse.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from utils.masks import generate_masks, evaluate_masks
import torch
def train(*args, **kwargs):
return {}
params = {}
n_data = 400
# Specify dataset configuration
split_config = {
"public": 0.5, # 50% of the data will be in the public bucket
"private": {
"train": 0.25,
"heldout": 0.25
}
}
# Randomly split the data according to the configuration
known_masks, hidden_masks = generate_masks(n_data, split_config)
print(known_masks, hidden_masks)
# Typical output
typical_known_masks = {
    # A 1 marks membership: here samples 1 and 2 are public, samples 0 and 3 private
"public": [0, 1, 1, 0],
"private": [1, 0, 0, 1]
}
typical_hidden_masks = {
"public": [0, 1, 1, 0],
"private": {
"train": [1, 0, 0, 0],
"heldout": [0, 0, 0, 1]
}
}
# Private model is trained once
model_private = train(params, hidden_masks["private"]["train"])
# The attacker can then use the "public masks" they know about to mount their privacy attack
# Note that the attacker does not have access to hidden_masks
model_public = train(params, known_masks["public"])
def privacy_attack(model_private, private_masks):
"""
Random attack model
"""
return torch.rand(len(private_masks))
guessed_membership = privacy_attack(model_private, known_masks["private"])
# guessed_membership is typically something like [0.5, 0.7]
# At evaluation time, the guessed membership are compared to the true ones
# Only then can hidden_masks be checked
print(evaluate_masks(guessed_membership, hidden_masks["private"], threshold=0.5))
# Computes precision, recall, accuracy, etc.
|
calibration_membership-main
|
api.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import torch
from models import build_model
from datasets import get_dataset
from utils.evaluator import Evaluator
from utils.logger import create_logger
from utils.misc import bool_flag
from utils.trainer import Trainer
from utils.masks import generate_masks
import socket
import signal
import subprocess
import torch.nn as nn
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
"""
params.is_slurm_job = 'SLURM_JOB_ID' in os.environ and not params.debug_slurm
# logger.info("SLURM job: %s" % str(params.is_slurm_job))
# SLURM job
print('slurm job', params.is_slurm_job)
if params.is_slurm_job:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
SLURM_VARIABLES = [
'SLURM_JOB_ID',
'SLURM_JOB_NODELIST', 'SLURM_JOB_NUM_NODES', 'SLURM_NTASKS', 'SLURM_TASKS_PER_NODE',
'SLURM_MEM_PER_NODE', 'SLURM_MEM_PER_CPU',
'SLURM_NODEID', 'SLURM_PROCID', 'SLURM_LOCALID', 'SLURM_TASK_PID'
]
PREFIX = "%i - " % int(os.environ['SLURM_PROCID'])
for name in SLURM_VARIABLES:
value = os.environ.get(name, None)
# logger.info(PREFIX + "%s: %s" % (name, str(value)))
# # job ID
params.job_id = os.environ['SLURM_JOB_ID']
# number of nodes / node ID
params.n_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
params.node_id = int(os.environ['SLURM_NODEID'])
# local rank on the current node / global rank
params.local_rank = int(os.environ['SLURM_LOCALID'])
params.global_rank = int(os.environ['SLURM_PROCID'])
# number of processes / GPUs per node
params.world_size = int(os.environ['SLURM_NTASKS'])
params.n_gpu_per_node = params.world_size // params.n_nodes
# define master address and master port
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])
params.master_addr = hostnames.split()[0].decode('utf-8')
assert 10001 <= params.master_port <= 20000 or params.world_size == 1
# logger.info(PREFIX + "Master address: %s" % params.master_addr)
# logger.info(PREFIX + "Master port : %i" % params.master_port)
# set environment variables for 'env://'
os.environ['MASTER_ADDR'] = params.master_addr
os.environ['MASTER_PORT'] = str(params.master_port)
os.environ['WORLD_SIZE'] = str(params.world_size)
os.environ['RANK'] = str(params.global_rank)
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif params.local_rank != -1:
assert params.master_port == -1
# read environment variables
params.global_rank = int(os.environ['RANK'])
params.world_size = int(os.environ['WORLD_SIZE'])
params.n_gpu_per_node = int(os.environ['NGPU'])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
# local job (single GPU)
else:
assert params.local_rank == -1
assert params.master_port == -1
params.n_nodes = 1
params.node_id = 0
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.n_gpu_per_node = 1
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in distributed mode
params.is_master = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
print('n_nodes', params.n_nodes)
print('multi gpu', params.multi_gpu)
print('world size', params.world_size)
# summary
PREFIX = "%i - " % params.global_rank
# logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
# logger.info(PREFIX + "Node ID : %i" % params.node_id)
# logger.info(PREFIX + "Local rank : %i" % params.local_rank)
# logger.info(PREFIX + "Global rank : %i" % params.global_rank)
# logger.info(PREFIX + "World size : %i" % params.world_size)
# logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
# logger.info(PREFIX + "Master : %s" % str(params.is_master))
# logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
# logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
# logger.info(PREFIX + "Hostname : %s" % socket.gethostname())
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if params.multi_gpu:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
# logger.info("Initializing PyTorch distributed ...")
torch.distributed.init_process_group(
init_method='env://',
backend='nccl',
)
def sig_handler(signum, frame):
# logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ['SLURM_PROCID'])
# logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
# logger.warning("Requeuing job " + os.environ['SLURM_JOB_ID'])
os.system('scontrol requeue ' + os.environ['SLURM_JOB_ID'])
    else:
        pass  # not the master process, no need to requeue
sys.exit(-1)
def term_handler(signum, frame):
    # Bypass SIGTERM; pre-emption is handled through the SIGUSR1 handler above.
    pass
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
# logger.warning("Signal handler installed.")
def check_parameters(params):
assert params.dump_path is not None
os.makedirs(params.dump_path, exist_ok=True)
def get_parser():
"""
Generate a parameters parser.
"""
parser = argparse.ArgumentParser(description='Train/evaluate image classification models')
# config parameters
parser.add_argument("--dump_path", type=str, default=None)
parser.add_argument('--print_freq', type=int, default=5)
parser.add_argument("--save_periodic", type=int, default=0)
# Data parameters
parser.add_argument("--data_root", type=str, default="data")
parser.add_argument("--dataset", type=str, choices=["cifar10", "cifar100","imagenet", "gaussian","credit", "hep", "adult", "mnist", "lfw"], default="cifar10")
parser.add_argument("--mask_path", type=str, required=True)
parser.add_argument('--n_data', type=int, default=500)
parser.add_argument('--num_classes', type=int, default=10)
parser.add_argument('--data_num_dimensions', type=int, default=75)
parser.add_argument('--random_seed', type=int, default=10)
parser.add_argument("--scale", type=float, default=1.0)
# Model parameters
parser.add_argument("--architecture", choices=["lenet", "smallnet", "alexnet", "kllenet", "linear", "mlp", "resnet18", "leaks"], default="lenet")
# training parameters
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=2)
parser.add_argument("--optimizer", default="sgd,lr=0.001,momentum=0.9")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--aug", type=bool_flag, default=False)
parser.add_argument("--in_channels", type=int, default=3)
parser.add_argument("--private_train_split", type=float, default=0.25)
parser.add_argument("--private_heldout_split", type=float, default=0.25)
# privacy parameters
parser.add_argument("--private", type=bool_flag, default=False)
parser.add_argument("--noise_multiplier", type=float, default=None)
parser.add_argument("--privacy_epsilon", type=float, default=None)
parser.add_argument("--privacy_delta", type=float, default=None)
parser.add_argument("--log_gradients", type=bool_flag, default=False)
parser.add_argument("--log_batch_models", type=bool_flag, default=False)
parser.add_argument("--log_epoch_models", type=bool_flag, default=False)
parser.add_argument("--max_grad_norm", type=float, default=1.0)
#multi gpu paramaeters
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--master_port", type=int, default=-1)
parser.add_argument("--debug_slurm", type=bool_flag, default=False)
return parser
def train(params, mask):
# Create logger and print params
logger = create_logger(params)
# initialize the multi-GPU / multi-node training
init_distributed_mode(params)
if params.is_slurm_job:
init_signal_handler()
trainloader, n_data = get_dataset(params=params, is_train=True, mask=mask)
validloader, _ = get_dataset(params=params, is_train=False)
model = build_model(params)
model.cuda()
if params.multi_gpu:
if params.private:
raise NotImplementedError('Distributed training not implemented with privacy')
else:
print('Using multi gpu')
model = nn.parallel.DistributedDataParallel(model, device_ids=[params.local_rank], output_device=params.local_rank, broadcast_buffers=True)
trainer = Trainer(model, params, n_data=n_data)
trainer.reload_checkpoint()
evaluator = Evaluator(model, params)
# evaluation
# if params.eval_only:
# scores = evaluator.run_all_evals(trainer, evals=['classif'], data_loader=validloader)
# for k, v in scores.items():
# logger.info('%s -> %.6f' % (k, v))
# logger.info("__log__:%s" % json.dumps(scores))
# exit()
# training
for epoch in range(trainer.epoch, params.epochs):
# update epoch / sampler / learning rate
trainer.epoch = epoch
logger.info("============ Starting epoch %i ... ============" % trainer.epoch)
# train
for (idx, images, targets) in trainloader:
trainer.classif_step(idx, images, targets)
trainer.end_step()
logger.info("============ End of epoch %i ============" % trainer.epoch)
# evaluate classification accuracy
scores = evaluator.run_all_evals(evals=['classif'], data_loader=validloader)
for name, val in trainer.get_scores().items():
scores[name] = val
# print / JSON log
for k, v in scores.items():
logger.info('%s -> %.6f' % (k, v))
logger.info("__log__:%s" % json.dumps(scores))
# end of epoch
trainer.end_epoch(scores)
return model
if __name__ == '__main__':
parser = get_parser()
params = parser.parse_args()
check_parameters(params)
if params.dataset=='imagenet':
n_data=1281167
elif params.dataset=='credit':
n_data=800
elif params.dataset=='hep':
n_data=124
elif params.dataset=='adult':
n_data=32561
elif params.dataset=='mnist':
n_data=60000
elif params.dataset=='lfw':
#need to do the split here and fill this in
n_data=912
else:
n_data=50000
if params.mask_path=="none":
split_config = {"public": {"train": 0.25,"heldout": 0.25}, "private": {"train": params.private_train_split,"heldout": params.private_heldout_split}}
# Randomly split the data according to the configuration
known_masks, hidden_masks = generate_masks(n_data, split_config)
        path = "data/"
        os.makedirs(path + "hidden", exist_ok=True)  # ensure the hidden-mask directory exists before saving
torch.save(known_masks['public'], path + "public.pth")
torch.save(known_masks['private'], path + "private.pth")
torch.save(hidden_masks['private']['train'], path + "hidden/train.pth")
torch.save(hidden_masks['private']['heldout'], path + "hidden/heldout.pth")
torch.save(hidden_masks['public']['train'], path + "hidden/public_train.pth")
torch.save(hidden_masks['public']['heldout'], path + "hidden/public_heldout.pth")
mask=hidden_masks['private']['train']
else:
mask = torch.load(params.mask_path)
train(params, mask)
|
calibration_membership-main
|
training/image_classification.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import os
from models import build_model
from datasets import get_dataset
from utils.trainer import Trainer
from utils.logger import create_logger
from utils.misc import bool_flag
def check_parameters(params):
if params.private:
assert params.privacy_epsilon is not None
assert params.dump_path is not None
os.makedirs(params.dump_path, exist_ok=True)
def get_parser():
"""
Generate a parameters parser.
"""
parser = argparse.ArgumentParser(description='Train/evaluate a language model')
# Config parameters
parser.add_argument("--dump_path", type=str, default=None)
parser.add_argument('--print_freq', type=int, default=5)
parser.add_argument("--save_periodic", type=int, default=0)
# Data parameters
parser.add_argument("--data_root", type=str, default="data")
parser.add_argument("--dataset", choices=["dummy"], default='dummy')
parser.add_argument("--n_vocab", type=int, default=256)
# Model parameters
parser.add_argument("--architecture", type=str, default='lstm')
parser.add_argument("--embedding_dim", type=int, default=64)
parser.add_argument("--hidden_dim", type=int, default=64)
parser.add_argument("--num_layers", type=int, default=1)
# Training parameters
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--epochs", type=int, default=2)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--optimizer", default="sgd,lr=0.001,momentum=0.9")
parser.add_argument("--seq_len", type=int, default=256)
# Privacy parameters
parser.add_argument("--private", type=bool_flag, default=False)
parser.add_argument("--noise_multiplier", type=float, default=None)
parser.add_argument("--privacy_epsilon", type=float, default=None)
parser.add_argument("--privacy_delta", type=float, default=None)
parser.add_argument("--privacy_fake_samples", type=int, default=None)
parser.add_argument("--log_gradients", type=bool_flag, default=False)
return parser
def main(params):
# Create logger and print params (very useful for debugging)
logger = create_logger(params)
trainloader, n_data = get_dataset(params, split='train', is_train=True)
validloader, _ = get_dataset(params, split='valid', is_train=False)
model = build_model(params)
model.cuda()
trainer = Trainer(model, params, n_data=n_data)
trainer.reload_checkpoint()
# evaluator = Evaluator(trainer, params)
# evaluation
# if params.eval_only:
# scores = evaluator.run_all_evals(trainer, evals=['classif'], data_loader=validloader)
# for k, v in scores.items():
# logger.info('%s -> %.6f' % (k, v))
# logger.info("__log__:%s" % json.dumps(scores))
# exit()
# training
for epoch in range(trainer.epoch, params.epochs):
# update epoch / sampler / learning rate
trainer.epoch = epoch
logger.info("============ Starting epoch %i ... ============" % trainer.epoch)
# train
for (idx, sentence) in trainloader:
trainer.lm_step(idx, sentence)
trainer.end_step()
logger.info("============ End of epoch %i ============" % trainer.epoch)
# evaluate classification accuracy
# scores = evaluator.run_all_evals(trainer, evals=['classif'], data_loader=validloader)
scores = {}
for name, val in trainer.get_scores().items():
scores[name] = val
# print / JSON log
for k, v in scores.items():
logger.info('%s -> %.6f' % (k, v))
logger.info("__log__:%s" % json.dumps(scores))
# end of epoch
trainer.end_epoch(scores)
if __name__ == '__main__':
parser = get_parser()
params = parser.parse_args()
check_parameters(params)
main(params)
|
calibration_membership-main
|
training/language_modeling.py
|
calibration_membership-main
|
training/__init__.py
|
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, TensorDataset
import numpy as np
from numpy.random import multivariate_normal
from sklearn.datasets import fetch_openml
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
import pandas as pd
from sklearn.datasets import fetch_lfw_people
from .text_data import TextIterator
class IdxDataset(Dataset):
"""
Wraps a dataset so that with each element is also returned its index
"""
def __init__(self, dataset: Dataset):
self.dataset = dataset
def __getitem__(self, i: int):
sample = self.dataset[i]
if type(sample) is tuple:
sample = list(sample)
sample.insert(0, i)
return tuple(sample)
else:
return i, sample
def __len__(self):
return len(self.dataset)
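# Hypothetical usage sketch: wrapping a dataset of (x, y) tuples makes
# __getitem__ return (index, x, y) instead, which the trainers use to track
# per-example statistics.
#   >>> ds = IdxDataset([("a", 0), ("b", 1)])
#   >>> ds[1]
#   (1, 'b', 1)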
class MaskDataset(Dataset):
def __init__(self, dataset: Dataset, mask: torch.Tensor):
"""
example:
mask: [0, 1, 1]
cumul: [-1, 0, 1]
remap: {0: 1, 1: 2}
"""
assert mask.dim() == 1
assert mask.size(0) == len(dataset)
assert mask.dtype == torch.bool
mask = mask.long()
cumul = torch.cumsum(mask, dim=0) - 1
self.remap = {}
for i in range(mask.size(0)):
if mask[i] == 1:
self.remap[cumul[i].item()] = i
assert mask[i] in [0, 1]
self.dataset = dataset
self.mask = mask
self.length = cumul[-1].item() + 1
def __getitem__(self, i: int):
return self.dataset[self.remap[i]]
def __len__(self):
return self.length
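# Worked example of the remapping described in the docstring (a sketch):
#   >>> base = IdxDataset(["a", "b", "c"])
#   >>> masked = MaskDataset(base, torch.tensor([False, True, True]))
#   >>> len(masked), masked[0], masked[1]
#   (2, (1, 'b'), (2, 'c'))
# mask [0, 1, 1] keeps elements 1 and 2, so new index 0 maps to old index 1.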
def adult_data_transform(df):
binary_data = pd.get_dummies(df)
feature_cols = binary_data[binary_data.columns[:-2]]
scaler = preprocessing.StandardScaler()
data = pd.DataFrame(scaler.fit_transform(feature_cols), columns=feature_cols.columns)
return data
def get_transform(dataset, aug, is_train):
if dataset == "cifar10":
if aug and is_train:
print('Using data augmentation to train model')
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
elif dataset=='mnist':
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])
elif dataset=='imagenet':
if aug and is_train:
print('Using data augmentation to train model')
augmentations = [transforms.Resize(256),transforms.RandomResizedCrop(224),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
elif dataset=='cifar100':
if aug and is_train:
print('Using data augmentation to train model')
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])])
    else:
        raise ValueError(f"Unknown dataset: {dataset}")
    return transform
def get_dataset(*, params, is_train, mask=None):
if is_train:
assert mask is not None
if params.dataset == "cifar10":
        transform = get_transform(params.dataset, params.aug, is_train)
dataset = torchvision.datasets.CIFAR10(root=params.data_root, train=is_train, download=True, transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 10
return dataloader, n_data
elif params.dataset=="imagenet":
        transform = get_transform(params.dataset, params.aug, is_train)
if is_train:
dataset = torchvision.datasets.ImageFolder(root=params.data_root+'/train',transform=transform)
else:
dataset = torchvision.datasets.ImageFolder(root=params.data_root+'/val',transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 1000
return dataloader, n_data
elif params.dataset=='cifar100':
        transform = get_transform(params.dataset, params.aug, is_train)
dataset = torchvision.datasets.CIFAR100(root=params.data_root, train=is_train, download=True, transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 100
return dataloader, n_data
elif params.dataset=='mnist':
        transform = get_transform(params.dataset, params.aug, is_train)
dataset = torchvision.datasets.MNIST(root=params.data_root, train=is_train, download=True, transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 10
return dataloader, n_data
elif params.dataset=='gaussian':
x,y=get_gaussian_dataset(params.n_data,params.num_classes,params.data_num_dimensions,params.random_seed,scale=params.scale)
data = []
for i in range(len(x)):
data.append([i,x[i], y[i]])
dataloader = torch.utils.data.DataLoader(data, shuffle=True, batch_size=params.batch_size)
return dataloader, params.n_data
elif params.dataset=='credit':
cred=fetch_openml('credit-g')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(cred.data).transform(cred.data)
target = preprocessing.LabelEncoder().fit(cred.target).transform(cred.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
if is_train:
ids=np.arange(1000)[:800]
else:
ids=np.arange(1000)[800:]
final_data = []
for i in ids:
final_data.append([i,data[i], target[i]])
params.num_classes = 2
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
print('Datasize', n_data)
return dataloader, n_data
elif params.dataset=='hep':
hep=fetch_openml('hepatitis')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(hep.data).transform(hep.data)
target = preprocessing.LabelEncoder().fit(hep.target).transform(hep.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
if is_train:
ids=np.arange(155)[:124]
else:
ids=np.arange(155)[124:]
final_data = []
for i in ids:
final_data.append([i,data[i], target[i]])
params.num_classes = 2
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
print('Datasize', n_data)
return dataloader, n_data
elif params.dataset == 'adult':
columns = ["age", "workClass", "fnlwgt", "education", "education-num","marital-status", "occupation", "relationship","race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country", "income"]
        train_data = pd.read_csv(params.data_root+'/adult.data', names=columns, sep=' *, *', engine='python', na_values='?')
        test_data = pd.read_csv(params.data_root+'/adult.test', names=columns, sep=' *, *', engine='python', skiprows=1, na_values='?')
original_train=train_data
original_test=test_data
num_train = len(original_train)
original = pd.concat([original_train, original_test])
labels = original['income']
labels = labels.replace('<=50K', 0).replace('>50K', 1)
labels = labels.replace('<=50K.', 0).replace('>50K.', 1)
# Remove target
del original["income"]
data = adult_data_transform(original)
train_data = data[:num_train]
train_labels = labels[:num_train]
test_data = data[num_train:]
test_labels = labels[num_train:]
test_data=torch.tensor(test_data.to_numpy()).float()
train_data=torch.tensor(train_data.to_numpy()).float()
test_labels=torch.tensor(test_labels.to_numpy(dtype='int64')).long()
train_labels=torch.tensor(train_labels.to_numpy(dtype='int64')).long()
if is_train:
final_data = []
for i in np.arange(len(train_data)):
final_data.append([i,train_data[i], train_labels[i]])
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
else:
final_data = []
for i in np.arange(len(test_data)):
final_data.append([i,test_data[i], test_labels[i]])
dataloader = torch.utils.data.DataLoader(final_data, batch_size=params.batch_size)
n_data=len(final_data)
print('Datasize', n_data)
return dataloader,n_data
elif params.dataset == 'lfw':
lfw_people = fetch_lfw_people(data_home=params.data_root,min_faces_per_person=100, resize=0.4)
n_samples, h, w = lfw_people.images.shape
lfw_images=torch.tensor(lfw_people.images).float()
lfw_targets=torch.tensor(lfw_people.target).long()
if is_train:
ids=np.arange(1140)[:912]
else:
ids=np.arange(1140)[912:]
final_data = []
for i in ids:
image=lfw_images[i].reshape((h, w)).unsqueeze(0)
target=lfw_targets[i]
final_data.append([i,image, target])
params.num_classes = 5
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
return dataloader, n_data
elif params.dataset == "dummy":
# Creates a dummy dataset for NLP
n_data, delta = 10000, 3
data = torch.randint(-delta, delta, size=(n_data, params.seq_len))
data = torch.cumsum(data, dim=1)
data = torch.remainder(data, params.n_vocab)
iterator = TextIterator(data.view(-1), params.batch_size, params.seq_len)
return iterator, n_data
def get_gaussian_dataset(n,num_classes,num_dimensions,random_seed,scale=1):
np.random.seed(random_seed)
mu = [(2*np.random.rand(num_dimensions) - 1) * scale for c in range(num_classes)]
S = np.diag(np.random.rand(num_dimensions)) + 0.5
np.random.seed(np.random.randint(1000))
x = np.concatenate([multivariate_normal(mu[c], S, n//num_classes) for c in range(num_classes)])
y = np.concatenate([np.ones(n//num_classes) * c for c in range(num_classes)])
x=torch.tensor(x).float()
y=torch.tensor(y).long()
return x, y
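# Worked example of the shapes, assuming n divisible by num_classes (any
# remainder is silently dropped):
#
#   x, y = get_gaussian_dataset(6, 3, 2, random_seed=0)
#   x.shape   # torch.Size([6, 2]) -- two samples per class
#   y         # tensor([0, 0, 1, 1, 2, 2])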
|
calibration_membership-main
|
datasets/__init__.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class TextIterator:
def __init__(self, sequence, batch_size, seq_len):
assert sequence.ndim == 1
self.batch_size = batch_size
self.sequence = sequence.view(seq_len, -1)
self.i_batch = 0
def __iter__(self):
self.i_batch = 0
return self
def __next__(self):
if (self.i_batch + 1) * self.batch_size < self.sequence.size(1):
start = self.i_batch * self.batch_size
end = (self.i_batch + 1) * self.batch_size
self.i_batch += 1
return torch.arange(start, end), self.sequence[:, start:end]
else:
raise StopIteration
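# Usage sketch with an arbitrary toy sequence; batches come out in
# (seq_len, batch) layout, as lm_step in utils/trainer.py consumes them.
# Note that the strict `<` above drops the final batch of columns.
#
#   tokens = torch.arange(32)                  # 32 tokens -> view(8, 4)
#   it = TextIterator(tokens, batch_size=2, seq_len=8)
#   for ids, batch in it:                      # one batch of shape (8, 2)
#       ...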
|
calibration_membership-main
|
datasets/text_data.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
# Taken from https://github.com/facebookresearch/XLM
import argparse
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
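# Usage sketch as an argparse type, mirroring how the attack parser in
# attacks/privacy_attacks.py registers flags such as --aug and --private:
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--private", type=bool_flag, default=False)
#   parser.parse_args(["--private", "on"]).private   # True
#   parser.parse_args(["--private", "0"]).private    # False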
|
calibration_membership-main
|
utils/misc.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from datetime import timedelta
import logging
import re
import sys
import time
class LogFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime('%x %X'),
timedelta(seconds=elapsed_seconds)
)
message = record.getMessage()
message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ''
def create_logger(params):
"""
Create a logger and print params
"""
# create log formatter
log_formatter = LogFormatter()
# create console handler and set level to info
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
# create logger and set level to debug
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.propagate = False
logger.addHandler(console_handler)
# reset logger elapsed time
def reset_time():
log_formatter.start_time = time.time()
logger.reset_time = reset_time
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith('--'):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match('^[a-zA-Z0-9_]+$', x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
params.command = ' '.join(command)
logger.info("============ Initialized logger ============")
logger.info("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(params)).items())))
logger.info("The experiment will be stored in %s\n" % params.dump_path)
logger.info("Running command: %s" % params.command)
return logger
|
calibration_membership-main
|
utils/logger.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
import operator
def to_mask(n_data, indices):
mask = torch.zeros(n_data, dtype=bool)
mask[indices] = 1
return mask
def multiply_round(n_data, cfg):
s_total = sum(cfg.values())
sizes = {name: int(s * n_data / s_total) for name, s in cfg.items()}
max_name = max(sizes.items(), key=operator.itemgetter(1))[0]
sizes[max_name] += n_data - sum(sizes.values())
return sizes
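# Worked example: the proportional sizes are floored, and the rounding
# remainder goes to the largest share, so the result always sums to n_data.
#
#   multiply_round(10, {"train": 0.5, "heldout": 0.25, "extra": 0.25})
#   # -> {"train": 6, "heldout": 2, "extra": 2}  (5+2+2 = 9; remainder 1 -> "train")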
def generate_masks(n_data, split_config):
assert type(split_config) is dict
assert "public" in split_config and "private" in split_config
assert type(split_config["private"]) is dict
permutation = np.random.permutation(n_data)
if type(split_config["public"]) is dict:
n_public=int(sum(split_config["public"].values())*n_data)
else:
n_public = int(split_config["public"] * n_data)
n_private = n_data - n_public
known_masks = {}
known_masks["public"] = to_mask(n_data, permutation[:n_public])
known_masks["private"] = to_mask(n_data, permutation[n_public:])
hidden_masks = {}
hidden_masks["private"] = {}
sizes = multiply_round(n_private, split_config["private"])
print(' Private', sizes)
offset = n_public
for name, size in sizes.items():
hidden_masks["private"][name] = to_mask(n_data, permutation[offset:offset+size])
offset += size
assert offset == n_data
if type(split_config["public"]) is dict:
hidden_masks["public"] = {}
public_sizes = multiply_round(n_public, split_config["public"])
print('Public', public_sizes)
public_offset = 0
for name, size in public_sizes.items():
hidden_masks["public"][name] = to_mask(n_data, permutation[public_offset:public_offset+size])
public_offset += size
assert public_offset == n_public
else:
hidden_masks["public"] = known_masks["public"]
return known_masks, hidden_masks
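# Usage sketch with illustrative fractions; the config mirrors the
# public/private structure asserted above:
#
#   known, hidden = generate_masks(
#       1000, {"public": 0.5, "private": {"train": 0.5, "heldout": 0.5}})
#   int(known["public"].sum())               # 500 public points
#   int(hidden["private"]["train"].sum())    # 250 private training points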
def evaluate_masks(guessed_membership, private_masks, threshold, attack_base=None):
if attack_base=='loss' or attack_base=='mean':
true_positives = (guessed_membership[private_masks["train"]] <= threshold).float()
false_negatives= (guessed_membership[private_masks["train"]] > threshold).float()
true_negatives = (guessed_membership[private_masks["heldout"]] > threshold).float()
false_positives = (guessed_membership[private_masks["heldout"]] <= threshold).float()
else:
true_positives = (guessed_membership[private_masks["train"]] >= threshold).float()
false_negatives = (guessed_membership[private_masks["train"]] < threshold).float()
true_negatives = (guessed_membership[private_masks["heldout"]] < threshold).float()
false_positives = (guessed_membership[private_masks["heldout"]] >= threshold).float()
fpr=torch.sum(false_positives) / (torch.sum(false_positives) + torch.sum(true_negatives))
recall = torch.sum(true_positives) / torch.sum(private_masks["train"].float())
precision = torch.sum(true_positives) / (torch.sum(true_positives) + torch.sum(false_positives))
accuracy = (torch.sum(true_positives) + torch.sum(true_negatives)) / (torch.sum(private_masks["heldout"].float()) + torch.sum(private_masks["train"].float()))
return fpr, precision, recall, accuracy
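# Toy example for the 'loss' branch (scores <= threshold count as members):
# with guessed_membership = [0.1, 0.9, 0.2, 0.8], the train mask selecting the
# first two entries, the heldout mask the last two, and threshold = 0.5, the
# counts are TP=1, FN=1, FP=1, TN=1, giving fpr = recall = precision =
# accuracy = 0.5.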
|
calibration_membership-main
|
utils/masks.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import inspect
import json
import itertools
from torch import optim
import numpy as np
from logging import getLogger
from opacus import PrivacyEngine
import opacus.privacy_analysis as privacy_analysis
logger = getLogger()
def repeat(l, r):
"""
Repeat r times each value of list l.
"""
return list(itertools.chain.from_iterable(itertools.repeat(x, r) for x in l))
def repeat_to(l, r):
"""
Repeat values in list l so that it has r values
"""
assert r % len(l) == 0
return repeat(l, r // len(l))
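# Examples:
#   repeat([0.1, 0.01], 3)     # -> [0.1, 0.1, 0.1, 0.01, 0.01, 0.01]
#   repeat_to([0.1, 0.01], 4)  # -> [0.1, 0.1, 0.01, 0.01]
# get_optimizer below uses repeat_to to stretch an explicit "0.1-0.01"
# learning-rate schedule over the requested number of epochs.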
def get_optimizer(parameters, opt_config, epochs):
"""
Parse optimizer parameters.
opt_config should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
lr_schedule = None
if "," in opt_config:
method = opt_config[:opt_config.find(',')]
optim_params = {}
for x in opt_config[opt_config.find(',') + 1:].split(','):
            # e.g. split = ('lr', '0.1-0.01') or split = ('weight_decay', '0.001')
split = x.split('=')
assert len(split) == 2
param_name, param_value = split
assert any([
re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", param_value) is not None,
param_name == "lr" and re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", param_value) is not None,
param_name == "lr" and ("-" in param_value),
param_name == "lr" and re.match(r"^cos:[+-]?(\d+(\.\d*)?|\.\d+)$", param_value) is not None
])
if param_name == "lr":
if param_value.startswith("cos:"):
lr_init = float(param_value[4:])
lr_schedule = [lr_init * (1 + np.cos(np.pi * epoch / epochs)) / 2 for epoch in range(epochs)]
else:
lr_schedule = [float(lr) for lr in param_value.split("-")]
optim_params[param_name] = float(lr_schedule[0])
lr_schedule = repeat_to(lr_schedule, epochs)
else:
optim_params[param_name] = float(param_value)
else:
method = opt_config
optim_params = {}
if method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ['self', 'params']
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
logger.info("Schedule of %s: %s" % (opt_config, str(lr_schedule)))
return optim_fn(parameters, **optim_params), lr_schedule
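# Usage sketch for the two opt_config forms from the docstring, over a
# hypothetical 4-epoch run (`model` is assumed to exist):
#
#   opt, schedule = get_optimizer(model.parameters(), "sgd,lr=0.1,momentum=0.9", 4)
#   # schedule == [0.1, 0.1, 0.1, 0.1]
#   opt, schedule = get_optimizer(model.parameters(), "sgd,lr=0.1-0.01", 4)
#   # schedule == [0.1, 0.1, 0.01, 0.01]; Trainer.update_learning_rate applies it per epoch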
PRIVACY_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
def getNoiseMultiplier(epsilon, delta, q, steps):
sigma_min, sigma_max = 0.01, 10
while sigma_max - sigma_min > 0.01:
sigma = (sigma_min + sigma_max) / 2
rdp = privacy_analysis.compute_rdp(q, sigma, steps, PRIVACY_ALPHAS)
eps = privacy_analysis.get_privacy_spent(PRIVACY_ALPHAS, rdp, delta)[0]
if eps < epsilon:
sigma_max = sigma
else:
sigma_min = sigma
logger.info(f"Inferred σ={sigma} for ε={epsilon}, δ={delta}")
logger.info("__log__:%s" % json.dumps({"noise_multiplier": sigma}))
return sigma
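# The bisection above relies on the RDP accountant's epsilon being monotone
# decreasing in sigma: when the candidate sigma already meets the target
# epsilon, sigma_max is lowered, otherwise sigma_min is raised, so the loop
# converges (to within 0.01) on the smallest noise multiplier that satisfies
# the (epsilon, delta) budget.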
def create_privacy_engine(model, params, n_data):
if params.private:
if params.noise_multiplier is None:
            _n_data = n_data
sample_rate = params.batch_size / _n_data
steps = params.epochs * _n_data / params.batch_size
params.noise_multiplier = getNoiseMultiplier(params.privacy_epsilon, params.privacy_delta, sample_rate, steps)
if params.max_grad_norm == "mean":
max_grad_norm = 1.0
else:
max_grad_norm = params.max_grad_norm
else:
max_grad_norm = float("inf")
params.noise_multiplier = 0
if params.private or params.log_gradients:
if params.log_gradients and not params.private:
logger.info("Creating privacy engine to compute per sample gradients and log them.")
privacy_engine = PrivacyEngine(
model,
batch_size=params.batch_size,
sample_size=n_data,
alphas=PRIVACY_ALPHAS,
noise_multiplier=params.noise_multiplier,
max_grad_norm=max_grad_norm
)
else:
privacy_engine = None
return privacy_engine
|
calibration_membership-main
|
utils/optimizer.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import OrderedDict
import functools
import os
import time
import numpy as np
import torch
from torch.nn import functional as F
from logging import getLogger
from .optimizer import get_optimizer, create_privacy_engine
from utils.evaluator import accuracy
logger = getLogger()
def log_grad(trainer, param_name, *args, **kwargs):
if param_name is not None:
g = kwargs['per_sample_grad']
trainer.current_grad_sample.append(g.view(g.size(0), -1).clone())
else:
trainer.current_grad_sample = torch.cat(trainer.current_grad_sample, dim=1)
class Trainer:
def __init__(self, model, params, n_data=-1):
# model / params
self.model = model
self.params = params
# set optimizers
self.n_data = n_data
if params.private and params.privacy_delta is None:
params.privacy_delta = 1 / n_data
print(f"Setting privacy delta to {params.privacy_delta}")
self.privacy_engine = create_privacy_engine(model, params, n_data=n_data)
self.optimizer, self.schedule = get_optimizer(model.parameters(), params.optimizer, params.epochs)
if self.privacy_engine is not None:
self.privacy_engine.attach(self.optimizer)
if params.log_gradients:
self.privacy_engine.clipper.set_on_batch_clip_func(functools.partial(log_grad, self))
self.current_grad_sample = []
self.all_grad_samples = None
# training statistics
self.epoch = 0
self.batch=0
self.indices = []
self.n_iter = 0
self.step = 0
self.stats = OrderedDict(
[('processed_i', 0)] +
[('train_loss', [])] +
[('time', [])]+[('train_acc', [])]
)
self.last_time = time.time()
def update_learning_rate(self):
"""
Sets the learning rate to follow the learning schedule
"""
if self.schedule is None:
return
lr = self.schedule[self.epoch]
logger.info("New learning rate for %f" % lr)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def end_step(self):
self.n_iter += 1
self.step += 1
self.print_stats()
def print_stats(self):
"""
Prints statistics about the training.
Statistics are computed on batches since the last print.
(i.e. if printing every 5 batches then it shows speed on the last 5 batches)
"""
if self.n_iter % self.params.print_freq != 0:
return
s_iter = f"Batch {self.n_iter} - "
s_stat = ''
s_stat += ' || '.join([
'{}: {:7.4f}'.format(k, np.mean(v[-self.params.print_freq:])) for k, v in self.stats.items()
if type(v) is list and len(v) > 0
])
# learning rates
s_lr = ""
s_lr = s_lr + (" - LR: ") + " / ".join("{:.4e}".format(group['lr']) for group in self.optimizer.param_groups)
# processing speed
new_time = time.time()
diff = new_time - self.last_time
s_speed = "{:7.2f} images/s - ".format(self.stats['processed_i'] * 1.0 / diff)
self.stats['processed_i'] = 0
self.last_time = new_time
# log speed + stats + learning rate
logger.info(s_iter + s_speed + s_stat + s_lr)
def save(self, name):
"""
Save the model.
"""
path = os.path.join(self.params.dump_path, name)
state_dict = self.state_dict()
logger.info("Saving model to %s ..." % path)
torch.save(state_dict, path)
def save_interim_model(self, name, idx):
"""
Save the model.
"""
path = os.path.join(self.params.dump_path, name)
state_dict = self.state_dict()
logger.info("Saving model and batch ids to %s ..." % path)
torch.save([state_dict, idx], path)
def state_dict(self):
r"""
Returns state_dict, i.e. model parameters as well as general parameters
"""
model = self.model
data = {
'model': model.state_dict(),
'epoch': self.epoch,
'params': vars(self.params)
}
data['optimizer'] = self.optimizer.state_dict()
if self.params.private:
data['privacy_engine'] = self.privacy_engine.state_dict()
if self.params.log_gradients:
data['gradients'] = self.all_grad_samples
return data
def reload_checkpoint(self):
"""
Reload a checkpoint if we find one.
"""
checkpoint_path = os.path.join(self.params.dump_path, "checkpoint.pth")
if not os.path.isfile(checkpoint_path):
return
logger.warning('Reloading checkpoint from %s ...' % checkpoint_path)
device='cuda:'+str(torch.cuda.current_device())
state_dict = torch.load(checkpoint_path, map_location=device)
new_state_dict = OrderedDict()
for k, v in state_dict["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
self.model.load_state_dict(new_state_dict)
if self.params.private:
self.privacy_engine.load_state_dict(state_dict['privacy_engine'])
# reload optimizer
self.optimizer.load_state_dict(state_dict['optimizer'])
# reload stats
self.epoch = state_dict['epoch'] + 1
logger.warning('Checkpoint reloaded. Resuming at epoch %i ...' % self.epoch)
def end_epoch(self, scores):
# Update learning rate
self.update_learning_rate()
# Reset statistics
for k in self.stats.keys():
if type(self.stats[k]) is list:
del self.stats[k][:]
self.epoch += 1
self.batch = 0
# Save checkpoints
self.save("checkpoint.pth")
if self.params.save_periodic > 0 and self.epoch % self.params.save_periodic == 0:
self.save("periodic-%d.pth" % self.epoch)
self.all_grad_samples = None
def maybe_log_gradients(self, idx):
# Log per sample gradient
if self.params.log_gradients:
if self.all_grad_samples is None:
self.all_grad_samples = torch.zeros(self.n_data, self.current_grad_sample.size(1), dtype=self.current_grad_sample.dtype, device=torch.device('cpu'))
self.all_grad_samples[idx] = self.current_grad_sample.cpu()
self.current_grad_sample = []
def maybe_log_model(self, idx):
#log model before gradient updates and minibatch ids
if self.params.log_batch_models:
#save model
model_file_name='checkpoint_epoch_'+str(self.epoch)+'_batch_'+ str(self.batch)+'.pth'
self.save_interim_model(model_file_name, idx)
def maybe_log_epoch_model(self, idx):
#log model before gradient updates and minibatch ids
if self.params.log_epoch_models:
#save model
model_file_name='checkpoint_epoch_'+str(self.epoch)+'.pth'
self.save_interim_model(model_file_name, idx)
def lm_step(self, idx, sentence):
"""
Language modeling step.
"""
start = time.time()
self.model.train()
sentence = sentence.cuda(non_blocking=True)
# Forward + loss
        # Next-token prediction in the iterator's (seq_len, batch) layout:
        # condition on tokens up to t and predict token t+1.
        output = self.model(sentence[:-1])
        loss = F.cross_entropy(output.view(-1, output.size(-1)), sentence[1:].reshape(-1), reduction='mean')
# Gradient step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.maybe_log_gradients(idx)
self.batch+=1
# statistics
self.stats['processed_i'] += self.params.batch_size
self.stats['train_loss'].append(loss.item())
self.stats['time'].append(time.time() - start)
def classif_step(self, idx, images, targets):
"""
Classification step.
"""
start = time.time()
self.maybe_log_model(idx)
if self.batch==0:
self.maybe_log_epoch_model(idx)
self.model.train()
images = images.cuda(non_blocking=True)
# Forward + loss
output = self.model(images)
loss = F.cross_entropy(output, targets.cuda(non_blocking=True), reduction='mean')
# Gradient step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.maybe_log_gradients(idx)
self.batch+=1
# statistics
self.stats['processed_i'] += self.params.batch_size
self.stats['train_loss'].append(loss.item())
self.stats['time'].append(time.time() - start)
self.stats['train_acc'].append(accuracy(output.cpu(), targets, topk=tuple([1]))[0])
def get_scores(self):
        scores = {
            "speed": self.params.batch_size / np.mean(self.stats['time']),
        }
        if self.schedule is not None:
            scores["learning_rate"] = self.schedule[self.epoch]
if self.params.private:
scores["privacy_epsilon"] = self.privacy_engine.get_privacy_spent(1 / self.n_data)[0]
for stat_name in self.stats.keys():
if type(self.stats[stat_name]) is list and len(self.stats[stat_name]) >= 1:
scores[stat_name] = np.mean(self.stats[stat_name])
return scores
|
calibration_membership-main
|
utils/trainer.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
from collections import OrderedDict
import numpy as np
import torch
from torch.nn import functional as F
logger = getLogger()
class Evaluator(object):
    # TODO: get rid of params and only give model in eval()
def __init__(self, model, params):
"""
Initialize evaluator.
"""
self.model = model
self.params = params
@torch.no_grad()
def run_all_evals(self, evals, data_loader, *args, **kwargs):
"""
Run all evaluations.
"""
        assert evals is None or type(evals) is list
        scores = OrderedDict()
        if evals is None or 'classif' in evals:
self.eval_classif(scores, data_loader)
return scores
def eval_classif(self, scores, data_loader):
"""
Evaluate classification.
"""
params = self.params
self.model.eval()
# stats
losses=[]
accuracies = []
topk = [1, 5, 10, 20, 50, 100, 200, 500]
topk = [k for k in topk if k <= params.num_classes]
for _, images, targets in data_loader:
images = images.cuda()
output = self.model(images)
loss = F.cross_entropy(output, targets.cuda(non_blocking=True), reduction='mean')
accuracies.append(accuracy(output.cpu(), targets, topk=tuple(topk)))
losses.append(loss.item())
# loss
scores['valid_loss']=np.mean(losses)
# accuracy
for i_k, k in enumerate(topk):
scores['valid_top%d_acc' % k] = np.mean([x[i_k] for x in accuracies])
def accuracy(output, target, topk=(1,)):
"""
Computes the accuracy over the k top predictions for the specified values of k.
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].float().sum()
res.append(correct_k.mul_(100.0 / batch_size).item())
return res
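# Usage sketch on a toy batch:
#
#   logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
#   target = torch.tensor([1, 1])
#   accuracy(logits, target, topk=(1, 2))   # -> [50.0, 100.0]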
|
calibration_membership-main
|
utils/evaluator.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class KLLeNet(nn.Module):
def __init__(self, params):
super().__init__()
self.conv1 = nn.Conv2d(params.in_channels, 20, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(20, 50, 5)
#for cifar it's 5x5
#for mnist it's 4x4
#for lfw it's 9x6
self.fc1 = nn.Linear(50 * 4 * 4, 500)
self.fc2 = nn.Linear(500, params.num_classes)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x=self.dropout1(x)
x = self.pool(F.relu(self.conv2(x)))
x=self.dropout1(x)
x = x.view(-1, 50 * 4 * 4)
x = F.relu(self.fc1(x))
x=self.dropout2(x)
x = self.fc2(x)
return x
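# Shape check for the hard-coded 50 * 4 * 4 in fc1, assuming 28x28 MNIST
# inputs: conv1 (5x5) -> 24x24, pool -> 12x12, conv2 (5x5) -> 8x8, pool ->
# 4x4 with 50 channels. Per the comment above, CIFAR inputs need 5x5 and LFW
# inputs 9x6 instead.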
|
calibration_membership-main
|
models/KLlenet.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class LinearNet(nn.Module):
def __init__(self, params):
super().__init__()
self.fc = nn.Linear(params.data_num_dimensions, params.num_classes)
    def forward(self, x):
        # Return raw logits: F.cross_entropy used during training applies
        # log-softmax internally, so an extra softmax here would distort the loss.
        return self.fc(x)
|
calibration_membership-main
|
models/linear.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
from .lenet import LeNet
from .KLlenet import KLLeNet
from .lstmlm import LSTMLM
from .alexnet import AlexNet
from .linear import LinearNet
from .mlp import MLP
import torchvision.models as models
def build_model(params):
if params.architecture == "lenet":
return LeNet(params)
elif params.architecture == "kllenet":
return KLLeNet(params)
elif params.architecture == "linear":
return LinearNet(params)
elif params.architecture == "mlp":
return MLP(params)
elif params.architecture=="alexnet":
return AlexNet(params)
elif params.architecture == "lstm":
return LSTMLM(params)
elif params.architecture == "resnet18":
return models.resnet18(pretrained=False)
elif params.architecture == "smallnet":
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, params.num_classes, bias=True),
)
elif params.architecture == "leaks":
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, params.num_classes, bias=True),
)
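# Usage sketch: build_model only needs params.architecture plus whatever
# fields the chosen model reads; the namespace here is hypothetical.
#
#   params = argparse.Namespace(architecture="lenet", num_classes=10)
#   model = build_model(params)   # LeNet with a 10-way classifier head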
|
calibration_membership-main
|
models/__init__.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, params):
super().__init__()
self.fc = nn.Linear(params.data_num_dimensions, 2*params.data_num_dimensions)
self.fc2 = nn.Linear(2*params.data_num_dimensions, params.num_classes)
    def forward(self, x):
        x = F.relu(self.fc(x))
        # Return raw logits; F.cross_entropy applies log-softmax internally.
        return self.fc2(x)
|
calibration_membership-main
|
models/mlp.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
def __init__(self, params):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, params.num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
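# Shape check for the 16 * 5 * 5 in fc1, assuming 32x32 RGB inputs such as
# CIFAR-10: conv1 (5x5) -> 28x28, pool -> 14x14, conv2 (5x5) -> 10x10,
# pool -> 5x5 with 16 channels.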
|
calibration_membership-main
|
models/lenet.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from opacus.layers import DPLSTM
import torch
import torch.nn as nn
class LSTMLM(nn.Module):
def __init__(self, params):
super().__init__()
self.embedding = nn.Embedding(params.n_vocab, params.embedding_dim)
        assert not params.private  # DP-SGD training of this LM is unsupported; DPLSTM is kept only for per-sample gradient logging
if params.private or params.log_gradients:
self.lstm = DPLSTM(input_size=params.embedding_dim, hidden_size=params.hidden_dim, num_layers=params.num_layers)
else:
self.lstm = nn.LSTM(input_size=params.embedding_dim, hidden_size=params.hidden_dim, num_layers=params.num_layers)
        self.prediction = nn.Linear(params.hidden_dim, params.n_vocab)  # project LSTM hidden states (hidden_dim) to the vocabulary
def forward(self, x):
x = self.embedding(x)
output, (hn, cn) = self.lstm(x)
return self.prediction(output)
|
calibration_membership-main
|
models/lstmlm.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, params):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, params.num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
|
calibration_membership-main
|
models/alexnet.py
|
calibration_membership-main
|
attacks/__init__.py
|
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import inspect
import math
from random import randrange
import pickle
import copy
import numpy as np
import pandas as pd
import argparse
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
import sklearn.metrics as metrics
from sklearn.datasets import fetch_openml
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.data import Subset
from training.image_classification import train
from utils.masks import to_mask, evaluate_masks
from torch.nn import functional as F
from models import build_model
from utils.misc import bool_flag
from opacus.grad_sample import GradSampleModule
from cleverhans.torch.attacks.hop_skip_jump_attack import hop_skip_jump_attack
from cleverhans.torch.attacks.carlini_wagner_l2 import carlini_wagner_l2
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
def get_parser():
"""
Generate a parameters parser.
"""
parser = argparse.ArgumentParser(description='Privacy attack parameters')
# config parameters
parser.add_argument("--dump_path", type=str, default=None) # model saving location
parser.add_argument('--print_freq', type=int, default=50) # training printing frequency
parser.add_argument("--save_periodic", type=int, default=0) # training saving frequency
# attack parameters
parser.add_argument("--model_path", type=str, default="model") # path to the private model
parser.add_argument("--attack_type", type=str, default="loss") # type of auxiliary attack
parser.add_argument("--aux_epochs", type=int, default=20) # number of auxiliary training epochs
parser.add_argument("--num_aux", type=int, default=1) # number of auxiliary models
parser.add_argument("--aug_style", type=str, default="mean") # combination method for augmented data values
parser.add_argument("--aux_style", type=str, default="sum") # combination method for multiple aux. model values
parser.add_argument("--public_data", type=str, default="train") # specify which part of the public data to use for aux model training (e.g. train is the training mask, rand50 is a random selection of the public data)
parser.add_argument("--norm_type", type=str, default=None) # norm for gradient norm
parser.add_argument("--num_points", type=int, default=10) # number of points to use for the label-only attack
parser.add_argument("--clip_min", type=float, default=0) # minimum value for adversarial feature in label-only attack
parser.add_argument("--clip_max", type=float, default=1) # maximum value for adversarial feature in label-only attack
# Data parameters
parser.add_argument("--data_root", type=str, default="data") # path to the data
parser.add_argument("--dataset", type=str, choices=["cifar10", "imagenet", "cifar100", "gaussian","credit", "hep", "adult", "mnist", "lfw"], default="cifar10")
parser.add_argument("--mask_path", type=str, required=True) # path to the data mask
parser.add_argument('--n_data', type=int, default=500) # specify number of data points for gaussian data
parser.add_argument('--data_num_dimensions', type=int, default=75) # number of features for non-image data
parser.add_argument('--random_seed', type=int, default=10) # seed for gaussian data
parser.add_argument("--num_classes", type=int, default=10) # number of classes for classification task
parser.add_argument("--in_channels", type=int, default=3) # number of input channels for image data
# Model parameters
parser.add_argument("--architecture", choices=["lenet", "smallnet", "resnet18", "kllenet","linear", "mlp"], default="lenet")
# training parameters
parser.add_argument("--aug", type=bool_flag, default=False) # data augmentation flag
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--optimizer", default="sgd,lr=0.1,momentum=0.9")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--log_gradients", type=bool_flag, default=False)
parser.add_argument("--log_batch_models", type=bool_flag, default=False) # save model for each batch of data
parser.add_argument("--log_epoch_models", type=bool_flag, default=False) # save model for each training epoch
# privacy parameters
parser.add_argument("--private", type=bool_flag, default=False) # privacy flag
parser.add_argument("--noise_multiplier", type=float, default=None)
parser.add_argument("--privacy_epsilon", type=float, default=None)
parser.add_argument("--privacy_delta", type=float, default=None)
parser.add_argument("--max_grad_norm", type=float, default=1.0)
#multi gpu paramaeters
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--master_port", type=int, default=-1)
parser.add_argument("--debug_slurm", type=bool_flag, default=False)
return parser
def adult_data_transform(df):
"""
transform adult data.
"""
binary_data = pd.get_dummies(df)
feature_cols = binary_data[binary_data.columns[:-2]]
scaler = preprocessing.StandardScaler()
data = pd.DataFrame(scaler.fit_transform(feature_cols), columns=feature_cols.columns)
return data
def get_dataset(params):
"""
load data for privacy attacks
"""
if params.dataset=='cifar10':
        if params.aug:
print('Using data augmentation')
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
model_transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation')
normalize = [transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
model_transform = transforms.Compose(normalize)
return torchvision.datasets.CIFAR10(root=params.data_root, train=True, download=True, transform=model_transform)
if params.dataset=='mnist':
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])
return torchvision.datasets.MNIST(root=params.data_root, train=True, download=True, transform=transform)
elif params.dataset=='imagenet':
        if params.aug:
print('Using data augmentation to train model')
augmentations = [transforms.Resize(256),transforms.RandomResizedCrop(224),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
dataset = torchvision.datasets.ImageFolder(root=params.data_root+'/train',transform=transform)
return dataset
elif params.dataset=='cifar100':
if params.aug:
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])]
transform = transforms.Compose(augmentations + normalize)
else:
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])])
dataset = torchvision.datasets.CIFAR100(root=params.data_root, train=True, download=True, transform=transform)
return dataset
elif params.dataset=='credit':
cred=fetch_openml('credit-g')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(cred.data).transform(cred.data)
target = preprocessing.LabelEncoder().fit(cred.target).transform(cred.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
ids=np.arange(1000)[:800]
final_data = []
for i in ids:
final_data.append([data[i], target[i]])
        params.num_classes = 2
return final_data
elif params.dataset=='hep':
hep=fetch_openml('hepatitis')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(hep.data).transform(hep.data)
target = preprocessing.LabelEncoder().fit(hep.target).transform(hep.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
ids=np.arange(155)[:124]
final_data = []
for i in ids:
final_data.append([data[i], target[i]])
params.num_classes = 2
return final_data
elif params.dataset == 'adult':
columns = ["age", "workClass", "fnlwgt", "education", "education-num","marital-status", "occupation", "relationship","race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country", "income"]
        train_data = pd.read_csv(params.data_root+'/adult.data', names=columns, sep=' *, *', engine='python', na_values='?')
        test_data = pd.read_csv(params.data_root+'/adult.test', names=columns, sep=' *, *', engine='python', skiprows=1, na_values='?')
original_train=train_data
original_test=test_data
num_train = len(original_train)
original = pd.concat([original_train, original_test])
labels = original['income']
labels = labels.replace('<=50K', 0).replace('>50K', 1)
labels = labels.replace('<=50K.', 0).replace('>50K.', 1)
# Remove target
del original["income"]
data = adult_data_transform(original)
train_data = data[:num_train]
train_labels = labels[:num_train]
test_data = data[num_train:]
test_labels = labels[num_train:]
test_data=torch.tensor(test_data.to_numpy()).float()
train_data=torch.tensor(train_data.to_numpy()).float()
test_labels=torch.tensor(test_labels.to_numpy(dtype='int64')).long()
train_labels=torch.tensor(train_labels.to_numpy(dtype='int64')).long()
final_data = []
for i in np.arange(len(train_data)):
final_data.append([train_data[i], train_labels[i]])
return final_data
def get_uncalibrated_gradnorm(params, mask):
"""
return uncalibrated gradient norm values for data indicated by the mask.
"""
#load the dataset
dataset = get_dataset(params)
#initialize to 0
grad_norms=np.zeros(len(mask))
#get the final model
final_model=build_model(params)
final_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_final = torch.load(final_model_path, map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_final["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
final_model.load_state_dict(new_state_dict)
else:
final_model.load_state_dict(state_dict_final['model'])
final_model=final_model.cuda()
original_model=[]
for p in final_model.parameters():
original_model.append(p.view(-1))
original_model=torch.cat(original_model)
    #get the ids of the examples selected by the mask
ids=(mask==True).nonzero().flatten().numpy()
    #load examples one by one; see get_calibrated_gradnorm for the batched version using Opacus's GradSampleModule
for id in ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#reload the original batch model, if imagenet may need to rename keys.
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_final["model"].items():
                if k[:7]=='module.':  # strip the 'module.' prefix
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
final_model.load_state_dict(new_state_dict)
else:
final_model.load_state_dict(state_dict_final['model'])
        # reset gradients before computing this example's gradient
        final_model.zero_grad()
#get the gradient
output=final_model(image)
loss=F.cross_entropy(output, target)
loss.backward()
grads=[]
for param in final_model.parameters():
grads.append(param.grad.view(-1))
grads = torch.cat(grads)
g=grads.cpu().numpy()
grad_norms[id]=np.linalg.norm(g)
return grad_norms
def get_calibrated_gradnorm(params, private_model, private_params, attack_model,attack_params, ids, mask, aug_style='mean',norm_type=None):
"""
return calibrated gradient norm values.
"""
#load the dataset
dataset = get_dataset(params)
#initialize to 0
grad_norms=np.zeros(len(mask))
if params.aug:
batch_vals=[[0] for i in np.arange(len(mask))]
for t in np.arange(10):
batched_ids=np.array_split(ids, 1000)
for b_ids in batched_ids:
image_data=torch.stack([dataset[i][0] for i in b_ids])
image_data=image_data.cuda()
target_data=torch.stack([torch.tensor(dataset[i][1]) for i in b_ids])
target_data=target_data.cuda()
private_model.zero_grad()
out_private=private_model(image_data)
loss_private=F.cross_entropy(out_private, target_data)
loss_private.backward()
attack_model.zero_grad()
out_attack=attack_model(image_data)
loss_attack=F.cross_entropy(out_attack, target_data)
loss_attack.backward()
for i,id in enumerate(b_ids):
private_grads=[]
for param in private_model.parameters():
private_grads.append(param.grad_sample[i].view(-1))
private_grads = torch.cat(private_grads)
attack_grads=[]
for param in attack_model.parameters():
attack_grads.append(param.grad_sample[i].view(-1))
attack_grads = torch.cat(attack_grads)
g_private=private_grads.cpu().numpy()
g_attack=attack_grads.cpu().numpy()
if norm_type=='inf':
batch_vals[id].append(max(g_private-g_attack))
else:
if norm_type=='1':
norm_type=1
elif norm_type=='2':
norm_type=2
elif norm_type=='3':
norm_type=3
batch_vals[id].append(np.linalg.norm(g_private, ord=norm_type)-np.linalg.norm(g_attack,ord=norm_type))
for id in ids:
if aug_style=='mean':
grad_norms[id]=np.mean(batch_vals[id][1:])
elif aug_style=='max':
grad_norms[id]=np.max(batch_vals[id][1:])
elif aug_style=='median':
grad_norms[id]=np.median(batch_vals[id][1:])
elif aug_style=='std':
grad_norms[id]=np.std(batch_vals[id][1:])
else:
batched_ids=np.array_split(ids, 1000)
for b_ids in batched_ids:
image_data=torch.stack([dataset[i][0] for i in b_ids])
image_data=image_data.cuda()
target_data=torch.stack([torch.tensor(dataset[i][1]) for i in b_ids])
target_data=target_data.cuda()
private_model.zero_grad()
out_private=private_model(image_data)
loss_private=F.cross_entropy(out_private, target_data)
loss_private.backward()
attack_model.zero_grad()
out_attack=attack_model(image_data)
loss_attack=F.cross_entropy(out_attack, target_data)
loss_attack.backward()
for i,id in enumerate(b_ids):
private_grads=[]
for param in private_model.parameters():
private_grads.append(param.grad_sample[i].view(-1))
private_grads = torch.cat(private_grads)
attack_grads=[]
for param in attack_model.parameters():
attack_grads.append(param.grad_sample[i].view(-1))
attack_grads = torch.cat(attack_grads)
g_private=private_grads.cpu().numpy()
g_attack=attack_grads.cpu().numpy()
if norm_type=='inf':
grad_norms[id]=max(g_private-g_attack)
else:
if norm_type=='1':
norm_type=1
elif norm_type=='2':
norm_type=2
elif norm_type=='3':
norm_type=3
grad_norms[id]=np.linalg.norm(g_private, ord=norm_type)-np.linalg.norm(g_attack,ord=norm_type)
return grad_norms
def calibrated_gradient_attack(params):
"""
run a calibrated gradient attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the attack model parameters
original_attack_model=[]
for p in attack_model.parameters():
original_attack_model.append(p.view(-1))
original_attack_model=torch.cat(original_attack_model)
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
original_private_model=[]
for p in private_model.parameters():
original_private_model.append(p.view(-1))
original_private_model=torch.cat(original_private_model)
    #get the ids of the private train/heldout examples
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
# reload model to allow use of gradsamplemodule
new_model=build_model(params)
new_model_path = os.path.join(params.dump_path, "checkpoint.pth")
state_dict_new = torch.load(new_model_path)
new_model.load_state_dict(state_dict_new['model'])
new_model=new_model.cuda()
private_model=GradSampleModule(private_model)
attack_model=GradSampleModule(new_model)
train_dots=get_calibrated_gradnorm(params, private_model, original_private_model, attack_model,original_attack_model,private_train_ids,hidden_masks['private']['train'])
heldout_dots=get_calibrated_gradnorm(params, private_model, original_private_model, attack_model,original_attack_model,private_heldout_ids,hidden_masks['private']['heldout'])
return train_dots, heldout_dots
def get_calibrated_losses(params, private_model, attack_model, ids, mask, aug_style='mean'):
"""
return calibrated losses
"""
#load the dataset
dataset = get_dataset(params)
    #initialize losses to 0
losses=np.zeros(len(mask))
if params.aug:
summed_loss=[[0] for i in np.arange(len(mask))]
for j in np.arange(10):
print('aug',j)
batched_ids=np.array_split(ids, 1000)
for b_ids in batched_ids:
image_data=torch.stack([dataset[i][0] for i in b_ids])
image_data=image_data.cuda()
target_data=torch.stack([torch.tensor(dataset[i][1]) for i in b_ids])
target_data=target_data.cuda()
out_private=private_model(image_data)
out_attack=attack_model(image_data)
for i,id in enumerate(b_ids):
output=out_private[i].unsqueeze(0)
loss=F.cross_entropy(output, target_data[i].unsqueeze(0))
attack_output=out_attack[i].unsqueeze(0)
attack_loss=F.cross_entropy(attack_output, target_data[i].unsqueeze(0))
loss_diff=loss-attack_loss
summed_loss[id].append(loss_diff.cpu().detach().numpy())
for id in ids:
if aug_style=='mean':
losses[id]=np.mean(summed_loss[id][1:])
elif aug_style=='max':
losses[id]=np.max(summed_loss[id][1:])
elif aug_style=='median':
losses[id]=np.median(summed_loss[id][1:])
elif aug_style=='std':
losses[id]=np.std(summed_loss[id][1:])
else:
for id in ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#get the loss
output=private_model(image)
loss=F.cross_entropy(output, target)
attack_output=attack_model(image)
attack_loss=F.cross_entropy(attack_output, target)
losses[id]=loss-attack_loss
return losses
def get_calibrated_confidences(params, private_model, attack_model, ids, mask, aug_style='mean'):
"""
return calibrated confidences.
"""
#load the dataset
dataset = get_dataset(params)
    #initialize confidences to 0
confidences=np.zeros(len(mask))
if params.aug:
summed_confs=[[0] for i in np.arange(len(mask))]
for j in np.arange(10):
print('Aug', j)
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
log_softmax = torch.nn.LogSoftmax(dim=1)
output=private_model(images)
attack_output=attack_model(images)
log_output=log_softmax(output)
log_attack_output=log_softmax(attack_output)
private_confidences,_=torch.max(log_output,dim=1)
attack_confidences,_=torch.max(log_attack_output,dim=1)
confs=private_confidences-attack_confidences
confs=confs.cpu().detach().numpy()
for i,id in enumerate(ids):
summed_confs[id].append(confs[i])
for id in ids:
if aug_style=='mean':
confidences[id]=np.mean(summed_confs[id][1:])
elif aug_style=='max':
confidences[id]=np.max(summed_confs[id][1:])
elif aug_style=='median':
confidences[id]=np.median(summed_confs[id][1:])
elif aug_style=='std':
confidences[id]=np.std(summed_confs[id][1:])
else:
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
log_softmax = torch.nn.LogSoftmax(dim=1)
output=private_model(images)
attack_output=attack_model(images)
log_output=log_softmax(output)
log_attack_output=log_softmax(attack_output)
private_confidences,_=torch.max(log_output,dim=1)
attack_confidences,_=torch.max(log_attack_output,dim=1)
        # match the aug branch: return a numpy array rather than a CUDA tensor
        confidences=(private_confidences-attack_confidences).cpu().detach().numpy()
return confidences
def calibrated_loss_attack(params):
"""
run a calibrated loss attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
    #get the ids of the private train and heldout examples
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
train_losses=get_calibrated_losses(params, private_model, attack_model,private_train_ids,hidden_masks['private']['train'])
heldout_losses=get_calibrated_losses(params, private_model, attack_model,private_heldout_ids,hidden_masks['private']['heldout'])
return train_losses, heldout_losses
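# A minimal helper (not part of the original attack code) sketching how the
# (train, heldout) score pairs returned by these attacks are commonly
# summarized: label train points as members and compute ROC AUC over the
# calibrated scores. Assumes scikit-learn is installed; for attacks where
# members receive lower scores (e.g. calibrated losses), negate the scores
# before calling.
def attack_auc(train_vals, heldout_vals):
    from sklearn.metrics import roc_auc_score
    labels = np.concatenate([np.ones(len(train_vals)), np.zeros(len(heldout_vals))])
    scores = np.concatenate([np.asarray(train_vals), np.asarray(heldout_vals)])
    return roc_auc_score(labels, scores)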
def calibrated_confidence_attack(params):
"""
run a calibrated confidence attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
    #get the ids of the private train and heldout examples
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
train_losses=get_calibrated_confidences(params, private_model, attack_model,private_train_ids,hidden_masks['private']['train'])
heldout_losses=get_calibrated_confidences(params, private_model, attack_model,private_heldout_ids,hidden_masks['private']['heldout'])
return train_losses, heldout_losses
def auxiliary_attack(params, aux_epochs, attack_type='loss', aug_style='mean', norm_type=None, public_data='train', num_aux=1,aux_style='sum'):
"""
    run an auxiliary attack; the attack type (loss, grad_norm, conf, dist) is specified by attack_type.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path,map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_private["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
private_model.load_state_dict(new_state_dict)
else:
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
# updated_params=copy.deepcopy(params)
updated_params=params
updated_params.epochs=updated_params.epochs+aux_epochs
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
train_losses=np.zeros(len(known_masks['public']))
heldout_losses=np.zeros(len(known_masks['public']))
for i in np.arange(num_aux):
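        # Recover the model index from the checkpoint path so each auxiliary
        # run gets a distinct dump directory; the slicing below assumes the
        # dataset-specific directory naming used for params.model_path.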
if params.dataset=='cifar10' or params.dataset=='credit' or params.dataset=='hep' or params.dataset=='adult' or params.dataset=='mnist':
model_num=params.model_path[-6:-5]
elif params.dataset=='cifar100':
model_num=params.model_path[-15:-14]
else:
model_num='0'
new_model_path='updated_model_'+str(aux_epochs) +'_'+str(params.batch_size)+'_'+params.optimizer+'_aux_model_'+str(i)+'_num_aux_'+str(num_aux)+'_public_data_'+params.public_data+'_model_'+model_num
if not os.path.isdir(new_model_path):
os.mkdir(new_model_path)
updated_params.dump_path=new_model_path
if updated_params.local_rank!=-1:
updated_params.local_rank=-1
path = os.path.join(updated_params.dump_path, 'checkpoint.pth')
torch.save(state_dict_private, path)
if public_data=='train':
print('Using public training data for auxiliary model')
updated_model=train(updated_params, hidden_masks['public']['train'])
elif public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
updated_model=train(updated_params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
updated_model=train(updated_params, known_masks['public'])
updated_model=updated_model.cuda()
new_model=build_model(params)
new_model_path=os.path.join(updated_params.dump_path, "checkpoint.pth")
state_dict_new=torch.load(new_model_path,map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_new["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
new_model.load_state_dict(new_state_dict)
else:
new_model.load_state_dict(state_dict_new['model'])
new_model=new_model.cuda()
#get losses
if attack_type=='loss':
train_vals=get_calibrated_losses(params, private_model, updated_model,private_train_ids,hidden_masks['private']['train'], aug_style)
heldout_vals=get_calibrated_losses(params, private_model, updated_model,private_heldout_ids,hidden_masks['private']['heldout'], aug_style)
elif attack_type=='conf':
train_vals=get_calibrated_confidences(params, private_model, updated_model,private_train_ids,hidden_masks['private']['train'], aug_style)
heldout_vals=get_calibrated_confidences(params, private_model, updated_model,private_heldout_ids,hidden_masks['private']['heldout'], aug_style)
elif attack_type=='dist':
private_train_ids=private_train_ids[np.random.choice(len(private_train_ids), size=params.num_points, replace=False)]
private_heldout_ids=private_heldout_ids[np.random.choice(len(private_heldout_ids), size=params.num_points, replace=False)]
train_vals=get_calibrated_distances(params, private_model, updated_model,private_train_ids)
heldout_vals=get_calibrated_distances(params, private_model, updated_model,private_heldout_ids)
else:
original_private_model=[]
for p in private_model.parameters():
original_private_model.append(p.view(-1))
original_private_model=torch.cat(original_private_model)
original_updated_model=[]
for p in new_model.parameters():
original_updated_model.append(p.view(-1))
original_updated_model=torch.cat(original_updated_model)
if i==0:
private_model=GradSampleModule(private_model)
attack_model=GradSampleModule(new_model)
train_vals=get_calibrated_gradnorm(params, private_model,original_private_model, attack_model,original_updated_model,private_train_ids,hidden_masks['private']['train'], aug_style=aug_style, norm_type=norm_type)
heldout_vals=get_calibrated_gradnorm(params, private_model, original_private_model,attack_model,original_updated_model,private_heldout_ids,hidden_masks['private']['heldout'], aug_style=aug_style,norm_type=norm_type)
if aux_style=='max':
train_losses=np.maximum(train_losses, train_vals)
heldout_losses=np.maximum(heldout_losses, heldout_vals)
else:
if params.attack_type=='conf' or params.attack_type=='dist':
train_losses=train_vals
heldout_losses=heldout_vals
else:
train_losses+=train_vals
heldout_losses+=heldout_vals
if aux_style=='mean':
train_losses=train_losses/num_aux
heldout_losses=heldout_losses/num_aux
return train_losses, heldout_losses
def get_losses(params):
"""
return uncalibrated losses.
"""
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path,map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_private["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
private_model.load_state_dict(new_state_dict)
else:
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
    #get the ids of the private train and heldout examples
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
#load the dataset
dataset = get_dataset(params)
    #collect per-example losses on the private model
train_losses=[]
heldout_losses=[]
for id in private_train_ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#get the loss
output=private_model(image)
loss=F.cross_entropy(output, target).item()
train_losses.append(loss)
for id in private_heldout_ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#get the loss
output=private_model(image)
loss=F.cross_entropy(output, target).item()
heldout_losses.append(loss)
return train_losses,heldout_losses
def get_confidences(params):
"""
return uncalibrated confidences.
"""
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
device = torch.device('cpu')
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path,map_location=device)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cpu()
    #get the ids of the private train and heldout examples
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
#load the dataset
dataset = get_dataset(params)
if params.aug:
        train_confidences=np.zeros(len(hidden_masks['private']['train']))
        heldout_confidences=np.zeros(len(hidden_masks['private']['heldout']))
        train_summed_confs=[[0] for i in np.arange(len(hidden_masks['private']['train']))]
        heldout_summed_confs=[[0] for i in np.arange(len(hidden_masks['private']['heldout']))]
for j in np.arange(10):
print('Aug', j)
train_images=torch.stack([dataset[i][0] for i in private_train_ids])
train_images=train_images.cpu()
heldout_images=torch.stack([dataset[i][0] for i in private_heldout_ids])
heldout_images=heldout_images.cpu()
log_softmax = torch.nn.LogSoftmax(dim=1)
train_output=private_model(train_images)
heldout_output=private_model(heldout_images)
log_train_output=log_softmax(train_output)
log_heldout_output=log_softmax(heldout_output)
train_confs,_=torch.max(log_train_output,dim=1)
heldout_confs,_=torch.max(log_heldout_output,dim=1)
train_confs=train_confs.cpu().detach().numpy()
heldout_confs=heldout_confs.cpu().detach().numpy()
for i,id in enumerate(private_train_ids):
train_summed_confs[id].append(train_confs[i])
for i,id in enumerate(private_heldout_ids):
heldout_summed_confs[id].append(heldout_confs[i])
for id in private_train_ids:
if params.aug_style=='mean':
train_confidences[id]=np.mean(train_summed_confs[id][1:])
elif params.aug_style=='max':
train_confidences[id]=np.max(train_summed_confs[id][1:])
elif params.aug_style=='median':
train_confidences[id]=np.median(train_summed_confs[id][1:])
elif params.aug_style=='std':
train_confidences[id]=np.std(train_summed_confs[id][1:])
for id in private_heldout_ids:
if params.aug_style=='mean':
heldout_confidences[id]=np.mean(heldout_summed_confs[id][1:])
elif params.aug_style=='max':
heldout_confidences[id]=np.max(heldout_summed_confs[id][1:])
elif params.aug_style=='median':
heldout_confidences[id]=np.median(heldout_summed_confs[id][1:])
elif params.aug_style=='std':
heldout_confidences[id]=np.std(heldout_summed_confs[id][1:])
train_confidences=train_confidences[private_train_ids]
heldout_confidences=heldout_confidences[private_heldout_ids]
else:
train_confidences=[]
heldout_confidences=[]
train_images=torch.stack([dataset[i][0] for i in private_train_ids])
train_images=train_images.cpu()
heldout_images=torch.stack([dataset[i][0] for i in private_heldout_ids])
heldout_images=heldout_images.cpu()
log_softmax = torch.nn.LogSoftmax(dim=1)
train_output=private_model(train_images)
heldout_output=private_model(heldout_images)
log_train_output=log_softmax(train_output)
log_heldout_output=log_softmax(heldout_output)
train_confidences,_=torch.max(log_train_output,dim=1)
heldout_confidences,_=torch.max(log_heldout_output,dim=1)
train_confidences=train_confidences.cpu().detach().numpy()
heldout_confidences=heldout_confidences.cpu().detach().numpy()
return train_confidences,heldout_confidences
def get_calibrated_distances(params, model1, model2, ids):
"""
return calibrated boundary distances.
"""
dataset = get_dataset(params)
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
targets=torch.stack([torch.tensor(dataset[i][1]) for i in ids])
targets=targets.cuda()
outputs1=model1(images)
outputs2=model2(images)
images_pert1= hop_skip_jump_attack(model1,images,2, verbose=False,clip_min=params.clip_min, clip_max=params.clip_max)
images_pert2= hop_skip_jump_attack(model2,images,2, verbose=False,clip_min=params.clip_min, clip_max=params.clip_max)
# images_pert1=carlini_wagner_l2(model1,images,params.num_classes ,targets)
# images_pert2=carlini_wagner_l2(model2,images,params.num_classes ,targets)
dists1=[]
for i, id in enumerate(ids):
_, pred = torch.topk(outputs1[i], 1)
if pred==targets[i].item():
dists1.append(torch.norm(images_pert1[i]- images[i], p=2).item())
else:
dists1.append(-torch.norm(images_pert1[i]- images[i], p=2).item())
dists2=[]
for i, id in enumerate(ids):
_, pred = torch.topk(outputs2[i], 1)
if pred==targets[i].item():
dists2.append(torch.norm(images_pert2[i]- images[i], p=2).item())
else:
            dists2.append(-torch.norm(images_pert2[i]- images[i], p=2).item())
calibrated_dists=np.subtract(np.array(dists1),np.array(dists2))
return calibrated_dists
def calibrated_distance_attack(params, num=10):
"""
run calibrated boundary distance attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
#get the appropriate ids
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_train_ids=private_train_ids[np.random.choice(len(private_train_ids), size=num, replace=False)]
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
private_heldout_ids=private_heldout_ids[np.random.choice(len(private_heldout_ids), size=num, replace=False)]
train_losses=get_calibrated_distances(params, private_model, attack_model,private_train_ids)
heldout_losses=get_calibrated_distances(params, private_model, attack_model,private_heldout_ids)
return train_losses, heldout_losses
def get_boundary_distances(params, model, ids):
"""
return uncalibrated boundary distances.
"""
dataset = get_dataset(params)
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
targets=[]
for i in ids:
temp=np.zeros(params.num_classes)
temp[dataset[i][1]]=1
temp=torch.tensor(temp)
targets.append(temp)
original_targets=torch.stack([torch.tensor(dataset[i][1]) for i in ids])
original_targets=original_targets.cuda()
targets=torch.stack(targets)
targets=targets.cuda()
outputs=model(images)
images_pert= hop_skip_jump_attack(model,images,2 ,verbose=False, clip_min=params.clip_min, clip_max=params.clip_max)
# images_pert=carlini_wagner_l2(model,images,params.num_classes ,original_targets)
dists=[]
for i, id in enumerate(ids):
_, pred = torch.topk(outputs[i], 1)
if pred==original_targets[i].item():
dists.append(torch.norm(images_pert[i]- images[i], p=2).item())
else:
dists.append(0)
return dists
def boundary_distance_attack(params, num=10):
"""
run uncalibrated boundary distance attack.
"""
train_mask=torch.load(params.mask_path+'/hidden/train.pth')
heldout_mask=torch.load(params.mask_path+'/hidden/heldout.pth')
train_ids=(train_mask==True).nonzero().flatten().numpy()
heldout_ids=(heldout_mask==True).nonzero().flatten().numpy()
train_ids=train_ids[np.random.choice(len(train_ids), size=num, replace=False)]
heldout_ids=heldout_ids[np.random.choice(len(heldout_ids), size=num, replace=False)]
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
train_dists=get_boundary_distances(params, private_model, train_ids )
heldout_dists=get_boundary_distances(params, private_model, heldout_ids )
return train_dists, heldout_dists
if __name__ == '__main__':
parser = get_parser()
params = parser.parse_args()
train_vals, heldout_vals=calibrated_loss_attack(params)
|
calibration_membership-main
|
attacks/privacy_attacks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
from overlap.utils import logging as lu
log = logging.getLogger(__name__)
def individual_sort(aug_dists):
out = []
included = set()
arg_sort = np.argsort(aug_dists, axis=0)
sort = np.sort(aug_dists, axis=0)
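    # arg_sort[r, c] holds the index of the r-th closest augmentation to
    # corruption c. Walk rank by rank, visiting corruptions within a rank in
    # order of increasing distance, so each corruption contributes its
    # nearest not-yet-included augmentations early in the returned ordering.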
for row in range(len(arg_sort)):
curr_arg = arg_sort[row]
curr_dists = sort[row]
sorted_args = curr_arg[np.argsort(curr_dists)]
for i in sorted_args:
if i not in included:
out.append(i)
included.add(i)
return np.array(out)
@hydra.main(config_path="conf/closest_augs.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
lr_policy = instantiate(cfg.optim.lr_policy)
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Transforms found, loading feature extractor is unnecessary. Skipping.")
else:
feature_extractor = instantiate(cfg.ft)
feature_extractor.train()
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Transforms found, feature extraction is unnecessary. Skipping.")
elif cfg.aug_feature_file and os.path.exists(cfg.aug_feature_file):
log.info("Found feature file. Loading from {}".format(cfg.aug_feature_file))
data = np.load(cfg.aug_feature_file)
augmentation_features = data['features']
indices = data['indices']
transforms = data['transforms']
else:
ft_augmentation_dataset = instantiate(cfg.ft_augmentation)
transforms = ft_augmentation_dataset.transform_list
indices = np.random.choice(np.arange(len(ft_augmentation_dataset)), size=cfg.num_images, replace=False)
ft_augmentation_dataset = ft_augmentation_dataset.serialize(indices)
augmentation_features = extract_features(feature_extractor,
ft_augmentation_dataset,
cfg.ft_augmentation.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
if cfg.aug_feature_file:
np.savez(cfg.aug_feature_file,
features=augmentation_features,
indices=indices,
transforms=transforms)
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Found transform file. Loading from {}.".format(cfg.transform_file))
sorted_transforms = np.load(cfg.transform_file)
else:
aug_strings = cfg.ft_corrupt.aug_string.split("--")
distances = np.zeros((len(augmentation_features), len(aug_strings)))
for i, aug in enumerate(aug_strings):
with omegaconf.open_dict(cfg):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug)
if cfg.num_corrupt_images and i==0:
indices = np.random.choice(np.arange(len(ft_corrupt_dataset)), size=cfg.num_corrupt_images, replace=False)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
corruption_features = extract_features(feature_extractor,
ft_corrupt_dataset,
cfg.ft_corrupt.batch_size,
cfg.data_loader,
average=True)
corruption_features = corruption_features.reshape(1, -1)
dists = np.linalg.norm(augmentation_features - corruption_features, axis=-1)
distances[:,i] = dists
sorted_dist_args = individual_sort(distances)
sorted_transforms = transforms[sorted_dist_args]
if cfg.transform_file:
np.save(cfg.transform_file, sorted_transforms)
train_dataset = instantiate(cfg.train)
if cfg.selection_type == 'closest':
train_dataset.transform_list = sorted_transforms[cfg.offset:cfg.offset+cfg.num_transforms]
elif cfg.selection_type == 'farthest':
train_dataset.transform_list = sorted_transforms[-cfg.offset-cfg.num_transforms:-cfg.offset]\
if cfg.offset != 0 else sorted_transforms[-cfg.num_transforms:]
else:
train_dataset.transform_list = sorted_transforms[np.random.choice(np.arange(len(sorted_transforms)), size=cfg.num_transforms, replace=False)]
test_dataset = instantiate(cfg.test)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch')
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
|
augmentation-corruption-fbr_main
|
experiments/closest_augs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/test_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
print("Loading test set...")
test_dataset = instantiate(cfg.test)
checkpoint = torch.load(cfg.weights, map_location='cpu')
if cfg.num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
print("Testing...")
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
num_gpus=cfg.num_gpus)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file,
num_gpus=cfg.num_gpus,
log_name='train_imagenet.log')
if __name__=="__main__":
run()
|
augmentation-corruption-fbr_main
|
experiments/test_imagenet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import overlap.utils.logging as lu
import decimal
import simplejson
import numpy as np
import omegaconf
from itertools import combinations
from itertools import product
from scipy.special import comb
parser = argparse.ArgumentParser(description="Collect run summaries.")
parser.add_argument('--new_corr_dir', dest='data_dir', required=True)
parser.add_argument('--baseline_corr_dir', dest='baseline_dir', required=True)
parser.add_argument('--precision', type=float, dest='precision', default=0.01)
parser.add_argument('--target_error', type=float, dest='target', required=True)
parser.add_argument('--num', type=int, dest='num', default=5)
parser.add_argument('--out', dest='out', required=True)
parser.add_argument('--log_name', default='severity_scan.log', dest='log_name')
def get_data(base_dirs, exclusions=[], log_file='severity_scan.log'):
features = {}
errors = {}
feature_file = 'features.npz'
path_stack = base_dirs
while path_stack:
curr_dir = path_stack.pop()
subdirs = [os.path.join(curr_dir, x) for x in os.listdir(curr_dir) if x[0] != '.']
subdirs = [x for x in subdirs if os.path.isdir(x)]
path_stack.extend(subdirs)
summary_file = os.path.join(curr_dir, log_file)
if os.path.exists(summary_file):
curr_features = np.load(os.path.join(curr_dir, feature_file))
features.update({k : v for k,v in curr_features.items() if k.split("-")[0] not in exclusions})
stats = lu.load_json_stats(summary_file)
curr_errs = {stats[i]["_type"] : stats[i]["top1_err"] for i in range(len(stats))\
if stats[i]["_type"] != "test_epoch" and stats[i]["_type"].split("-")[0] not in exclusions}
errors.update(curr_errs)
return errors, features
def get_average_spread(baseline_errs):
'''
Calculate the average spread in severity in the baseline data, so
the new corruption datasets can attempt to match it.
'''
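    # Example: a baseline corruption with errors 20, 30 and 45 at severities
    # 1, 3 and 5 has spread (|20/30 - 1| + |45/30 - 1|) / 2 = 5/12; the
    # function averages this quantity over all baseline corruptions.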
bcorrs = sorted(list(set([c.split("-")[0] for c in baseline_errs.keys()])))
avg = 0.0
for bcorr in bcorrs:
lower = abs((baseline_errs["{}-1".format(bcorr)] / baseline_errs["{}-3".format(bcorr)] - 1))
upper = abs((baseline_errs["{}-5".format(bcorr)] / baseline_errs["{}-3".format(bcorr)] - 1))
avg += (lower + upper) / 2
return avg / len(bcorrs)
def build_sets(corr_errs, avg_spread):
'''
For each severity 3-8, associate a set of 5 severities with it that
best match the average spread, where that severity is the middle of
the five.
Inputs:
corr_errs: dictionary where each key is a string "{corr}-{severity}"
and each value is the test error.
avg_spread: float specifying the average spread to try to match
Output:
dictionary where each key is a string giving the corruption name,
and each value is a list of 5-tuples giving all sets of 5 severities
associated to that corruption.
'''
corrs = sorted(list(set([c.split("-")[0] for c in corr_errs.keys()])))
corr_sets = {c : [] for c in corrs}
for c in corrs:
sevs = sorted([float(i.split("-")[1]) for i in corr_errs.keys() if c == i.split("-")[0]])
for i in np.arange(2, len(sevs)-2):
# Sev 1
best = float('inf')
best_match_s1 = None
for j in np.arange(0, i-1):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(-avg_spread - sep)
if sep_sep <= best:
best = sep_sep
best_match_s1 = j
# Sev 2
best = float('inf')
best_match_s2 = None
for j in np.arange(best_match_s1+1, i):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(-avg_spread/2 - sep)
if sep_sep <= best:
best = sep_sep
best_match_s2 = j
# Sev 5
best = float('inf')
best_match_s5 = None
for j in np.arange(i+2, len(sevs)):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(avg_spread - sep)
if sep_sep <= best:
best = sep_sep
best_match_s5 = j
# Sev 4
best = float('inf')
best_match_s4 = None
for j in np.arange(i+1, best_match_s5):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(avg_spread/2 - sep)
if sep_sep <= best:
best = sep_sep
best_match_s4 = j
corr_sets[c].append((sevs[best_match_s1], sevs[best_match_s2], sevs[i], sevs[best_match_s4], sevs[best_match_s5]))
return corr_sets
def build_distance_table(baseline_features, corr_features):
'''
Calculates distances between corruption features. Across baseline
features and severities, takes the minimum distance, across
severities in the new corruption set, takes the average.
Inputs:
baseline_features: 3d numpy array ordered as
[corruption, severity, features]
corr_features: 4d numpy array ordered as
[corruption, severity_set, severity, features]
Output
2d numpy array ordered as [corruption, severity_set]
'''
nc, nss, ns, nf = corr_features.shape
corr_features = corr_features.reshape(nc, nss, ns, 1, 1, nf)
nb, _, _ = baseline_features.shape
baseline_features = baseline_features.reshape(1, 1, 1, nb, ns, nf)
dists = np.linalg.norm(corr_features - baseline_features, axis=-1)
min_dists = np.mean(np.min(np.min(dists, axis=-1), axis=-1), axis=-1)
return min_dists
def build_corr_tables(corr_sets, corr_errs, corr_features):
'''
Given a list of corruption severity sets, builds the tables that
will be used to calculate distance. For each corruption, the tables
are ordered in increasing order, since this is required to make the
dataset selection algorithm more efficient. This ordering is also
returned so it can be reversed at the end.
Inputs:
corr_sets: dictionary of corruption keys with lists of severity set
values
corr_errs: dictionary of keys with the form '{corr}-{severity}' and
values that are the errors on that corruption/severity pair
corr_features: dictionary of keys with the form '{corr}-{severity}'
and values that are the features on that corruption/severity pair
Outputs:
1. A list of all corruption strings, in the order they appear the
table.
2. dictionary where the keys are corruption strings, and the values
are the severity sets ordered by increasing corruption error.
3. 2d numpy array with the shape [corruption, severity_set] that
gives the average error on that severity set and corruption
4. 4d numpy array with the shape
[corruption, severity_set, severity, features]
'''
corrs = list(corr_sets.keys())
ordered = {}
len_feats = len(list(corr_features.values())[0])
err_table = np.zeros((len(corrs), len(corr_sets[corrs[0]])))
feat_table = np.zeros((len(corrs), len(corr_sets[corrs[0]]), len(corr_sets[corrs[0]][0]), len_feats))
for i, corr in enumerate(corrs):
curr_errs = np.zeros(len(corr_sets[corr]))
curr_feats = np.zeros((len(corr_sets[corr]), len(corr_sets[corrs[0]][0]), len_feats))
for j, sev_list in enumerate(corr_sets[corr]):
curr_errs[j] = np.mean([corr_errs["{}-{}".format(corr, s)] for s in sev_list])
curr_feats[j,:,:] = np.array([corr_features["{}-{}".format(corr, s)] for s in sev_list])
sev_order = np.argsort(curr_errs)
err_table[i,:] = np.sort(curr_errs)
feat_table[i, :, :, :] = curr_feats[sev_order, :, :]
ordered[corr] = np.array(corr_sets[corr])[sev_order]
return corrs, ordered, err_table, feat_table
def build_baseline_features(baseline_features):
'''
Builds a table of baseline corruption features, given a dictionary.
Inputs:
baseline_features: dictionary of features with keys that are strings
as "{corr}-{severity}"
Outputs:
3d numpy array ordered as [corruption, severity, features].
'''
corrs = sorted(list(set([v.split("-")[0] for v in baseline_features.keys()])))
sevs = sorted(list(set([int(v.split("-")[1]) for v in baseline_features.keys()])))
len_feats = len(list(baseline_features.values())[0])
baseline_table = np.zeros((len(corrs), len(sevs), len_feats))
for i, c in enumerate(corrs):
for j, s in enumerate(sevs):
baseline_table[i,j,:] = baseline_features["{}-{}".format(c,s)]
return baseline_table
def sample_matched_corruptions(err_table, baseline_err, precision, num):
'''
Iterates over all 'num'-sized combinations of corruptions and selects
a set of severities that has error within 'precision' of the baseline
error. If multiple sets of severities fall within the precision
window, it picks one at random. If none do, it skips that combination
of corruptions.
The runtime is O((num_corruptions * num_severity_sets)^num), though
in practice the algorithm below is usually
O(num_corruptions^num * num_severity_sets^(num-1)).
Inputs:
err_table: 2d numpy array of shape [corruptions, severity_sets]
listing the average error for each set.
baseline_err: float giving the target error to match
precision: float giving the percentage variation from the baseline
error allowed for an accepted severity set
num: int listing the number of corruptions to combine
Output:
A list of sampled datasets, where each sampled dataset is a list
of 'num' 2-tuples (corruption_index, severity_set_index).
'''
count = 0
total = comb(err_table.shape[0], num, exact=True)
chosen_augs = []
for idxs in combinations(range(err_table.shape[0]), num):
all_augs = []
count += 1
if count % 1000 == 0:
print("On iteration {}/{}".format(count, total))
# Loop over severities for all chosen corruptions except for the
# last two. Since the severity sets are ordered by average error,
# we can work from the outside in to typically save one factor of
# 'num' in calculation time.
for sev_idxs in product(*[range(err_table.shape[1]) for i in range(num-2)]):
target = baseline_err * num
err_sum = 0.0
for i in range(num-2):
err_sum += err_table[idxs[i], sev_idxs[i]]
stack = [(0, err_table.shape[1]-1)] # Start on the two ends
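            # Severity sets are sorted by increasing error, so with i indexing
            # the second-to-last corruption and j the last: a pair within the
            # precision window is recorded and both neighbors are explored; a
            # sum that is too low advances i, one that is too high decreases j.
            # The seen set keeps this frontier walk from revisiting pairs.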
seen = set()
while stack:
i, j = stack.pop()
if (i,j) in seen or i >= err_table.shape[1] or j < 0:
continue
seen.add((i,j))
final_err_sum = err_sum + err_table[idxs[-2],i] + err_table[idxs[-1],j]
if abs((target-final_err_sum)/target) < precision:
curr = [(idxs[k], sev_idxs[k]) for k in range(num-2)] + [(idxs[-2],i), (idxs[-1],j)]
all_augs.append(curr)
stack.append([i+1, j])
stack.append([i, j-1])
elif (target-final_err_sum)/target >= precision:
stack.append([i+1, j])
else:
stack.append([i, j-1])
if all_augs:
idx_choice = np.random.randint(low=0, high=len(all_augs))
chosen_augs.append(all_augs[idx_choice])
return chosen_augs
def main():
args = parser.parse_args()
data_dir = args.data_dir
baseline_dir = args.baseline_dir
precision = args.precision
num_corr = args.num
out_file = args.out
log_name = args.log_name
target_error = args.target
baseline_exclusions = ['saturate', 'spatter', 'gaussian_blur', 'speckle_noise']
corr_exclusions = []
print("Loading data...")
data_dirs = data_dir.split(",")
baseline_dirs = baseline_dir.split(",")
corr_errs, corr_features = get_data(data_dirs, corr_exclusions, log_file=log_name)
baseline_errs, baseline_features = get_data(baseline_dirs, exclusions=baseline_exclusions, log_file=log_name)
baseline_table = build_baseline_features(baseline_features)
avg_spread = get_average_spread(baseline_errs)
corr_sets = build_sets(corr_errs, avg_spread)
corrs, ordered_sev_list, err_table, feat_table = build_corr_tables(corr_sets, corr_errs, corr_features)
dists = build_distance_table(baseline_table, feat_table)
chosen = sample_matched_corruptions(err_table, target_error, precision, num_corr)
out = []
for aug_list in chosen:
sub_aug_strings = []
err = 0.0
curr_dists = None
for a in aug_list:
corr = corrs[a[0]]
sevs = ordered_sev_list[corr][a[1]]
sub_aug_strings.append("--".join(["{}-{}".format(corr,s) for s in sevs]))
err += err_table[a[0], a[1]]
curr_curr_dists = dists[a[0], a[1]]
curr_dists = np.concatenate((curr_dists, curr_curr_dists.reshape(1,-1)), axis=0) if curr_dists is not None else curr_curr_dists.reshape(1,-1)
err /= len(aug_list)
avg_dists = np.mean(curr_dists, axis=0)
aug_string = "--".join(sub_aug_strings)
data_out = ",".join([aug_string, str(err)] + [str(x) for x in avg_dists])
out.append(data_out)
with open(out_file, 'w') as f:
f.write(",,".join([data_dir, baseline_dir, str(precision), str(num_corr)]))
f.write("\n")
f.write("\n".join(out))
if __name__=="__main__":
main()
|
augmentation-corruption-fbr_main
|
experiments/sample_datasets.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net_jsd import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_imagenet_jsd.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0 and cfg.train.weights is None:
print("Loading training set...")
train_dataset = instantiate(cfg.train)
else:
print("Skipping loading the training dataset, 0 epochs of training to perform "
" or pre-trained weights provided.")
train_dataset = None
print("Loading test set...")
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
print("Training...")
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader,
jsd_num=cfg.train.params.jsd_num,
jsd_alpha=cfg.train.jsd_alpha
)
print("Testing...")
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
num_gpus=cfg.num_gpus)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file,
num_gpus=cfg.num_gpus,
log_name='train_imagenet.log')
if __name__=="__main__":
run()
|
augmentation-corruption-fbr_main
|
experiments/train_imagenet_jsd.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="Calculate corruptions distance "\
"to the baseline corruptions and find a representative dataset of "\
"corruptions that are farthest away.")
parser.add_argument('--input_files', type=str, required=True,
help='Comma separated list of files for error matched corruptions.')
parser.add_argument('--target_error', type=float, required=True,
help='Target ImageNet-C error for choosing a representative dataset.'
)
def calc_shifts(corrs_files):
corr_shifts_all = []
for corrs_file in corrs_files:
with open(corrs_file, 'r') as f:
lines = [line.rstrip() for line in f.readlines()]
# Data is [corruption-string, error, distance]
data = [line.split(",") for line in lines[1:]]
corrs = set()
# Add the corruptions as a set to the data for easy determination of the intersection
for i in range(len(data)):
curr_corrs = set([a.split("-")[0] for a in data[i][0].split("--")])
data[i].append(curr_corrs)
corrs.update(curr_corrs) # Collect all types of corruptions seen for access later
corrs = list(corrs)
# Sample random sets of 10 corruptions
sampled = []
while len(sampled) < 100000:
chosen = np.random.randint(low=0, high=len(data), size=2)
# Take only disjoint combinations to get a full 10 sampled.
if not (data[chosen[0]][-1] & data[chosen[1]][-1]):
sampled.append((
"--".join([data[chosen[0]][0], data[chosen[1]][0]]), # Combined aug string
(float(data[chosen[0]][1]) + float(data[chosen[1]][1])) / 2, # Average error
(float(data[chosen[0]][2]) + float(data[chosen[1]][2])) / 2 # Average distance
))
# Calculate shifts associated with each corruption
corr_shifts = []
        sampled_mean = np.mean([float(s[2]) for s in sampled]) # Mean distance over sampled datasets
        sampled_std = np.std([float(s[2]) for s in sampled]) # Std of distances
# Get per corruption shifts in distance
for corr in corrs:
pos = []
# Find all distances from datasets that include this corruption
for s in sampled:
included_corrs = [a.split("-")[0] for a in s[0].split("--")]
if corr in included_corrs:
pos.append(float(s[2]))
# Calculate average shift for this corruption
pos_avg = np.mean(pos)
# Shift by average distance and reweight by distance std
shift = (pos_avg - sampled_mean) / sampled_std
corr_shifts.append(shift)
corr_shifts_all.append(corr_shifts)
# Calculate mean and std across multiple runs
corr_shifts_all = np.array(corr_shifts_all)
corr_shifts_mean = np.mean(corr_shifts_all, axis=0)
corr_shifts_std = np.std(corr_shifts_all, axis=0)
shifts = {corr : (corr_shifts_mean[i], corr_shifts_std[i]) for i, corr in enumerate(corrs)}
return shifts
def topk_shifts(shifts, k):
shifts_list = np.array([v[0] for k, v in shifts.items()])
corrs_list = np.array([k for k, v in shifts.items()])
ordered_idx = np.argsort(shifts_list)
topk = ordered_idx[-k:]
return corrs_list[topk]
def get_farthest_dataset(farthest_corrs, corrs_files, target_error):
farthest_corrs = set(farthest_corrs)
valid_all = []
for corrs_file in corrs_files:
valid = []
with open(corrs_file, 'r') as f:
lines = [line.rstrip() for line in f.readlines()]
data = [line.split(",") for line in lines[1:]]
for i in range(len(data)):
data[i].append(set([a.split("-")[0] for a in data[i][0].split("--")]))
for datum in data:
augs = datum[-1]
if len(augs & farthest_corrs) == 5:
valid.append(datum)
valid_all.append(valid)
matched_all = []
for valid in valid_all:
matched = []
for i in np.arange(len(valid)):
for j in np.arange(start=i+1, stop=len(valid)):
if not (valid[i][-1] & valid[j][-1]):
matched.append((
"--".join([valid[i][0], valid[j][0]]), # Combined corruption string
(float(valid[i][1]) + float(valid[j][1])) / 2, # Average error
(float(valid[i][2]) + float(valid[j][2])) / 2 # Average distance
))
matched_all.append(matched)
best = None
for i, matched in enumerate(matched_all):
for m in matched:
if best is None or np.abs(m[1]-target_error) < np.abs(best[1] - target_error):
best = m
best_corr_dir = i
return best
def main():
args = parser.parse_args()
file_list = args.input_files.split(",")
shifts = calc_shifts(file_list)
farthest_corrs = topk_shifts(shifts, k=10)
corr_string = get_farthest_dataset(farthest_corrs, file_list, args.target_error)
print(shifts)
print(corr_string)
if __name__=="__main__":
main()
|
augmentation-corruption-fbr_main
|
experiments/calc_distance_shifts.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/severity_scan_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader=True):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0:
train_dataset = instantiate(cfg.train)
else:
train_dataset = None
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
with omegaconf.open_dict(cfg):
feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)
feature_extractor.train()
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch',
num_gpus=cfg.num_gpus)
if os.path.exists(cfg.feature_file):
feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}
else:
feature_dict = {}
indices = np.load(cfg.ft_corrupt.indices_file)
for aug in cfg.aug_string.split("--"):
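        # Each entry is "name" or "name-sev", where sev is either a single
        # maximum value or a "low_high" range; cfg.severity (on a 0-10 scale)
        # linearly interpolates within that range to pick the severity used.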
if len(aug.split("-")) > 1:
#log.info("Severity provided in corrupt.aug_string will be weighted by given severity.")
sev = aug.split("-")[1]
if len(sev.split("_")) > 1:
low = float(sev.split("_")[0])
high = float(sev.split("_")[1])
else:
low = 0.0
high = float(sev)
sev_factor = (high - low) * cfg.severity / 10 + low
else:
sev_factor = cfg.severity
aug = aug.split("-")[0]
aug_string = "{}-{}".format(aug, sev_factor)
if aug_string in feature_dict:
continue
with omegaconf.open_dict(cfg.corrupt):
corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)
err = test_net(model=model,
test_dataset=corrupt_dataset,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
output_name=aug_string,
num_gpus=cfg.num_gpus)
with omegaconf.open_dict(cfg.ft_corrupt):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug_string)
if cfg.ft_corrupt.params.num_transforms is not None:
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
else:
ft_corrupt_dataset = torch.utils.data.Subset(ft_corrupt_dataset, indices)
feature = extract_features(feature_extractor=feature_extractor,
dataset=ft_corrupt_dataset,
batch_size=cfg.ft_corrupt.batch_size,
loader_params=cfg.data_loader,
average=True,
num_gpus=cfg.num_gpus)
feature_dict[aug_string] = feature
if is_leader:
np.savez(cfg.feature_file, **feature_dict)
if __name__=="__main__":
run()
|
augmentation-corruption-fbr_main
|
experiments/severity_scan_imagenet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net_jsd import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
from pathlib import Path
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_cifar10_jsd.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
train_dataset = instantiate(cfg.train)
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
jsd_num=cfg.train.params.jsd_num,
jsd_alpha=cfg.train.jsd_alpha
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
|
augmentation-corruption-fbr_main
|
experiments/train_cifar10_jsd.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
from overlap.utils import logging as lu
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/feature_corrupt_error.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
train_dataset = instantiate(cfg.train)
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
feature_extractor = instantiate(cfg.ft)
feature_extractor.train()
if cfg.aug_feature_file and os.path.exists(cfg.aug_feature_file):
log.info("Found feature file. Loading from {}".format(cfg.aug_feature_file))
data = np.load(cfg.aug_feature_file)
augmentation_features = data['features']
indices = data['indices']
else:
ft_augmentation_dataset = instantiate(cfg.ft_augmentation)
indices = np.random.choice(np.arange(len(ft_augmentation_dataset)), size=cfg.num_images, replace=False)
ft_augmentation_dataset = ft_augmentation_dataset.serialize(indices)
augmentation_features = extract_features(feature_extractor,
ft_augmentation_dataset,
cfg.ft_augmentation.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
#nf, lf = augmentation_features.shape
#augmentation_features = np.mean(augmentation_features.reshape(len(indices), nf//len(indices), lf), axis=0)
if cfg.aug_feature_file:
np.savez(cfg.aug_feature_file, features=augmentation_features, indices=indices)
aug_strings = cfg.ft_corrupt.aug_string.split("--")
for aug in aug_strings:
with omegaconf.open_dict(cfg):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
corruption_features = extract_features(feature_extractor,
ft_corrupt_dataset,
cfg.ft_corrupt.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
nf, lf = corruption_features.shape
#corruption_features = np.mean(corruption_features.reshape(len(indices), nf//len(indices), lf), axis=0)
augmentation_features = augmentation_features.reshape(-1, 1, lf)
corruption_features = corruption_features.reshape(1, -1, lf)
mean_aug = np.mean(augmentation_features.reshape(-1,lf), axis=0)
mean_corr = np.mean(corruption_features.reshape(-1,lf), axis=0)
mmd = np.linalg.norm(mean_aug-mean_corr, axis=0)
msd = np.min(np.linalg.norm(augmentation_features.reshape(-1,lf)-mean_corr.reshape(1,lf),axis=1),axis=0)
stats = {"_type" : aug,
"mmd" : str(mmd),
"msd" : str(msd),
}
lu.log_json_stats(stats)
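# Hedged illustration (added for clarity, not part of the original script): the
# loop above compares augmentation and corruption feature sets with two scalars.
# 'mmd' is the distance between the two feature means; 'msd' is the distance
# from the corruption mean to the closest single augmentation feature. A
# minimal self-contained sketch of the same arithmetic, with made-up shapes:
def _example_mmd_msd():
    import numpy as np
    rng = np.random.RandomState(0)
    aug_features = rng.randn(8, 4)   # hypothetical (num_aug_features, feature_dim)
    corr_features = rng.randn(6, 4)  # hypothetical (num_corr_features, feature_dim)
    mean_aug = aug_features.mean(axis=0)
    mean_corr = corr_features.mean(axis=0)
    mmd = np.linalg.norm(mean_aug - mean_corr)
    msd = np.linalg.norm(aug_features - mean_corr.reshape(1, -1), axis=1).min()
    return mmd, msd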
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch')
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
|
augmentation-corruption-fbr_main
|
experiments/feature_corrupt_error.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/severity_scan.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader=True):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0:
train_dataset = instantiate(cfg.train)
else:
train_dataset = None
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
with omegaconf.open_dict(cfg):
feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)
feature_extractor.train()
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch',
num_gpus=cfg.num_gpus)
if os.path.exists(cfg.feature_file):
feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}
else:
feature_dict = {}
indices = np.load(cfg.ft_corrupt.indices_file)
for aug in cfg.aug_string.split("--"):
if len(aug.split("-")) > 1:
#log.info("Severity provided in corrupt.aug_string will be weighted by given severity.")
sev = aug.split("-")[1]
if len(sev.split("_")) > 1:
low = float(sev.split("_")[0])
high = float(sev.split("_")[1])
else:
low = 0.0
high = float(sev)
sev_factor = (high - low) * cfg.severity / 10 + low
else:
sev_factor = cfg.severity
aug = aug.split("-")[0]
aug_string = "{}-{}".format(aug, sev_factor)
if aug_string in feature_dict:
continue
with omegaconf.open_dict(cfg.corrupt):
corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)
err = test_net(model=model,
test_dataset=corrupt_dataset,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
output_name=aug_string,
num_gpus=cfg.num_gpus)
with omegaconf.open_dict(cfg.ft_corrupt):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug_string)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
feature = extract_features(feature_extractor=feature_extractor,
dataset=ft_corrupt_dataset,
batch_size=cfg.ft_corrupt.batch_size,
loader_params=cfg.data_loader,
average=True,
num_gpus=cfg.num_gpus)
feature_dict[aug_string] = feature
if is_leader:
np.savez(cfg.feature_file, **feature_dict)
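# Hedged sketch (added for clarity, not in the original run script): the
# "name-low_high" severity strings parsed above map the sweep setting
# cfg.severity in [0, 10] linearly onto [low, high]. A tiny standalone
# version of that mapping, with an illustrative augmentation name:
def _example_severity_interpolation(aug='contrast-0.2_1.0', severity=5):
    name, sev = aug.split('-')
    if len(sev.split('_')) > 1:
        low, high = (float(x) for x in sev.split('_'))
    else:
        low, high = 0.0, float(sev)
    sev_factor = (high - low) * severity / 10 + low
    return "{}-{}".format(name, sev_factor)  # e.g. 'contrast-0.6' for severity 5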
if __name__=="__main__":
run()
|
augmentation-corruption-fbr_main
|
experiments/severity_scan.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/test_cifar10.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
test_dataset = instantiate(cfg.test)
checkpoint = torch.load(cfg.weights, map_location='cpu')
model.load_state_dict(checkpoint['model_state'])
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
|
augmentation-corruption-fbr_main
|
experiments/test_cifar10.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0 and cfg.train.weights is None:
print("Loading training set...")
train_dataset = instantiate(cfg.train)
else:
print("Skipping loading the training dataset, 0 epochs of training to perform "
" or pre-trained weights provided.")
train_dataset = None
print("Loading test set...")
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
print("Training...")
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader
)
print("Testing...")
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
num_gpus=cfg.num_gpus)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file,
num_gpus=cfg.num_gpus,
log_name='train_imagenet.log')
if __name__=="__main__":
run()
|
augmentation-corruption-fbr_main
|
experiments/train_imagenet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
from pathlib import Path
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_cifar10.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
train_dataset = instantiate(cfg.train)
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
|
augmentation-corruption-fbr_main
|
experiments/train_cifar10.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import overlap.utils.logging as lu
import decimal
import simplejson
import numpy as np
import omegaconf
from itertools import combinations
from itertools import product
from scipy.special import comb
import math
parser = argparse.ArgumentParser(description="Collect run summaries.")
parser.add_argument('--cifar10c_dir', dest='baseline_dir', required=True)
parser.add_argument('--log_name', dest='log_name', default='severity_scan.log')
def get_data(base_dirs, exclusions=[], log_file='severity_scan.log'):
features = {}
errors = {}
feature_file = 'features.npz'
path_stack = base_dirs
while path_stack:
curr_dir = path_stack.pop()
subdirs = [os.path.join(curr_dir, x) for x in os.listdir(curr_dir) if x[0] != '.']
subdirs = [x for x in subdirs if os.path.isdir(x)]
path_stack.extend(subdirs)
summary_file = os.path.join(curr_dir, log_file)
if os.path.exists(summary_file):
curr_features = np.load(os.path.join(curr_dir, feature_file))
features.update({k : v for k,v in curr_features.items() if k.split("-")[0] not in exclusions})
stats = lu.load_json_stats(summary_file)
curr_errs = {stats[i]["_type"] : stats[i]["top1_err"] for i in range(len(stats))\
if stats[i]["_type"] != "test_epoch" and stats[i]["_type"].split("-")[0] not in exclusions}
errors.update(curr_errs)
return errors, features
def get_target_error(baseline_errs):
errs = [err for b, err in baseline_errs.items()]
return sum(errs)/len(errs)
def dict_avg(list_of_dicts):
out = None
for d in list_of_dicts:
if out is None:
out = dict(d)  # copy so the caller's first dict is not mutated (which would also skew the std below)
else:
for k in out:
out[k] += d[k]
for k in out:
out[k] /= len(list_of_dicts)
std = None
for d in list_of_dicts:
if std is None:
std = {}
for k in out:
std[k] = (d[k]-out[k])**2
else:
for k in out:
std[k] += (d[k]-out[k])**2
for k in std:
std[k] = math.sqrt(std[k]) / len(list_of_dicts)
return out, std
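# Hedged usage sketch for dict_avg (illustrative values only): it averages a
# list of {corruption: error} dicts key-wise and returns a per-key spread.
def _example_dict_avg():
    runs = [{'fog': 10.0, 'snow': 20.0}, {'fog': 12.0, 'snow': 18.0}]
    avg, std = dict_avg(runs)
    # avg == {'fog': 11.0, 'snow': 19.0}
    return avg, std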
def main():
args = parser.parse_args()
baseline_dir = args.baseline_dir
log_name = args.log_name
baseline_exclusions = ['saturate', 'spatter', 'gaussian_blur', 'speckle_noise']
print("Loading data...")
baseline_dirs = baseline_dir.split(",")
baseline_errs_list = []
baseline_features_list = []
for baseline_dir in baseline_dirs:
baseline_errs, baseline_features = get_data([baseline_dir], log_file=log_name, exclusions=baseline_exclusions)
baseline_errs_list.append(baseline_errs)
baseline_features_list.append(baseline_features)
baseline_errs, baseline_std = dict_avg(baseline_errs_list)
target_error = get_target_error(baseline_errs)
print(target_error)
if __name__=="__main__":
main()
|
augmentation-corruption-fbr_main
|
experiments/tools/get_target_error.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import overlap.utils.logging as lu
import decimal
import simplejson
import numpy as np
import omegaconf
parser = argparse.ArgumentParser(description="Collect run summaries.")
parser.add_argument('--dir', dest='run_dir')
parser.add_argument('--filename', dest='summary_name', default='train_cifar10.log')
def main():
args = parser.parse_args()
run_dir = args.run_dir
summary_name = args.summary_name
hydra_config = '.hydra/config.yaml'
files = []
path_stack = [run_dir]
while path_stack:
curr_dir = path_stack.pop()
subdirs = [os.path.join(curr_dir, x) for x in os.listdir(curr_dir) if x[0] != '.']
subdirs = [x for x in subdirs if os.path.isdir(x)]
path_stack.extend(subdirs)
summary_file = os.path.join(curr_dir, summary_name)
if os.path.exists(summary_file):
config_file = os.path.join(curr_dir, hydra_config)
files.append((summary_file, config_file))
for (summary, config) in files:
data = []
cfg = omegaconf.OmegaConf.load(config)
stats = lu.load_json_stats(summary)
# Run metadata
data.append(cfg.rng_seed) # ID
data.append(cfg.name) # Name
data.append(summary) # Filename for data
#Model info
data.append(cfg.model['class'].split('.')[-1]) # Model name
data.append(cfg.model.params.depth) # Model depth
data.append(cfg.model.params.widen_factor) # Width factor
# Optimizer info
data.append(cfg.optim.lr_policy['class'].split('.')[-1]) # LR policy
data.append(cfg.optim.base_lr) # Base LR
data.append(cfg.optim.max_epoch) # Num epochs
# Augmentation info
aug_data = []
train_class = cfg.train['class'].split('.')[-1]
if train_class == 'Cifar10Base': # No augmentation
aug_data.append('none')
elif train_class == 'Cifar10Augmix': # Augmix
aug_data.append('augmix')
aug_string = cfg.train.params.aug_string
if aug_string is None:
aug_string = 'standard'
aug_data.append(aug_string) # Aug string
aug_data.append(cfg.train.params.width) # Augmix width
reported_depth = cfg.train.params.depth * (-1 if cfg.train.params.random_depth else 1)
aug_data.append(reported_depth) # Augmix depth
aug_data.append(cfg.train.params.prob_coeff) # Augmix prob coefficient
severity = cfg.train.params.severity if cfg.train.params.aug_string is None else ''
aug_data.append(severity) # Augmix severity
elif train_class == 'Cifar10Corruption': # Corruption Trained
aug_data.append('corruption_trained')
aug_string = cfg.train.params.aug_string
if aug_string is None:
aug_string = 'extra' if cfg.train.params.include_extra else 'standard'
aug_data.append(aug_string) #
aug_data.extend(['' for i in range(6-len(aug_data))])
data.extend(aug_data)
# Feature extraction info
data.extend(['' for i in range(17)])
# JSD info
data.append('no')
data.extend(['',''])
# Batch size info
data.append(cfg.train.batch_size)
data.append(cfg.test.batch_size)
# Errors
clean_error = lu.parse_json_stats(stats, 'test_epoch', 'top1_err')[0]
data.append(clean_error) # Clean error
data.extend(['', '']) # Space for clean error std and C95
mCE = lu.parse_json_stats(stats, 'overall-avg', 'mCE')
mCE = mCE[0] if mCE else ''
data.append(mCE) # mCE
data.extend(['', '']) # Space for mCE std and C95
rmCE = lu.parse_json_stats(stats, 'overall-avg', 'rmCE')
rmCE = rmCE[0] if rmCE else ''
data.append(rmCE) # rmCE
data.extend(['', '']) # Space for rmCE std and C95
avg_error = lu.parse_json_stats(stats, 'overall-avg', 'top1_err')[0]
data.append(avg_error) # Average corruption error
data.extend(['', '']) # Space for corruption error std and C95
data.extend(['', '']) # Space for number in average and type of average
data.append('') # Divider
# Individual errors
# Noise
data.append(lu.parse_json_stats(stats, 'noise-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'gaussian_noise-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'impulse_noise-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'shot_noise-avg', 'top1_err')[0])
# Blur
data.append(lu.parse_json_stats(stats, 'blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'defocus_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'glass_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'motion_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'zoom_blur-avg', 'top1_err')[0])
# Weather
data.append(lu.parse_json_stats(stats, 'weather-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'brightness-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'fog-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'frost-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'snow-avg', 'top1_err')[0])
# Digital
data.append(lu.parse_json_stats(stats, 'digital-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'contrast-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'elastic_transform-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'jpeg_compression-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'pixelate-avg', 'top1_err')[0])
# Extra
data.append(lu.parse_json_stats(stats, 'extra-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'gaussian_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'saturate-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'spatter-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'speckle_noise-avg', 'top1_err')[0])
data = [str(i) for i in data]
print(",".join(data))
if __name__ == "__main__":
main()
|
augmentation-corruption-fbr_main
|
experiments/tools/summarize.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="Generate random indicies '\
'for sampling from the CIFAR-10 or ImageNet training sets.")
parser.add_argument('--dataset', type=str, required=True,
help='Should be in [\'cifar-10\', \'imagenet\'].')
parser.add_argument('--num', type=int, required=True,
help='Number of indices to generate.')
parser.add_argument('--out', type=str, required=True,
help='Output file. Should be .npy format.')
def main():
args = parser.parse_args()
assert args.dataset in ['cifar-10', 'imagenet'], "Unknown dataset."
max_index = 50000 if args.dataset=='cifar-10' else 1281167
indices = np.random.choice(np.arange(max_index), size=args.num, replace=False)
np.save(args.out, indices)
if __name__=="__main__":
main()
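# Illustrative invocation (filenames are examples, not from the repo):
#   python sample_image_indices.py --dataset cifar-10 --num 1000 --out indices.npy
# The saved .npy file is the kind of index list that the severity-scan
# configs load via ft_corrupt.indices_file.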
|
augmentation-corruption-fbr_main
|
experiments/tools/sample_image_indices.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
from omegaconf import open_dict
from .augmentations.utils import aug_finder
from hydra.utils import instantiate
import numpy as np
import os
import pickle
log = logging.getLogger(__name__)
def test_corrupt_net(model, corrupt_cfg, batch_size, loader_params, aug_string=None, mCE_denom=None, clean_err=None, imagenetc_grouping=True, num_gpus=1, log_name=None):
model.eval()
if aug_string is None:
augs = aug_finder.get_augs_by_tag(['imagenet_c'])
severities = [1,2,3,4,5]
augs = ["{}-{}".format(a.name, s) for a in augs for s in severities]
else:
augs = aug_string.split("--")
if log_name is not None and os.path.exists(log_name):
prestats = lu.load_json_stats(log_name)
else:
prestats = None
errs = []
for aug in augs:
if prestats is not None and len(lu.parse_json_stats(prestats, row_type=aug, key='top1_err')) > 0:
continue
with open_dict(corrupt_cfg):
corrupt_dataset = instantiate(corrupt_cfg, aug_string=aug)
sampler = torch.utils.data.distributed.DistributedSampler(corrupt_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
corrupt_dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=False
)
num_correct = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(corrupt_dataset))
stats = {'_type' : aug, 'top1_err' : err}
lu.log_json_stats(stats)
errs.append(err)
# Aggregate per-corruption averages and, when a baseline is given, mCE/rmCE
if mCE_denom is not None:
    baseline_file = os.path.join(os.path.dirname(__file__), '../baseline_data/', mCE_denom)
    with open(baseline_file, 'rb') as f:
        mCE_denom = pickle.load(f)
errs = np.array(errs)
aug_names = [a.split("-")[0] for a in augs]
unique_aug_names = list(set(aug_names))
avg_errs = [np.mean(errs[[i for i, a in enumerate(aug_names) if a==u]]) for u in unique_aug_names]
avg_errs = np.array(avg_errs)
mCE = None
rmCE = None
if mCE_denom:
mCE = [100 * avg_errs[i] / mCE_denom[a] for i, a in enumerate(unique_aug_names)]
mCE = np.array(mCE)
if clean_err:
rmCE = [100 * (avg_errs[i] - clean_err) / (mCE_denom[a] - mCE_denom['clean'])\
for i, a in enumerate(unique_aug_names)]
rmCE = np.array(rmCE)
for i, a in enumerate(unique_aug_names):
stats = {'_type' : a + '-avg', 'top1_err' : avg_errs[i]}
if mCE is not None:
stats['mCE'] = mCE[i]
if rmCE is not None:
stats['rmCE'] = rmCE[i]
lu.log_json_stats(stats)
if imagenetc_grouping:
for aug_type in ['blur', 'digital', 'noise', 'weather', 'extra']:
aug_indices = [i for i, a in enumerate(unique_aug_names)\
if aug_type in aug_finder.get_aug_by_name(a).tags]
err_for_type = np.mean(avg_errs[aug_indices])
stats = {'_type' : aug_type + '-avg', 'top1_err' : err_for_type}
if mCE is not None:
mCE_for_type = np.mean(mCE[aug_indices])
stats['mCE'] = mCE_for_type
if rmCE is not None:
rmCE_for_type = np.mean(rmCE[aug_indices])
stats['rmCE'] = rmCE_for_type
lu.log_json_stats(stats)
if imagenetc_grouping:
indices = [i for i, a in enumerate(unique_aug_names)\
if 'extra' not in aug_finder.get_aug_by_name(a).tags]
else:
indices = [i for i, a in enumerate(unique_aug_names)]
overall_avg = np.mean(avg_errs[indices])
stats = {'_type' : 'overall-avg', 'top1_err' : overall_avg}
if mCE is not None:
overall_mCE = np.mean(mCE[indices])
stats['mCE'] = overall_mCE
if rmCE is not None:
overall_rmCE = np.mean(rmCE[indices])
stats['rmCE'] = overall_rmCE
lu.log_json_stats(stats)
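# Hedged numeric sketch of the (r)mCE formulas used above, with made-up
# baseline numbers: mCE rescales a model's corruption error by a baseline
# model's error on the same corruption; rmCE does the same for the error
# *increase* relative to clean data.
def _example_mce(err=20.0, clean_err=5.0, baseline_err=40.0, baseline_clean=10.0):
    mCE = 100 * err / baseline_err                                  # 50.0
    rmCE = 100 * (err - clean_err) / (baseline_err - baseline_clean)  # 50.0
    return mCE, rmCE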
|
augmentation-corruption-fbr_main
|
experiments/overlap/test_corrupt_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
log = logging.getLogger(__name__)
def test_net(model, test_dataset, batch_size, loader_params, output_name='test_epoch', num_gpus=1):
model.eval()
sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=False
)
num_correct = 0
num_total = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(test_dataset))
stats = {'_type' : output_name, 'top1_err' : err}
lu.log_json_stats(stats)
return err
|
augmentation-corruption-fbr_main
|
experiments/overlap/test_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# Blocks per stage for each supported ImageNet ResNet depth (standard
# ResNet-50/101/152 stage depths; assumed values, since the constant used
# below is not defined elsewhere in this file).
_IN_STAGE_DS = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3), 152: (3, 8, 36, 3)}
class ResHead(nn.Module):
"""ResNet head."""
def __init__(self, w_in, nc):
super(ResHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(w_in, nc, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
self.features = x
x = self.fc(x)
return x
class BottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3, 1x1"""
def __init__(self, w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
super(BottleneckTransform, self).__init__()
self._construct(w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
def _construct(self, w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
(str1x1, str3x3) = (stride, 1) if stride_1x1 else (1, stride)
# 1x1, BN, ReLU
self.a = nn.Conv2d(
w_in, w_b, kernel_size=1,
stride=str1x1, padding=0, bias=False
)
self.a_bn = torch.nn.BatchNorm2d(w_b, **bn_params)
self.a_relu = nn.ReLU(inplace=relu_inplace)
# 3x3, BN, ReLU
self.b = nn.Conv2d(
w_b, w_b, kernel_size=3,
stride=str3x3, padding=1, groups=num_gs, bias=False
)
self.b_bn = torch.nn.BatchNorm2d(w_b, **bn_params)
self.b_relu = nn.ReLU(inplace=relu_inplace)
# 1x1, BN
self.c = nn.Conv2d(
w_b, w_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.c_bn = torch.nn.BatchNorm2d(w_out, **bn_params)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBlock(nn.Module):
"""Residual block: x + F(x)"""
def __init__(
self, w_in, w_out, stride, w_b, num_gs, bn_params, stride_1x1, relu_inplace
):
super(ResBlock, self).__init__()
self._construct(w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
def _add_skip_proj(self, w_in, w_out, stride, bn_params):
self.proj = nn.Conv2d(
w_in, w_out, kernel_size=1,
stride=stride, padding=0, bias=False
)
self.bn = torch.nn.BatchNorm2d(w_out, **bn_params)
def _construct(self, w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
# Use skip connection with projection if shape changes
self.proj_block = (w_in != w_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(w_in, w_out, stride, bn_params)
self.f = BottleneckTransform(w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
self.relu = nn.ReLU(relu_inplace)
def forward(self, x):
fx = self.f(x)
if self.proj_block:
x = self.bn(self.proj(x))
x = x + fx
x = self.relu(x)
return x
class ResStage(nn.Module):
"""Stage of ResNet."""
def __init__(self, w_in, w_out, stride, d, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
super(ResStage, self).__init__()
self._construct(w_in, w_out, stride, d, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
def _construct(self, w_in, w_out, stride, d, w_b, num_gs, bn_params, stride_1x1, relu_inplace):
# Construct the blocks
for i in range(d):
# Stride and w_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_w_in = w_in if i == 0 else w_out
# Construct the block
res_block = ResBlock(
b_w_in, w_out, b_stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace
)
self.add_module('b{}'.format(i + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class ResStem(nn.Module):
"""Stem of ResNet."""
def __init__(self, w_in, w_out, bn_params, relu_inplace):
super(ResStem, self).__init__()
self._construct_imagenet(w_in, w_out, bn_params, relu_inplace)
def _construct_imagenet(self, w_in, w_out, bn_params, relu_inplace):
# 7x7, BN, ReLU, maxpool
self.conv = nn.Conv2d(
w_in, w_out, kernel_size=7,
stride=2, padding=3, bias=False
)
self.bn = torch.nn.BatchNorm2d(w_out, **bn_params)
self.relu = nn.ReLU(relu_inplace)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResNetPycls(nn.Module):
"""ResNeXt model."""
def __init__(self, depth=50, width_factor=1, num_groups=1, width_per_group=64,
num_classes=1000, bn_params={'eps':1e-5, 'momentum':0.1, 'affine':True},
stride_1x1=False, relu_inplace=True, final_gamma=True
):
super(ResNetPycls, self).__init__()
self.depth = depth
self.width = width_factor
self.ng = num_groups
self.width_per_group = width_per_group
self.num_classes = num_classes
self.bn_params = bn_params
self.stride_1x1 = stride_1x1
self.relu_inplace = relu_inplace
self._construct_imagenet()
def init_weights(m, final_gamma):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
if hasattr(m, 'bias') and m.bias is not None:
m.bias.data.fill_(0.0)
elif isinstance(m, nn.BatchNorm2d):
zero_init_gamma = (
hasattr(m, 'final_bn') and m.final_bn and
final_gamma
)
m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
self.apply(lambda m : init_weights(m, final_gamma))
def _construct_imagenet(self):
# Retrieve the number of blocks per stage
(d1, d2, d3, d4) = _IN_STAGE_DS[self.depth]
# Compute the initial bottleneck width
num_gs = self.ng
w_b = self.width_per_group * num_gs
w1, w2, w3, w4 = [self.width * w for w in [256, 512, 1024, 2048]]
# Stem: (N, 3, 224, 224) -> (N, 64, 56, 56)
self.stem = ResStem(w_in=3, w_out=64, bn_params=self.bn_params, relu_inplace=self.relu_inplace)
# Stage 1: (N, 64, 56, 56) -> (N, 256, 56, 56)
self.s1 = ResStage(
w_in=64, w_out=w1, stride=1, d=d1,
w_b=w_b, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Stage 2: (N, 256, 56, 56) -> (N, 512, 28, 28)
self.s2 = ResStage(
w_in=w1, w_out=w2, stride=2, d=d2,
w_b=w_b * 2, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Stage 3: (N, 512, 28, 28) -> (N, 1024, 14, 14)
self.s3 = ResStage(
w_in=w2, w_out=w3, stride=2, d=d3,
w_b=w_b * 4, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Stage 4: (N, 1024, 14, 14) -> (N, 2048, 7, 7)
self.s4 = ResStage(
w_in=w3, w_out=w4, stride=2, d=d4,
w_b=w_b * 8, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Head: (N, 2048, 7, 7) -> (N, num_classes)
self.head = ResHead(w_in=w4, nc=self.num_classes)
def forward(self, x):
for module in self.children():
x = module(x)
if isinstance(module, ResHead):
self.features = module.features
return x
|
augmentation-corruption-fbr_main
|
experiments/overlap/models.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
import numpy as np
import os
log = logging.getLogger(__name__)
def distributed_gather_features(curr_features, batch_size, num_gpus):
gather_list = [torch.zeros((batch_size, curr_features.size(-1)), device=curr_features.device)\
for i in range(num_gpus)]
count = curr_features.size(0)
if count < batch_size:
curr_features = torch.cat((curr_features, torch.zeros((batch_size - count, curr_features.size(-1)), device=curr_features.device)), dim=0)
torch.distributed.all_gather(gather_list, curr_features)
count = torch.Tensor([count]).cuda()
torch.distributed.all_reduce(count)
count = int(count.item())
# Here we use that the distributed data sampler interleaves sampling across replicas
curr_features = torch.stack(gather_list, dim=1).reshape(-1, curr_features.size(-1))
curr_features = curr_features[:count,:]
return curr_features
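# Hedged single-process sketch of the de-interleaving done above: with
# num_gpus replicas, the distributed sampler hands out samples round-robin,
# so stacking the gathered per-rank batches along dim=1 and flattening
# restores the original dataset order.
def _example_deinterleave():
    import torch
    # Rank 0 saw samples [0, 2, 4] and rank 1 saw [1, 3, 5] (feature dim 1).
    gather_list = [torch.tensor([[0.], [2.], [4.]]), torch.tensor([[1.], [3.], [5.]])]
    merged = torch.stack(gather_list, dim=1).reshape(-1, 1)
    return merged.squeeze(1).tolist()  # [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]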
def extract_features(feature_extractor, dataset, batch_size, loader_params, average=True, num_gpus=1, average_num=None, preemption_protection=False, is_leader=True):
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=False
)
features = None
count = 0
starting_iter = -1
if preemption_protection and os.path.exists('feature_extraction.tmp.npz'):
data = np.load('feature_extraction.tmp.npz')
features = torch.Tensor(data['features']).cuda()
count = data['count']
starting_iter = data['curr_iter']
for curr_iter, (inputs, labels) in enumerate(loader):
if preemption_protection and curr_iter <= starting_iter:
continue
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
curr_features = feature_extractor.extract(inputs)
if average and average_num is None:
curr_features = torch.sum(curr_features, dim=0)
if num_gpus > 1:
torch.distributed.all_reduce(curr_features)
features = (features + curr_features.detach().cpu()) if features is not None else curr_features.detach().cpu()
elif average:
num_features = len(dataset) // average_num
if num_gpus > 1:
curr_features = distributed_gather_features(curr_features, batch_size, num_gpus)
if features is None:
features = torch.zeros(num_features, curr_features.size(-1))
if count + curr_features.size(0) > num_features:
remainder = count + curr_features.size(0) - num_features
features[count:, :] += curr_features[:num_features-count,:].detach().cpu()
offset = 0
while remainder > num_features:
features += curr_features[offset+num_features-count:2*num_features-count+offset].detach().cpu()
offset += num_features
remainder -= num_features
features[:remainder,:] += curr_features[offset+num_features-count:,:].detach().cpu()
count = remainder
else:
features[count:count+curr_features.size(0),:] += curr_features.detach().cpu()
count += curr_features.size(0)
count = count % num_features
else:
if num_gpus > 1:
curr_features = distributed_gather_features(curr_features, batch_size, num_gpus)
if features is None:
features = torch.zeros(len(dataset), curr_features.size(-1))
features[count:count+curr_features.size(0),:] = curr_features.detach().cpu()
count += curr_features.size(0)
if preemption_protection and curr_iter % 5000 == 0 and is_leader:
np.savez('feature_extraction.tmp.npz', features=features.detach().cpu().numpy(), count=count, curr_iter=curr_iter)
if average and average_num is None:
features /= len(dataset)
elif average:
features /= average_num
return features.detach().cpu().numpy()
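# Note on the averaging modes above (summary added for clarity, not original
# documentation):
# - average=True, average_num=None: one mean feature over the whole dataset.
# - average=True, average_num=N: the serialized dataset is treated as N
#   contiguous per-image groups, so the modulo accumulation yields
#   len(dataset)//N features, one per fixed transform, each averaged over
#   the N sampled images.
# - average=False: per-sample features, concatenated in loader order.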
|
augmentation-corruption-fbr_main
|
experiments/overlap/extract_features.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import augmentations as aug
from .augmentations.utils.converters import NumpyToTensor, PilToNumpy
from .augmentations.utils.aug_finder import get_augs_by_tag, parse_aug_string, get_aug_by_name
from .augmentations.utils.severity import sample_level, int_parameter, float_parameter
from .augmentations import pil, compositions, obscure, patch_gaussian, standard_augmentations
import torchvision as tv
import torch
import numpy as np
import os
from PIL import Image, ImageOps
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]
#This is in RGB order since that is the standard for PIL
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
class Cifar10Base(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, augmentation=None, transform_file=None):
assert split in ['train', 'test'], "Unknown split {}".format(split)
self.train = True if split=='train' else False
self.train_aug = self.train if train_aug is None else train_aug
self.transform_weights = None
if self.train_aug:
train_transform = [
tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomCrop(im_size, padding=4)
]
else:
train_transform = []
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
if augmentation is None:
self.aug = aug.identity.Identity()
else:
self.aug = augmentation
self.posttransform = tv.transforms.Compose([
NumpyToTensor(),
tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
])
if transform_file:
transforms = np.load(transform_file)
self.transform_list = transforms[:num_transforms]\
if num_transforms is not None else transforms
elif num_transforms:
self.transform_list = self.build_transform_list(num_transforms)
else:
self.transform_list = None
self.dataset = tv.datasets.CIFAR10(data_path, self.train, download=False)
def build_transform_list(self, num_transforms):
transforms = [self.aug.convert_to_numpy(self.aug.sample_parameters()) for i in range(num_transforms)]
return np.stack(transforms, axis=0)
def get_random_transform(self):
if self.transform_list is None:
return self.aug.sample_parameters()
elif self.transform_weights is None:
params = self.transform_list[np.random.randint(low=0, high=len(self.transform_list))]
return self.aug.convert_from_numpy(params)
else:
index = np.random.choice(np.arange(len(self.transform_list)), p=self.transform_weights)
params = self.transform_list[index]
return self.aug.convert_from_numpy(params)
def __getitem__(self, index):
pre_im, label = self.dataset[index]
pre_im = self.pretransform(pre_im)
params = self.get_random_transform()
return self.posttransform(self.aug.transform(pre_im, **params)), label
def __len__(self):
return len(self.dataset)
def fixed_transform(self, index, transform_index):
assert self.transform_list is not None, "Must have a fixed transform list to generate fixed transforms."
im, label = self.dataset[index]
im = self.pretransform(im)
params = self.aug.convert_from_numpy(self.transform_list[transform_index])
im = self.aug.transform(im, **params)
return self.posttransform(im), label
def serialize(self, indices=None):
'''
Returns a new dataset that applies every fixed transform, in order,
to each image index, in order (images vary slowest).
'''
class SerialDataset(torch.utils.data.Dataset):
def __init__(self, dataset, indices=None):
self.dataset = dataset
self.indices = indices
def __getitem__(self, index):
im_idx = index // len(self.dataset.transform_list)
im_idx = self.indices[im_idx] if self.indices is not None else im_idx
param_idx = index % len(self.dataset.transform_list)
return self.dataset.fixed_transform(im_idx, param_idx)
def __len__(self):
if self.indices is not None:
return len(self.indices) * len(self.dataset.transform_list)
else:
return len(self.dataset) * len(self.dataset.transform_list)
return SerialDataset(self, indices)
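# Hedged sketch of the serialized index layout used by SerialDataset above:
# flat index i maps to (image, transform) as
#   im_idx, param_idx = divmod(i, num_transforms)
# so all fixed transforms of image 0 come first, then image 1, and so on.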
class Cifar10Augmix(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, width=3, depth=3, random_depth=True, prob_coeff=1.0,
severity=3, transform_file=None):
self.aug_string = aug_string
self.width = width
self.depth = depth
self.prob_coeff = prob_coeff
self.random_depth = random_depth
self.severity = severity
if aug_string is not None:
augs = parse_aug_string(aug_string, im_size)
else:
augs = get_augs_by_tag(['augmix'])
augs = [a(severity=severity, im_size=im_size) for a in augs]
augmentation = compositions.Augmix(
augmentation_list=augs,
width=width,
max_depth=depth,
random_depth=random_depth,
prob_coeff=prob_coeff
)
super(Cifar10Augmix, self).__init__(data_path, split, im_size, train_aug, num_transforms,
augmentation, transform_file=transform_file)
class Cifar10RandomSample(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, severity=3, weights=None):
self.aug_string = aug_string
if aug_string is not None:
augs = parse_aug_string(aug_string, im_size)
else:
augs = get_augs_by_tag(['augmix'])
augs = [a(severity=severity, im_size=im_size) for a in augs]
augmentation = compositions.RandomSample(
augmentation_list=augs,
weights=weights
)
super(Cifar10RandomSample, self).__init__(data_path, split, im_size, train_aug, num_transforms, augmentation)
class Cifar10Corruption(Cifar10Base):
'''
Corruption datasets differ in three ways: they sample at a fixed maximum
intensity instead of randomly between a low value and some maximum, they
generate fixed transforms in a balanced order (and can report the
corruption/severity of a given transform index), and they carry the
metadata for the frost corruption.
'''
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, frost_path=None, include_extra=True, random_transforms=False):
self.include_extra = include_extra
self.random_transforms = random_transforms
if aug_string is not None:
self.aug_list = parse_aug_string(aug_string, im_size, max_intensity=True, frost_path=frost_path)
else:
augs = get_augs_by_tag(['imagenet_c'], [] if include_extra else ['extra'])
severities = [1,2,3,4,5]
self.aug_list = [a(severity=s, im_size=im_size, max_intensity=True, frost_path=frost_path)\
for a in augs for s in severities]
augmentation = compositions.RandomSample(
augmentation_list=self.aug_list
)
super(Cifar10Corruption, self).__init__(data_path, split, im_size, train_aug, num_transforms, augmentation)
def build_transform_list(self, num_transforms):
if self.random_transforms:
return super(Cifar10Corruption, self).build_transform_list(num_transforms)
assert num_transforms % len(self.aug_list) == 0,\
"The total number of augs needs to divide into the total number of transforms."
transform_list = None
for i in range(num_transforms):
transform_idx = i // (num_transforms // len(self.aug_list))
transform_params = self.aug_list[transform_idx].sample_parameters()
curr_record = self.aug.convert_to_numpy({
'idx' : transform_idx,
'transform_params' : transform_params
}).reshape(1,-1)
transform_list = np.concatenate((transform_list, curr_record), axis=0)\
if transform_list is not None else curr_record
return transform_list
def get_corruption(self, transform_index):
aug_type_index = transform_index // (len(self.transform_list) // len(self.aug_list))
return self.aug_list[aug_type_index].name, self.aug_list[aug_type_index].severity
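# Illustrative note (not original documentation): with a balanced transform
# list, len(transform_list) // len(aug_list) consecutive entries share one
# (corruption, severity) pair, which is the mapping get_corruption inverts.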
class Cifar10AutoAugment(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, subpolicy_list=None, add_cutout=False, transform_file=None):
def stom(low, high, sev):
return sev / 10 * (high - low) + low
size = im_size
init = lambda transform : transform(0, size)
tn = 150/331 * im_size
if subpolicy_list is None:
subpolicy_list = [
[(init(pil.Invert), 0.1, None, None), (init(pil.Contrast), 0.2, stom(0,0.9,6), 1)],
[(init(pil.Rotate), 0.7, stom(0,30,2), 0), (init(pil.TranslateX), 0.3, stom(0,tn,9), 0)],
[(init(pil.Sharpness), 0.8, stom(0,0.9,1), 1), (init(pil.Sharpness), 0.9, stom(0,0.9,3), 1)],
[(init(pil.ShearY), 0.5, stom(0,0.3,8), 0), (init(pil.TranslateY), 0.7, stom(0,tn,9), 0)],
[(init(pil.AutoContrast), 0.5, None, None), (init(pil.Equalize), 0.9, None, None)],
[(init(pil.ShearY), 0.2, stom(0,0.3,7), 0), (init(pil.Posterize), 0.3, int(stom(4,8,7)), None)],
[(init(pil.ColorBalance), 0.4, stom(0,0.9,3),1), (init(pil.Brightness), 0.6, stom(0,0.9,7),1)],
[(init(pil.Sharpness), 0.3, stom(0,0.9,9),1), (init(pil.Brightness), 0.7, stom(0,0.9,9),1)],
[(init(pil.Equalize), 0.6, None, None), (init(pil.Equalize), 0.5, None, None)],
[(init(pil.Contrast), 0.6, stom(0,0.9,7),1), (init(pil.Sharpness), 0.6, stom(0,0.9,5),1)],
[(init(pil.ColorBalance), 0.7, stom(0,0.9,7),1), (init(pil.TranslateX), 0.5, stom(0,tn,8),0)],
[(init(pil.Equalize), 0.3, None, None), (init(pil.AutoContrast), 0.4, None, None)],
[(init(pil.TranslateY), 0.4, stom(0,tn,3),0), (init(pil.Sharpness), 0.2, stom(0,0.9,6),1)],
[(init(pil.Brightness), 0.9, stom(0,0.9,6),1), (init(pil.ColorBalance), 0.2, stom(0,0.9,8),1)],
[(init(pil.Solarize), 0.5, stom(256,0,2),None), (init(pil.Invert), 0.0, None,None)],
[(init(pil.Equalize), 0.2, None, None), (init(pil.AutoContrast), 0.6, None, None)],
[(init(pil.Equalize), 0.2, None, None), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.ColorBalance), 0.9, stom(0,0.9,9),1), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.AutoContrast), 0.8, None, None), (init(pil.Solarize), 0.2, stom(256,0,8), None)],
[(init(pil.Brightness), 0.1, stom(0,0.9,3),1), (init(pil.ColorBalance), 0.7, stom(0,0.9,0),1)],
[(init(pil.Solarize), 0.4, stom(256,0,5), None), (init(pil.AutoContrast), 0.9, None, None)],
[(init(pil.TranslateY), 0.9, stom(0,tn,9), None), (init(pil.TranslateY), 0.7, stom(0,tn,9),0)],
[(init(pil.AutoContrast), 0.9, None, None), (init(pil.Solarize), 0.8, stom(256,0,3), None)],
[(init(pil.Equalize), 0.8, None, None), (init(pil.Invert), 0.1, None, None)],
[(init(pil.TranslateY), 0.7, stom(0,tn,9), 0), (init(pil.AutoContrast), 0.9, None, None)]
]
aug = compositions.AutoAugment(subpolicy_list)
if add_cutout:
cutout = obscure.CutOut(severity=10, im_size=im_size, max_intensity=True)
aug = compositions.ComposeSerially([aug, cutout])
super(Cifar10AutoAugment, self).__init__(data_path, split, im_size, train_aug, num_transforms,
aug, transform_file=transform_file)
class Cifar10PatchGaussian(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, transform_file=None, patch_width=25, patch_sigma=1.0, max_width=True):
if patch_width is not None:
aug = patch_gaussian.PatchGaussian(
severity=None,
im_size=im_size,
max_intensity=max_width,
sigma=patch_sigma,
width=patch_width
)
else:
aug = patch_gaussian.Gaussian(
severity = patch_sigma * 10,
im_size=im_size,
max_intensity=max_width
)
if train_aug or (split=='train' and train_aug is None):
train = standard_augmentations.Cifar10CropAndFlip(severity=None, im_size=im_size)
aug = compositions.ComposeSerially([aug, train])
super(Cifar10PatchGaussian, self).__init__(data_path, split, im_size, False, num_transforms,
aug, transform_file=transform_file)
class ImageNetBase(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, augmentation=None, rgb_to_bgr=True):
assert split in ['train', 'val'], "Unknown split {}".format(split)
self.train = True if split=='train' else False
self.train_aug = self.train if train_aug is None else train_aug
data_path = os.path.join(data_path, split)
if self.train_aug:
train_transform = [
tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
tv.transforms.RandomHorizontalFlip(),
]
else:
train_transform = [
tv.transforms.Resize(256),
tv.transforms.CenterCrop(im_size)
]
def RGB_to_BGR(image):
return image[[2,1,0],:,:]
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
if augmentation is None:
self.aug = aug.identity.Identity()
else:
self.aug = augmentation
self.posttransform = tv.transforms.Compose([
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)] +
([RGB_to_BGR] if rgb_to_bgr else []) #PyCls imagenet models are trained in BGR input order
)
self.transform_list = self.build_transform_list(num_transforms)\
if num_transforms is not None else None
self.dataset = tv.datasets.ImageFolder(data_path, None)
def build_transform_list(self, num_transforms):
transforms = [self.aug.convert_to_numpy(self.aug.sample_parameters()) for i in range(num_transforms)]
return np.stack(transforms, axis=0)
def get_random_transform(self):
if self.transform_list is None:
return self.aug.sample_parameters()
else:
params = self.transform_list[np.random.randint(low=0, high=len(self.transform_list))]
return self.aug.convert_from_numpy(params)
def __getitem__(self, index):
pre_im, label = self.dataset[index]
pre_im = self.pretransform(pre_im)
params = self.get_random_transform()
return self.posttransform(self.aug.transform(pre_im, **params)), label
def __len__(self):
return len(self.dataset)
def fixed_transform(self, index, transform_index):
assert self.transform_list is not None, "Must have a fixed transform list to generate fixed transforms."
im, label = self.dataset[index]
im = self.pretransform(im)
params = self.aug.convert_from_numpy(self.transform_list[transform_index])
im = self.aug.transform(im, **params)
return self.posttransform(im), label
def serialize(self, indices=None):
'''
Returns a new dataset that applies every fixed transform, in order,
to each image index, in order (images vary slowest).
'''
class SerialDataset(torch.utils.data.Dataset):
def __init__(self, dataset, indices=None):
self.dataset = dataset
self.indices = indices
def __getitem__(self, index):
im_idx = index // len(self.dataset.transform_list)
im_idx = self.indices[im_idx] if self.indices is not None else im_idx
param_idx = index % len(self.dataset.transform_list)
return self.dataset.fixed_transform(im_idx, param_idx)
def __len__(self):
if self.indices is not None:
return len(self.indices) * len(self.dataset.transform_list)
else:
return len(self.dataset) * len(self.dataset.transform_list)
return SerialDataset(self, indices)
class ImageNetCorruption(ImageNetBase):
'''
Corruption datasets differ in three ways: they sample at a fixed maximum
intensity instead of randomly between a low value and some maximum, they
generate fixed transforms in a balanced order (and can report the
corruption/severity of a given transform index), and they carry the
metadata for the frost corruption.
'''
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, frost_path=None, include_extra=True, rgb_to_bgr=True):
self.include_extra = include_extra
if aug_string is not None:
self.aug_list = parse_aug_string(aug_string, im_size, max_intensity=True, frost_path=frost_path)
else:
augs = get_augs_by_tag(['imagenet_c'], [] if include_extra else ['extra'])
severities = [1,2,3,4,5]
self.aug_list = [a(severity=s, im_size=im_size, max_intensity=True, frost_path=frost_path)\
for a in augs for s in severities]
augmentation = compositions.RandomSample(
augmentation_list=self.aug_list
)
super(ImageNetCorruption, self).__init__(data_path, split, im_size, train_aug, num_transforms,
augmentation, rgb_to_bgr)
def build_transform_list(self, num_transforms):
assert num_transforms % len(self.aug_list) == 0,\
"The total number of augs needs to divide into the total number of transforms."
transform_list = None
for i in range(num_transforms):
transform_idx = i // (num_transforms // len(self.aug_list))
transform_params = self.aug_list[transform_idx].sample_parameters()
curr_record = self.aug.convert_to_numpy({
'idx' : transform_idx,
'transform_params' : transform_params
}).reshape(1,-1)
transform_list = np.concatenate((transform_list, curr_record), axis=0)\
if transform_list is not None else curr_record
return transform_list
def get_corruption(self, transform_index):
aug_type_index = transform_index // (len(self.transform_list) // len(self.aug_list))
return self.aug_list[aug_type_index].name, self.aug_list[aug_type_index].severity
class ImageNetPatchGaussian(ImageNetBase):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, patch_width=250, patch_sigma=1.0, max_width=False, rgb_to_bgr=True):
if patch_width is not None:
aug = patch_gaussian.PatchGaussian(
severity=None,
im_size=im_size,
max_intensity=max_width,
sigma=patch_sigma,
width=patch_width
)
else:
aug = patch_gaussian.Gaussian(
severity = patch_sigma * 10,
im_size=im_size,
max_intensity=max_width
)
super(ImageNetPatchGaussian, self).__init__(data_path, split, im_size, train_aug, num_transforms,
aug, rgb_to_bgr=rgb_to_bgr)
class ImageNetAutoAugment(ImageNetBase):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, subpolicy_list=None, rgb_to_bgr=True):
def stom(low, high, sev):
return sev / 10 * (high - low) + low
size = im_size
init = lambda transform : transform(0, size)
tn = 150/331 * im_size
if subpolicy_list is None:
subpolicy_list = [
[(init(pil.Posterize), 0.4, int(stom(4,8,8)), None), (init(pil.Rotate), 0.6, stom(0,30,9),0)],
[(init(pil.Solarize), 0.6, stom(256,0,5), None), (init(pil.AutoContrast), 0.6, None,None)],
[(init(pil.Equalize), 0.8, None, None), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Posterize), 0.6, int(stom(4,8,7)), None), (init(pil.Posterize), 0.6, int(stom(4,8,6)),None)],
[(init(pil.Equalize), 0.4, None, None), (init(pil.Solarize), 0.2, stom(256,0,4),None)],
[(init(pil.Equalize), 0.4, None, None), (init(pil.Rotate), 0.8, stom(0,30,8),0)],
[(init(pil.Solarize), 0.6, stom(256,0,3), None), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Posterize), 0.8, int(stom(4,8,5)), None), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.Rotate), 0.2, stom(0,30,3), 0), (init(pil.Solarize), 0.6, stom(256,0,8),None)],
[(init(pil.Equalize), 0.6, None, None), (init(pil.Posterize), 0.4, int(stom(4,8,6)),None)],
[(init(pil.Rotate), 0.8, stom(0,30,8), 0), (init(pil.ColorBalance), 0.4, stom(0,0.9,0),1)],
[(init(pil.Rotate), 0.4, stom(0,30,9), 0), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Equalize), 0.0, None, None), (init(pil.Equalize), 0.8, None, None)],
[(init(pil.Invert), 0.6, None, None), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.ColorBalance), 0.6, stom(0,0.9,4), 1), (init(pil.Contrast), 1.0, stom(0,0.9,8),1)],
[(init(pil.Rotate), 0.8, stom(0,30,8), 0), (init(pil.ColorBalance), 1.0, stom(0,0.9,2),1)],
[(init(pil.ColorBalance), 0.8, stom(0,0.9,8), 1), (init(pil.Solarize), 0.8, stom(256,0,7),None)],
[(init(pil.Sharpness), 0.4, stom(0,0.9,7), 1), (init(pil.Invert), 0.6, None, None)],
[(init(pil.ShearX), 0.6, stom(0,0.9,5), 1), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.ColorBalance), 0.4, stom(0,0.9,0), 1), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Equalize), 0.4, None, None), (init(pil.Solarize), 0.2, stom(256,0,4),None)],
[(init(pil.Solarize), 0.6, stom(256,0,5), None), (init(pil.AutoContrast), 0.6, None, None)],
[(init(pil.Invert), 0.6, None, None), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.ColorBalance), 0.6, stom(0,0.9,4), 1), (init(pil.Contrast), 1.0, stom(0,0.9,8),1)],
[(init(pil.Equalize), 0.8, None, None), (init(pil.Equalize), 0.6, None, None)],
]
aug = compositions.AutoAugment(subpolicy_list)
super(ImageNetAutoAugment, self).__init__(data_path, split, im_size, train_aug, num_transforms,
aug, rgb_to_bgr=rgb_to_bgr)
class ImageNetAugmix(ImageNetBase):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, width=3, depth=3, random_depth=True, prob_coeff=1.0,
severity=3, rgb_to_bgr=True):
self.aug_string = aug_string
self.width = width
self.depth = depth
self.prob_coeff = prob_coeff
self.random_depth = random_depth
self.severity = severity
if aug_string is not None:
augs = parse_aug_string(aug_string, im_size)
else:
augs = get_augs_by_tag(['augmix'])
augs = [a(severity=severity, im_size=im_size) for a in augs]
augmentation = compositions.Augmix(
augmentation_list=augs,
width=width,
max_depth=depth,
random_depth=random_depth,
prob_coeff=prob_coeff
)
super(ImageNetAugmix, self).__init__(data_path, split, im_size, train_aug, num_transforms,
augmentation, rgb_to_bgr=rgb_to_bgr)
class Cifar10AugmixJSD(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, train_aug=True,
augmix_width=3, augmix_depth=3, augmix_random_depth=True,
augmix_prob_coeff=1.0, augmix_severity=3,
jsd_num=3):
self.jsd_num = jsd_num
self.split = split
        self.train = (split == 'train')
train_transform = [tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomCrop(im_size, padding=4)]\
if (self.train and train_aug) else []
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
self.posttransform = tv.transforms.Compose([NumpyToTensor(), tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)])
aug_list = [
pil.ShearX(augmix_severity, im_size=im_size),
pil.ShearY(augmix_severity, im_size=im_size),
pil.TranslateX(augmix_severity, im_size=im_size),
pil.TranslateY(augmix_severity, im_size=im_size),
pil.Rotate(augmix_severity, im_size=im_size),
pil.Equalize(augmix_severity, im_size=im_size),
pil.AutoContrast(augmix_severity, im_size=im_size),
pil.Solarize(augmix_severity, im_size=im_size),
pil.Posterize(augmix_severity, im_size=im_size)
]
self.aug = compositions.Augmix(
augmentation_list=aug_list,
width=augmix_width,
max_depth=augmix_depth,
random_depth=augmix_random_depth,
prob_coeff=augmix_prob_coeff
)
self.dataset = tv.datasets.CIFAR10(data_path, self.train, transform=None, download=False)
def __getitem__(self, index):
im, label = self.dataset[index]
im = self.pretransform(im)
im_one = self.posttransform(im)
        ims = [self.posttransform(self.aug(im)) for _ in range(self.jsd_num - 1)]
c, h, w = im_one.size()
out = torch.stack([im_one] + ims, dim=0).view(c * self.jsd_num, h, w)
return out, label
def __len__(self):
return len(self.dataset)
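# Hedged sketch (editor's addition, not in the original file): the dataset
# above packs the clean image plus (jsd_num - 1) AugMix views along the
# channel axis, so a loader batch has shape (bs, c * jsd_num, h, w) and the
# training loop is expected to unpack it with a matching view(). Demonstrated
# here on dummy tensors; jsd_num=3 and CIFAR-sized inputs are assumptions.
def _demo_jsd_packing(bs=4, jsd_num=3, c=3, h=32, w=32):
    batch = torch.randn(bs, c * jsd_num, h, w)    # as collated by a DataLoader
    unpacked = batch.view(bs * jsd_num, c, h, w)  # mirrors the unpack in train_net_jsd.py
    return unpacked.shape                         # (bs * jsd_num, c, h, w)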
class ImageNetAugmixJSD(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, RGB_to_BGR=True, mixture_width=3, mixture_depth=-1, aug_severity=1, aug_prob_coeff=1, jsd_num=3):
self.split = split
        self.train = (split == 'train')
self.im_size = im_size
self.RGB_to_BGR = RGB_to_BGR
self.train_transform = tv.transforms.Compose(
[tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
tv.transforms.RandomHorizontalFlip()])
self.test_transform = tv.transforms.Compose(
[tv.transforms.Resize(256),
tv.transforms.CenterCrop(im_size)])
self.preprocess = tv.transforms.Compose(
[tv.transforms.ToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)])
data_path = os.path.join(data_path, split)
self.transform = self.train_transform if self.train else self.test_transform
self.dataset = tv.datasets.ImageFolder(data_path, None)
self.width = mixture_width
self.depth = mixture_depth
self.severity = aug_severity
self.prob_coeff = aug_prob_coeff
self.im_size = im_size
self.num = jsd_num
self.augmentations = [
self.rotate,
self.shear_x,
self.shear_y,
self.translate_x,
self.translate_y,
self.autocontrast,
self.posterize,
self.equalize,
self.solarize,
]
def _prepare_im(self, im):
im = self.preprocess(im)
if self.RGB_to_BGR:
im = im[[2,1,0],:,:]
return im
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
im, label = self.dataset[index]
im = self.transform(im)
        ims = [self._prepare_im(im)] + [self.augment(im) for _ in range(1, self.num)]
im = np.concatenate(ims, axis=0)
return im, label
def augment(self, im):
ws = np.float32(
np.random.dirichlet([self.prob_coeff] * self.width))
m = np.float32(np.random.beta(self.prob_coeff, self.prob_coeff))
mix = torch.zeros_like(self._prepare_im(im))
for i in range(self.width):
image_aug = im.copy()
depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(self.augmentations)
image_aug = op(image_aug, self.severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * self._prepare_im(image_aug)
mixed = (1 - m) * self._prepare_im(im) + m * mix
return mixed
def autocontrast(self, pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(self, pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(self, pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(self, pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(self, pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(self, pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(self, pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(self, pil_img, level):
level = int_parameter(sample_level(level), self.im_size / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(self, pil_img, level):
level = int_parameter(sample_level(level), self.im_size / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
class ImageNetSIN(torch.utils.data.Dataset):
def __init__(self, in_data_path, sin_data_path, split, im_size, train_aug=None, rgb_to_bgr=True):
assert split in ['train', 'val'], "Unknown split {}".format(split)
        self.train = (split == 'train')
self.train_aug = self.train if train_aug is None else train_aug
in_data_path = os.path.join(in_data_path, split)
sin_data_path = os.path.join(sin_data_path, split)
if self.train_aug:
train_transform = [
tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
tv.transforms.RandomHorizontalFlip(),
]
else:
train_transform = [
tv.transforms.Resize(256),
tv.transforms.CenterCrop(im_size)
]
def RGB_to_BGR(image):
return image[[2,1,0],:,:]
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
self.posttransform = tv.transforms.Compose([
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)] +
            ([RGB_to_BGR] if rgb_to_bgr else [])  # PyCls ImageNet models are trained with BGR input order
)
self.transform = tv.transforms.Compose([
self.pretransform,
self.posttransform
])
self.dataset = torch.utils.data.ConcatDataset([
tv.datasets.ImageFolder(in_data_path, self.transform),
tv.datasets.ImageFolder(sin_data_path, self.transform)
])
def __getitem__(self, idx):
return self.dataset[idx]
def __len__(self):
return len(self.dataset)
|
augmentation-corruption-fbr_main
|
experiments/overlap/datasets.py
|
augmentation-corruption-fbr_main
|
experiments/overlap/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
import os
import time
import datetime
import torch.nn as nn
import torch.nn.functional as F
log = logging.getLogger(__name__)
def eta_str(eta_td):
"""Converts an eta timedelta to a fixed-width string format."""
days = eta_td.days
hrs, rem = divmod(eta_td.seconds, 3600)
mins, secs = divmod(rem, 60)
return '{0:02},{1:02}:{2:02}:{3:02}'.format(days, hrs, mins, secs)
def train_net(model, optimizer, train_dataset,
batch_size,
max_epoch,
loader_params,
lr_policy,
checkpoint_folder='checkpoints',
name=None,
save_period=1,
weights=None,
num_gpus=1,
is_leader=True,
jsd_num=3,
jsd_alpha=12.0):
chpk_pre = 'model_epoch_'
if name is not None:
chpk_pre = name + "_" + chpk_pre
chpk_post = '.pyth'
if os.path.exists(checkpoint_folder):
checkpoints = [c for c in os.listdir(checkpoint_folder) if chpk_post in c and chpk_pre == "_".join(c.split("_")[:-1]) +"_"]
else:
checkpoints = []
if weights:
checkpoint = torch.load(weights, map_location='cpu')
log.info("Pretrained weights provided. Loading model from {} and skipping training.".format(weights))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
return model
elif checkpoints:
last_checkpoint_name = os.path.join(checkpoint_folder, sorted(checkpoints)[-1])
checkpoint = torch.load(last_checkpoint_name, map_location='cpu')
log.info("Loading model from {}".format(last_checkpoint_name))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
start_epoch = checkpoint['epoch'] + 1
else:
start_epoch = 1
if train_dataset is None:
return model
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
        shuffle=(sampler is None),
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=True
)
for i in range(start_epoch, max_epoch+1):
log.info("Starting epoch {}/{}".format(i, max_epoch))
time_start = time.time()
if sampler:
sampler.set_epoch(i)
train_epoch(model, optimizer, loader, lr_policy, i, num_gpus, jsd_num=jsd_num, jsd_alpha=jsd_alpha)
time_stop = time.time()
seconds_taken = (time_stop - time_start)
eta_td = datetime.timedelta(seconds=int(seconds_taken*(max_epoch-i)))
log.info("Seconds taken: {:.2f}, Time remaining: {}".format(seconds_taken, eta_str(eta_td)))
if (i % save_period == 0 or i == max_epoch) and is_leader:
if num_gpus > 1:
m = model.module
else:
m = model
checkpoint = {
'epoch' : i,
'model_state' : m.state_dict(),
'optimizer_state' : optimizer.state_dict()
}
checkpoint_file = "{:s}{:04d}{:s}".format(chpk_pre, i, chpk_post)
if not os.path.exists(checkpoint_folder):
os.mkdir(checkpoint_folder)
checkpoint_file = os.path.join(checkpoint_folder, checkpoint_file)
log.info("Saving model to {}".format(checkpoint_file))
torch.save(checkpoint, checkpoint_file)
class JSDLoss(nn.Module):
def __init__(self, alpha=12.0, num=3):
super(JSDLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss()
self.alpha = alpha
self.num = num
def forward(self, preds, labels):
if not self.training:
return self.cross_entropy(preds, labels)
bs, nc = preds.size()
preds = preds.view(bs//self.num, self.num, nc)
clean_loss = self.cross_entropy(preds[:,0,:],labels)
p_preds = F.softmax(preds, dim=2)
p_mixture = torch.clamp(torch.mean(p_preds, dim=1, keepdim=True), 1e-7, 1).log()
p_mixture = p_mixture.repeat(1,self.num,1)
jsd_loss = F.kl_div(p_mixture, p_preds, reduction='batchmean') / self.num
return clean_loss + self.alpha * jsd_loss
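# Hedged sketch (editor's addition, not in the original file): exercises
# JSDLoss on dummy logits. The loss expects logits grouped per example as
# [clean, aug_1, ..., aug_{num-1}] along a leading dimension of size
# bs * num; batch size and class count here are arbitrary assumptions, and
# .train() is called because the JSD term is only added in training mode.
def _demo_jsd_loss(bs=4, num=3, num_classes=10):
    loss_fn = JSDLoss(alpha=12.0, num=num)
    loss_fn.train()
    preds = torch.randn(bs * num, num_classes)
    labels = torch.randint(0, num_classes, (bs,))
    return loss_fn(preds, labels)  # scalar: clean CE + alpha * JSD consistency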
def train_epoch(model, optimizer, loader, lr_policy, epoch, num_gpus=1, jsd_num=3, jsd_alpha=12.0):
lr = lr_policy(epoch-1)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model.train()
loss_fun = JSDLoss(alpha=jsd_alpha,num=jsd_num).cuda()
loss_fun.train()
avg_loss = 0.0
num_correct = 0
num_total = 0
num_batches = 0
for cur_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
bs, c, h, w = inputs.size()
inputs = inputs.view(bs*jsd_num, c//jsd_num, h, w) # Unpack jsd images
preds = model(inputs)
loss = loss_fun(preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
preds = preds.view(bs, jsd_num, -1)
preds = preds[:,0,:]
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
torch.distributed.all_reduce(loss)
avg_loss += loss.item()
num_correct += correct.item()
num_total += labels.size(0) * num_gpus
num_batches += num_gpus
avg_loss /= num_batches
err = 100 * (1 - num_correct / num_total)
log.info("Avg loss: {:.3f}, Avg err: {:.3f}".format(avg_loss, err))
|
augmentation-corruption-fbr_main
|
experiments/overlap/train_net_jsd.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from hydra.utils import instantiate
from .train_net import train_net
class Network(object):
def __init__(self, model_cfg, optim_cfg, dataset_cfg, data_loader, num_gpus=1, is_leader=True):
cur_device = torch.cuda.current_device()
self.model = instantiate(model_cfg).cuda(device=cur_device)
if num_gpus > 1:
self.model = torch.nn.parallel.DistributedDataParallel(
module=self.model,
device_ids=[cur_device],
output_device=cur_device
)
self.optimizer = instantiate(optim_cfg, self.model.parameters())
if optim_cfg.max_epoch > 0:
self.dataset = instantiate(dataset_cfg)
else:
self.dataset = None
self.batch_size = dataset_cfg.batch_size
self.max_epoch = optim_cfg.max_epoch
self.loader_params = data_loader
self.lr_policy = instantiate(optim_cfg.lr_policy)
self.save_period = dataset_cfg.checkpoint_period
self.weights = dataset_cfg.weights
self.num_gpus = num_gpus
self.is_leader = is_leader
def train(self):
train_net(self.model,
self.optimizer,
self.dataset,
self.batch_size,
self.max_epoch,
self.loader_params,
self.lr_policy,
save_period=self.save_period,
name='ft',
weights=self.weights,
num_gpus=self.num_gpus,
is_leader=self.is_leader
)
self.model.eval()
def extract(self, x):
preds = self.model(x)
if self.num_gpus > 1:
return self.model.module.features
else:
return self.model.features
|
augmentation-corruption-fbr_main
|
experiments/overlap/feature_extractor.py
|
# This source code is adapted from code licensed under the MIT license
# found in third_party/wideresnet_license from the root directory of
# this source tree.
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
"""Basic ResNet block."""
def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.drop_rate = drop_rate
self.is_in_equal_out = (in_planes == out_planes)
        self.conv_shortcut = None if self.is_in_equal_out else nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=1,
            stride=stride,
            padding=0,
            bias=False)
def forward(self, x):
if not self.is_in_equal_out:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.is_in_equal_out:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.drop_rate > 0:
out = F.dropout(out, p=self.drop_rate, training=self.training)
out = self.conv2(out)
if not self.is_in_equal_out:
return torch.add(self.conv_shortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
"""Layer container for blocks."""
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
drop_rate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, drop_rate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
drop_rate):
layers = []
for i in range(nb_layers):
            layers.append(
                block(in_planes if i == 0 else out_planes, out_planes,
                      stride if i == 0 else 1, drop_rate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
"""WideResNet class."""
def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
super(WideResNet, self).__init__()
self.depth = depth
self.widen_factor = widen_factor
n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(
3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
drop_rate)
# 2nd block
self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
drop_rate)
# 3rd block
self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
drop_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(n_channels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(n_channels[3], num_classes)
self.n_channels = n_channels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.n_channels)
self.features = out #Expose penultimate layer for access as features
return self.fc(out)
# Stage depths for ImageNet models
_IN_STAGE_DS = {
18: (2, 2, 2, 2),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
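# Hedged sketch (editor's addition, not in the original file): builds a
# WRN-40-2 and reads back the penultimate features exposed by forward().
# The depth/widen_factor pair and the 32x32 input are assumptions consistent
# with the CIFAR-style avg_pool2d(out, 8) above.
def _demo_wideresnet():
    net = WideResNet(depth=40, num_classes=10, widen_factor=2)
    x = torch.randn(2, 3, 32, 32)
    logits = net(x)       # shape (2, 10)
    feats = net.features  # shape (2, 128): 64 * widen_factor channels
    return logits.shape, feats.shape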
|
augmentation-corruption-fbr_main
|
experiments/overlap/wideresnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
import os
import time
import datetime
log = logging.getLogger(__name__)
def eta_str(eta_td):
"""Converts an eta timedelta to a fixed-width string format."""
days = eta_td.days
hrs, rem = divmod(eta_td.seconds, 3600)
mins, secs = divmod(rem, 60)
return '{0:02},{1:02}:{2:02}:{3:02}'.format(days, hrs, mins, secs)
def train_net(model, optimizer, train_dataset,
batch_size,
max_epoch,
loader_params,
lr_policy,
checkpoint_folder='checkpoints',
name=None,
save_period=1,
weights=None,
num_gpus=1,
is_leader=True):
chpk_pre = 'model_epoch_'
if name is not None:
chpk_pre = name + "_" + chpk_pre
chpk_post = '.pyth'
if os.path.exists(checkpoint_folder):
checkpoints = [c for c in os.listdir(checkpoint_folder) if chpk_post in c and chpk_pre == "_".join(c.split("_")[:-1]) +"_"]
else:
checkpoints = []
if weights:
checkpoint = torch.load(weights, map_location='cpu')
log.info("Pretrained weights provided. Loading model from {} and skipping training.".format(weights))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
return model
elif checkpoints:
last_checkpoint_name = os.path.join(checkpoint_folder, sorted(checkpoints)[-1])
checkpoint = torch.load(last_checkpoint_name, map_location='cpu')
log.info("Loading model from {}".format(last_checkpoint_name))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
start_epoch = checkpoint['epoch'] + 1
else:
start_epoch = 1
if train_dataset is None:
return model
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
        shuffle=(sampler is None),
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=True
)
for i in range(start_epoch, max_epoch+1):
log.info("Starting epoch {}/{}".format(i, max_epoch))
time_start = time.time()
if sampler:
sampler.set_epoch(i)
train_epoch(model, optimizer, loader, lr_policy, i, num_gpus)
time_stop = time.time()
seconds_taken = (time_stop - time_start)
eta_td = datetime.timedelta(seconds=int(seconds_taken*(max_epoch-i)))
log.info("Seconds taken: {:.2f}, Time remaining: {}".format(seconds_taken, eta_str(eta_td)))
if (i % save_period == 0 or i == max_epoch) and is_leader:
if num_gpus > 1:
m = model.module
else:
m = model
checkpoint = {
'epoch' : i,
'model_state' : m.state_dict(),
'optimizer_state' : optimizer.state_dict()
}
checkpoint_file = "{:s}{:04d}{:s}".format(chpk_pre, i, chpk_post)
if not os.path.exists(checkpoint_folder):
os.mkdir(checkpoint_folder)
checkpoint_file = os.path.join(checkpoint_folder, checkpoint_file)
log.info("Saving model to {}".format(checkpoint_file))
torch.save(checkpoint, checkpoint_file)
def train_epoch(model, optimizer, loader, lr_policy, epoch, num_gpus=1):
lr = lr_policy(epoch-1)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model.train()
loss_fun = torch.nn.CrossEntropyLoss().cuda()
avg_loss = 0.0
num_correct = 0
num_total = 0
num_batches = 0
for cur_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
loss = loss_fun(preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
torch.distributed.all_reduce(loss)
avg_loss += loss.item()
num_correct += correct.item()
num_total += labels.size(0) * num_gpus
num_batches += num_gpus
avg_loss /= num_batches
err = 100 * (1 - num_correct / num_total)
log.info("Avg loss: {:.3f}, Avg err: {:.3f}".format(avg_loss, err))
|
augmentation-corruption-fbr_main
|
experiments/overlap/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import simplejson
import decimal
import logging
log = logging.getLogger(__name__)
_TAG = 'json_stats: '
def log_json_stats(stats):
"""Logs json stats."""
# Decimal + string workaround for having fixed len float vals in logs
stats = {
k: decimal.Decimal('{:.6f}'.format(v)) if isinstance(v, float) else v
for k, v in stats.items()
}
json_stats = simplejson.dumps(stats, sort_keys=True, use_decimal=True)
log.info('{:s}{:s}'.format(_TAG, json_stats))
def load_json_stats(log_file):
"""Loads json_stats from a single log file."""
with open(log_file, 'r') as f:
lines = f.readlines()
json_lines = [l[l.find(_TAG) + len(_TAG):] for l in lines if _TAG in l]
json_stats = [simplejson.loads(l) for l in json_lines]
return json_stats
def parse_json_stats(log, row_type, key):
"""Extract values corresponding to row_type/key out of log."""
vals = [row[key] for row in log if row['_type'] == row_type and key in row]
if key == 'iter' or key == 'epoch':
vals = [int(val.split('/')[0]) for val in vals]
return vals
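# Hedged sketch (editor's addition, not in the original file): one round trip
# through the helpers above. log_json_stats emits the dict behind the
# 'json_stats: ' tag, and parse_json_stats recovers typed values from a list
# of such dicts; the '_type' key and the 'cur/total' epoch string format are
# inferred from parse_json_stats.
def _demo_json_stats():
    stats = {'_type': 'train_epoch', 'epoch': '1/100', 'loss': 0.123456}
    log_json_stats(stats)  # logs: json_stats: {"_type": "train_epoch", ...}
    return parse_json_stats([stats], 'train_epoch', 'epoch')  # -> [1]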
|
augmentation-corruption-fbr_main
|
experiments/overlap/utils/logging.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class Cosine(object):
def __init__(self, base_lr, max_epoch):
self.base_lr = base_lr
self.max_epoch = max_epoch
def __call__(self, cur_epoch):
return 0.5 * self.base_lr * (1.0 + np.cos(np.pi * cur_epoch / self.max_epoch))
class Steps(object):
def __init__(self, base_lr, lr_mult, steps):
self.base_lr = base_lr
self.lr_mult = lr_mult
self.steps = steps
def __call__(self, cur_epoch):
ind = [i for i, s in enumerate(self.steps) if cur_epoch >= s][-1]
return self.base_lr * self.lr_mult ** ind
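# Hedged sketch (editor's addition, not in the original file): evaluates both
# schedules over a short run. Note Steps assumes its `steps` list starts at
# epoch 0, so the index lookup never sees an empty list; all hyperparameters
# here are illustrative.
def _demo_lr_policies(max_epoch=10):
    cosine = Cosine(base_lr=0.1, max_epoch=max_epoch)
    steps = Steps(base_lr=0.1, lr_mult=0.1, steps=[0, 5, 8])
    return ([cosine(e) for e in range(max_epoch)],  # smooth decay from 0.1 toward 0
            [steps(e) for e in range(max_epoch)])   # 0.1, then 0.01 at 5, 0.001 at 8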
|
augmentation-corruption-fbr_main
|
experiments/overlap/utils/lr_policy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from scipy.ndimage import gaussian_filter
from .utils.severity import float_parameter, int_parameter, sample_level
from .utils.image import bilinear_interpolation, smoothstep
import numpy as np
class CausticRefraction(Augmentation):
tags = ['distortion']
name = 'caustic_refraction'
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
eta = 4.0
lens_scale = float_parameter(sample_level(self.severity, self.max_intensity), 0.5*self.im_size)
lighting_amount = float_parameter(sample_level(self.severity, self.max_intensity), 2.0)
softening = 1
return { 'time' : time, 'size' : size, 'eta' : eta, 'lens_scale' : lens_scale, 'lighting_amount': lighting_amount, 'softening' : softening}
def transform(self, image, time, size, eta, lens_scale, lighting_amount, softening):
def caustic_noise_kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
def refract(incident, normal, eta):
if np.abs(np.dot(incident, normal)) >= 1.0 - 1e-3:
return incident
angle = np.arccos(np.dot(incident, normal))
out_angle = np.arcsin(np.sin(angle) / eta)
out_unrotated = np.array([np.cos(out_angle), np.sin(out_angle), 0.0])
spectator_dim = np.cross(incident, normal)
spectator_dim /= np.linalg.norm(spectator_dim)
orthogonal_dim = np.cross(normal, spectator_dim)
rotation_matrix = np.stack((normal, orthogonal_dim, spectator_dim), axis=0)
return np.matmul(np.linalg.inv(rotation_matrix), out_unrotated)
def luma_at_offset(image, origin, offset):
pixel_value = image[origin[0]+offset[0], origin[1]+offset[1], :]\
if origin[0]+offset[0] >= 0 and origin[0]+offset[0] < image.shape[0]\
and origin[1]+offset[1] >= 0 and origin[1]+offset[1] < image.shape[1]\
else np.array([0.0,0.0,0])
return np.dot(pixel_value, np.array([0.2126, 0.7152, 0.0722]))
def luma_based_refract(point, image, caustics, eta, lens_scale, lighting_amount):
north_luma = luma_at_offset(caustics, point, np.array([0,-1]))
south_luma = luma_at_offset(caustics, point, np.array([0, 1]))
west_luma = luma_at_offset(caustics, point, np.array([-1, 0]))
east_luma = luma_at_offset(caustics, point, np.array([1,0]))
lens_normal = np.array([east_luma - west_luma, south_luma - north_luma, 1.0])
lens_normal = lens_normal / np.linalg.norm(lens_normal)
refract_vector = refract(np.array([0.0, 0.0, 1.0]), lens_normal, eta) * lens_scale
refract_vector = np.round(refract_vector, 3)
out_pixel = bilinear_interpolation(image, point+refract_vector[0:2])
out_pixel += (north_luma - south_luma) * lighting_amount
out_pixel += (east_luma - west_luma) * lighting_amount
return np.clip(out_pixel, 0, 1)
noise = np.array([[caustic_noise_kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = gaussian_filter(noise, sigma=softening)
image = image.astype(np.float32) / 255
out = np.array([[luma_based_refract(np.array([y,x]), image, noise, eta, lens_scale, lighting_amount)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((out * 255).astype(np.uint8), 0, 255)
class PinchAndTwirl(Augmentation):
tags = ['distortion']
name = 'pinch_and_twirl'
def sample_parameters(self):
centers = [np.random.randint(low=0, high=self.im_size, size=2) for i in range(5)]
radius = self.im_size // 4
#amounts = np.random.uniform(low=0.2, high=1.0, size=5)
#angles = np.random.uniform(low=-np.pi, high=np.pi, size=5)
angles = [float_parameter(sample_level(self.severity, self.max_intensity), np.pi/4)-float_parameter(sample_level(self.severity, True), np.pi/8)\
for i in range(5)]
amounts = [float_parameter(sample_level(self.severity, self.max_intensity), 0.4) + 0.1\
for i in range(5)]
return {'centers' : centers, 'radius' : radius, 'amounts' : amounts, 'angles' : angles}
def transform(self, image, centers, radius, amounts, angles):
def warp_kernel(point, center, radius, amount, angle):
dx = point[0] - center[0]
dy = point[1] - center[1]
dist = np.linalg.norm(point - center)
if dist > radius or np.round(dist, 3) == 0.0:
return point
d = dist / radius
t = np.sin(np.pi * 0.5 * d) ** (- amount)
dx *= t
dy *= t
e = 1 - d
a = angle * (e ** 2)
out = center + np.array([dx*np.cos(a) - dy*np.sin(a), dx*np.sin(a) + dy*np.cos(a)])
return out
image = image.astype(np.float32)
for center, angle, amount in zip(centers, angles, amounts):
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, amount, angle))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class PinchAndTwirlV2(Augmentation):
tags = ['distortion']
name = 'pinch_and_twirl_v2'
def sample_parameters(self):
num_per_axis = 5 if self.im_size==224 else 3
#angles = np.array([float_parameter(sample_level(self.severity, self.max_intensity), np.pi)-float_parameter(sample_level(self.severity, True), np.pi/2)\
# for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
#if self.im_size == 224:
angles = np.array([np.random.choice([1,-1]) * float_parameter(sample_level(self.severity, self.max_intensity), np.pi/2) for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
#else:
# angles = np.array([np.random.choice([1,-1]) * (float_parameter(sample_level(self.severity, self.max_intensity), np.pi/4)+np.pi/4) for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
amount = float_parameter(sample_level(self.severity, self.max_intensity), 0.4) + 0.1
return {'num_per_axis' : num_per_axis, 'angles' : angles, 'amount' : amount}
def transform(self, image, num_per_axis, angles, amount):
def warp_kernel(point, center, radius, amount, angle):
dx = point[0] - center[0]
dy = point[1] - center[1]
dist = np.linalg.norm(point - center)
if dist > radius or np.round(dist, 3) == 0.0:
return point
d = dist / radius
t = np.sin(np.pi * 0.5 * d) ** (- amount)
dx *= t
dy *= t
e = 1 - d
a = angle * (e ** 2)
out = center + np.array([dx*np.cos(a) - dy*np.sin(a), dx*np.sin(a) + dy*np.cos(a)])
return out
out = image.copy().astype(np.float32)
grid_size = self.im_size // num_per_axis
radius = grid_size / 2
for i in range(num_per_axis):
for j in range(num_per_axis):
l, r = i * grid_size, (i+1) * grid_size
u, d = j * grid_size, (j+1) * grid_size
center = np.array([u+radius, l+radius])
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, amount, angles[i,j]))\
for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
class FishEye(Augmentation):
tags = ['distortion']
name = 'fish_eye'
def sample_parameters(self):
centers = [np.random.randint(low=0, high=self.im_size, size=2) for i in range(5)]
etas = [float_parameter(sample_level(self.severity, self.max_intensity), 1.0)+1.0\
for i in range(5)]
radii = [np.random.uniform(low=0.1, high=0.3) * self.im_size for i in range(5)]
return {'centers' : centers, 'radii' : radii, 'etas': etas}
def transform(self, image, centers, radii, etas):
def warp_kernel(point, center, a, b, eta):
dx = point[0] - center[0]
dy = point[1] - center[1]
x2 = dx**2
y2 = dy**2
a2 = a**2
b2 = b**2
if (y2 >= (b2 - b2*x2/a2)):
return point
r = 1.0 / eta
z = np.sqrt((1.0 - x2/a2 - y2/b2) * (a*b))
z2 = z**2
x_angle = np.arccos(dx / np.sqrt(x2+z2))
angle_1 = np.pi/2 - x_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - x_angle - angle_2
out_x = point[0] - np.tan(angle_2)*z
y_angle = np.arccos(dy / np.sqrt(y2+z2))
angle_1 = np.pi/2 - y_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - y_angle - angle_2
out_y = point[1] - np.tan(angle_2)*z
return np.array([out_x, out_y])
for center, radius, eta in zip(centers, radii, etas):
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, radius, eta))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class FishEyeV2(Augmentation):
tags = ['distortion']
name = 'fish_eye_v2'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
#density = float_parameter(sample_level(self.severity, self.max_intensity), 0.01)
density = 0.01 * 224**2 / (self.im_size**2)
#eta = 2
eta = float_parameter(sample_level(self.severity, self.max_intensity), 2.0) + 1.0
radius = max(0.05 * self.im_size, 3)
return {'seed' : seed, 'density' : density, 'eta': eta, 'radius' : radius}
def transform(self, image, density, eta, radius, seed):
def warp_kernel(point, center, a, b, eta):
dx = point[0] - center[0]
dy = point[1] - center[1]
x2 = dx**2
y2 = dy**2
a2 = a**2
b2 = b**2
if (y2 >= (b2 - b2*x2/a2)):
return point
r = 1.0 / eta
z = np.sqrt((1.0 - x2/a2 - y2/b2) * (a*b))
z2 = z**2
x_angle = np.arccos(dx / np.sqrt(x2+z2))
angle_1 = np.pi/2 - x_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - x_angle - angle_2
out_x = point[0] - np.tan(angle_2)*z
y_angle = np.arccos(dy / np.sqrt(y2+z2))
angle_1 = np.pi/2 - y_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - y_angle - angle_2
out_y = point[1] - np.tan(angle_2)*z
return np.array([out_x, out_y])
random_state = np.random.RandomState(seed=seed)
num = int(density * self.im_size**2)
out = image.copy().astype(np.float32)
for i in range(num):
center = random_state.uniform(low=0, high=self.im_size, size=2)
            l = max(np.floor(center[1]-radius).astype(int), 0)
            r = min(np.ceil(center[1]+radius).astype(int), self.im_size)
            u = max(np.floor(center[0]-radius).astype(int), 0)
            d = min(np.ceil(center[0]+radius).astype(int), self.im_size)
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, radius, eta)) for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
class WaterDrop(Augmentation):
tags = ['distortion']
name = 'water_drop'
def sample_parameters(self):
center = np.array([self.im_size //2, self.im_size//2])
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
radius = self.im_size//2
amplitude = float_parameter(sample_level(self.severity, self.max_intensity), 0.25)
wavelength = np.random.uniform(low=0.05, high=0.2) * self.im_size
phase = np.random.uniform(low=0.0, high=2*np.pi)
return {'center': center, 'radius' : radius, 'amplitude' : amplitude, 'wavelength' : wavelength, 'phase': phase}
def transform(self, image, center, radius, amplitude, wavelength, phase):
def warp_kernel(point, center, radius, amplitude, wavelength, phase):
dx, dy = point - center
dist = np.linalg.norm(point-center)
if dist > radius:
return point
amount = amplitude * np.sin(dist / wavelength * np.pi * 2 - phase)
if dist != 0.0:
amount *= wavelength / dist
return point + amount * (point - center)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, amplitude, wavelength, phase))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class Ripple(Augmentation):
tags = ['distortion']
name = 'ripple'
def sample_parameters(self):
amplitudes = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 0.025)\
for i in range(2)]) * self.im_size
wavelengths = np.random.uniform(low=0.1, high=0.3, size=2) * self.im_size
phases = np.random.uniform(low=0, high=2*np.pi, size=2)
return {'amplitudes' : amplitudes, 'wavelengths' : wavelengths, 'phases' : phases}
def transform(self, image, wavelengths, phases, amplitudes):
def warp_kernel(point, wavelengths, phases, amplitudes):
return point + amplitudes * np.sin(2 * np.pi * point / wavelengths + phases)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), wavelengths, phases, amplitudes))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class ColorHalfTone(Augmentation):
tags = ['distortion']
name = 'color_half_tone'
def sample_parameters(self):
#angles = np.array([108, 162, 90]) * np.pi/180
angles = np.random.uniform(low=0, high=2*np.pi, size=3)
dot_area = float_parameter(sample_level(self.severity, self.max_intensity), 9*np.pi)
dot_radius = np.sqrt(dot_area/np.pi)
return {'angles' : angles, 'dot_radius' : dot_radius}
def transform(self, image, angles, dot_radius):
grid_size = 2 * dot_radius * 1.414
mx = [0, -1, 1, 0, 0]
my = [0, 0, 0, -1, 1]
        out = np.zeros(image.shape, dtype=np.float32)  # float accumulator keeps the soft smoothstep edges below
for y in range(self.im_size):
for c in range(3):
angle = angles[c]
cos = np.cos(angle)
sin = np.sin(angle)
for x in range(self.im_size):
tx = cos * x + sin * y
ty = - sin * x + cos * y
tx = tx - (tx - grid_size/2) % grid_size + grid_size/2
ty = ty - (ty - grid_size/2) % grid_size + grid_size/2
f = 1
for i in range(5):
ttx = tx + mx[i]*grid_size
tty = ty + my[i]*grid_size
ntx = cos * ttx - sin * tty
nty = sin * ttx + cos * tty
nx = np.clip(int(ntx), 0, self.im_size-1)
ny = np.clip(int(nty), 0, self.im_size-1)
l = image[nx, ny, c] / 255
l = 1 - l**2
l *= grid_size/2 * 1.414
dx = x-ntx
dy = y-nty
r = np.linalg.norm(np.array([dx,dy]))
f2 = 1-smoothstep(r, r+1, l)
f = min(f, f2)
out[x, y, c] = f
return np.clip(255 * out, 0, 255).astype(np.uint8)
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/distortion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from .utils.image import bilinear_interpolation
from .utils.severity import float_parameter, int_parameter, sample_level
from scipy.ndimage import shift, zoom, grey_erosion, grey_dilation
import numpy as np
from PIL import Image
from scipy.ndimage import rotate
class Scatter(Augmentation):
tags = ['blur', 'filterpedia', 'scatter']
name = 'scatter'
def sample_parameters(self):
seed = np.random.uniform(low=0.0, high=10.0)
radius = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size/10)
return {'seed' : seed, 'radius' : radius}
def transform(self, image, seed, radius):
        def noise(x, y, seed):
            # classic GLSL-style pseudo-random hash: fract(sin(dot(p, k)) * 43758.5453)
            i, j = np.sin(x * seed), np.cos(y * seed)
            return (np.sin(12.9898*i + 78.233*j) * 43758.5453) % 1
def warp_kernel(x, y, seed, radius):
x_offset = radius * (-1.0 + noise(x, y, seed) * 2)
y_offset = radius * (-1.0 + noise(y, x, seed) * 2)
x_new = min(max(0, x+x_offset), self.im_size-1)
y_new = min(max(0, y+y_offset), self.im_size-1)
return y_new, x_new
out = np.array([[bilinear_interpolation(image, warp_kernel(x, y, seed, radius))\
for x in range(self.im_size)] for y in range(self.im_size)])
return out.astype(np.uint8)
class ChromaticAbberation(Augmentation):
tags = ['blur', 'color', 'chromatic_abberation']
name = 'chromatic_abberation'
def sample_parameters(self):
# shifts = np.array([int_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 6)\
# for i in range(6)]).reshape(3,2)
angles = np.random.uniform(low=0, high=2*np.pi, size=3)
dists = np.array([float_parameter(sample_level(self.severity, self.max_intensity), self.im_size / 10)\
for i in range(3)])
shifts = np.array([[np.cos(a)*d, np.sin(a)*d] for a, d in zip(angles, dists)])
# flip = np.random.choice([-1,1], size=(3,2))
# shifts = shifts * flip
return { 'rgb_shifts' : shifts}
def transform(self, image, rgb_shifts):
# max_pad = np.max(np.abs(rgb_shifts))
# image_padded = np.pad(image, [(max_pad, max_pad), (max_pad, max_pad), (0,0)])
out = image.copy()
for i in range(3):
out[:,:,i] = shift(image[:,:,i], rgb_shifts[i], prefilter=False)
#h, w, _ = image.shape
#for i in range(3):
# out[:,:,i] = image_padded[max_pad+rgb_shifts[i,0]:max_pad+h+rgb_shifts[i,0],max_pad+rgb_shifts[i,1]:max_pad+w+rgb_shifts[i,1],i]
return out
def convert_to_numpy(self, params):
return params['rgb_shifts'].flatten()
def convert_from_numpy(self, numpy_record):
        return {'rgb_shifts' : numpy_record.reshape(3,2).astype(int)}
class TransverseChromaticAbberation(Augmentation):
tags = ['blur', 'color', 'pil', 'transverse_chromatic_abberation']
name = 'transverse_chromatic_abberation'
def sample_parameters(self):
scales = np.array([float_parameter(sample_level(self.severity,self.max_intensity), 0.5)\
for i in range(3)])
scale = float_parameter(sample_level(self.severity, self.max_intensity), 0.5)
scales = np.array([1.0, 1.0+scale/2, 1.0+scale])
scales = scales[np.random.permutation(3)]
#zerod = np.random.randint(low=0, high=3)
#scales[zerod] = 0.0
#flip = np.random.choice([-1, 1], size=3)
#scales = flip * scales
#scales = 2 ** scales
return { 'scales' : scales }
def transform(self, image, scales):
out = image.copy()
for c in range(3):
zoomed = zoom(image[:,:,c], scales[c], prefilter=False)
edge = (zoomed.shape[0]-self.im_size)//2
out[:,:,c] = zoomed[edge:edge+self.im_size, edge:edge+self.im_size]
return out.astype(np.uint8)
'''
image = Image.fromarray(image)
channel_list = []
for i, channel in enumerate(image.getbands()):
im = image.getchannel(channel)
affine = np.array([[scales[i], 0, (1-scales[i])*self.im_size/2], [0, scales[i], (1-scales[i])*self.im_size/2]])
im = im.transform((self.im_size, self.im_size), Image.AFFINE, affine.flatten())
channel_list.append(im)
out = Image.merge("RGB", channel_list)
return np.array(out).astype(np.uint8)
'''
def convert_to_numpy(self, params):
return params['scales'].flatten()
def convert_from_numpy(self, numpy_record):
return {'scales' : numpy_record}
class HomogeneousColorBlur(Augmentation):
tags = ['blur', 'filterpedia', 'homogenous_color_blur', 'slow', 'impractically_slow']
name = 'homogeneous_color_blur'
def sample_parameters(self):
radius = int_parameter(sample_level(self.severity,self.max_intensity), self.im_size/10)
threshold = np.random.uniform(low=0.2, high=0.21)
return { 'radius' : radius, 'threshold' : threshold }
def transform(self, image, radius, threshold):
def kernel(point, image, radius, threshold):
this_val = image[point[0],point[1],:]
acc = np.zeros(3)
n = 0
for x in np.arange(-radius, radius+1):
for y in np.arange(-radius, radius+1):
x_pos = point[0]+x
y_pos = point[1]+y
if x_pos < 0 or x_pos >= self.im_size or y_pos < 0 or y_pos >= self.im_size:
continue
offset_val = image[x_pos,y_pos,:]
dist_mul = 1 if radius >= np.sqrt(x**2+y**2) else 0
color_mul = 1 if 255*threshold >= np.sqrt(np.sum((this_val-offset_val)**2)) else 0
acc += offset_val * dist_mul * color_mul
n += dist_mul * color_mul
return acc / n
out = np.array([[kernel(np.array([y,x]), image, radius, threshold)\
for x in range(self.im_size)] for y in range(self.im_size)])
return out.astype(np.uint8)
class Erosion(Augmentation):
tags = ['blur']
name = 'erosion'
def sample_parameters(self):
r2 = float_parameter(sample_level(self.severity, self.max_intensity), (5**2-1.5**2)) + 1.5**2
radius = np.sqrt(r2)
return {'radius' : radius}
def transform(self, image, radius):
        mask = np.zeros((np.ceil(2*radius).astype(int), np.ceil(2*radius).astype(int)))
center = np.array([radius, radius])
for x in range(mask.shape[0]):
for y in range(mask.shape[1]):
mask[x,y] = 1 if np.linalg.norm(np.array([x,y])-center) <= radius else 0
if np.max(mask) == 0:
return image
out = image.copy()
for c in range(3):
out[:,:,c] = grey_erosion(out[:,:,c], footprint=mask)
return out
class Dilation(Augmentation):
tags = ['blur']
name = 'dilation'
def sample_parameters(self):
r2 = float_parameter(sample_level(self.severity, self.max_intensity), (5**2-1.5**2)) + 1.5**2
radius = np.sqrt(r2)
return {'radius' : radius}
def transform(self, image, radius):
        mask = np.zeros((np.ceil(2*radius).astype(int), np.ceil(2*radius).astype(int)))
center = np.array([radius, radius])
for x in range(mask.shape[0]):
for y in range(mask.shape[1]):
mask[x,y] = 1 if np.linalg.norm(np.array([x,y])-center) <= radius else 0
if np.max(mask) == 0:
return image
out = image.copy()
for c in range(3):
out[:,:,c] = grey_dilation(out[:,:,c], footprint=mask)
return out
class CircularMotionBlur(Augmentation):
tags = ['blur']
name = 'circular_motion_blur'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity, self.max_intensity),15)
return {'amount' : amount}
def transform(self, image, amount):
num = 21
factors = []
rotated = []
image = image.astype(np.float32) / 255
for i in range(num):
angle = (2*i/(num-1) - 1) * amount
rotated.append(rotate(image, angle, reshape=False))
factors.append(np.exp(- 2*(2*i/(num-1)-1)**2))
out = np.zeros_like(image)
for i, f in zip(rotated, factors):
out += f * i
out /= sum(factors)
return np.clip(out*255, 0, 255).astype(np.uint8)
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/blurs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from math import floor, ceil
import numpy as np
class Gaussian(Augmentation):
name = 'pg_gaussian'
tags = ['float_return']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
sigma = np.random.uniform(low=0, high=self.severity/10)\
if not self.max_intensity else self.severity/10
return {'seed': seed, 'sigma': sigma}
def transform(self, image, seed, sigma):
random_state = np.random.RandomState(seed=seed)
noise = random_state.randn(self.im_size, self.im_size, 3)
image = image.astype(np.float32) / 255
image = np.clip(image+sigma*noise, 0, 1)
return image * 255
class PatchGaussian(Augmentation):
name = 'patch_gaussian'
tags = ['float_return', 'additional_parameters']
def __init__(self, severity, im_size, record=False, max_intensity=False, sigma=1.0, width=None):
super(PatchGaussian, self).__init__(severity, im_size, record, max_intensity)
self.sigma = sigma
self.width = width if width is not None else self.im_size
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
crop_pos = np.random.randint(low=0, high=self.im_size, size=2)
width = np.random.uniform(low=0, high=self.width)\
if not self.max_intensity else self.width
sigma = np.random.uniform(low=0, high=self.sigma)
return {'seed': seed, 'crop_pos': crop_pos, 'sigma': sigma, 'width': width}
def transform(self, image, seed, crop_pos, sigma, width):
random_state = np.random.RandomState(seed=seed)
noise = random_state.randn(self.im_size, self.im_size, 3)
noise *= sigma
mask = np.zeros((self.im_size, self.im_size))
l = int(max(0, crop_pos[0]-floor(width/2)))
r = int(min(self.im_size, crop_pos[0]+ceil(width/2)))
u = int(max(0, crop_pos[1]-floor(width/2)))
d = int(min(self.im_size, crop_pos[1]+ceil(width/2)))
mask[l:r,u:d] = 1.0
mask = mask.reshape(self.im_size, self.im_size, 1)
image = image.astype(np.float32) / 255
image = image + mask * noise
image = np.clip(image, 0, 1)
return image * 255
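# Hedged sketch (editor's addition, not in the original file): drives
# PatchGaussian by hand on a random uint8 image. The severity and width are
# illustrative; per the 'float_return' tag, the result is a float array
# scaled to [0, 255] rather than uint8.
def _demo_patch_gaussian(im_size=32):
    aug = PatchGaussian(severity=5, im_size=im_size, sigma=1.0, width=16)
    image = np.random.randint(0, 256, size=(im_size, im_size, 3), dtype=np.uint8)
    params = aug.sample_parameters()  # seed, crop position, sigma, patch width
    return aug.transform(image, **params)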
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/patch_gaussian.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
import numpy as np
from .utils.severity import int_parameter, float_parameter, sample_level
from .utils.image import smoothstep
from skimage.color import rgb2hsv, hsv2rgb
class BleachBypass(Augmentation):
tags = ['color', 'filterpedia', 'bleach_bypass']
name = 'bleach_bypass'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity,self.max_intensity), 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image*vals, axis=2, keepdims=True)/255
l = np.clip(10.0 * (luma - 0.45), 0, 1.0)
result1 = 2 * image * luma / 255
result2 = 1.0 - 2.0 * (1.0 - luma) * (1.0 - image /255)
out = ((1-l) * result1 + l * result2) * 255
return ((1-amount) * image + amount * out).astype(np.uint8)
class Technicolor(Augmentation):
tags = ['color', 'filterpedia', 'technicolor']
name = 'technicolor'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity,self.max_intensity), 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
redmatte = 1.0 - (image[:,:,0]/255 - ((image[:,:,1]/2+image[:,:,2]/2))/255)
greenmatte = 1.0 - (image[:,:,1]/255 - ((image[:,:,0]/2+image[:,:,2]/2))/255)
bluematte = 1.0 - (image[:,:,2]/255 - ((image[:,:,0]/2+image[:,:,1]/2))/255)
red = greenmatte * bluematte * image[:,:,0].astype(np.float32)
green = redmatte * bluematte * image[:,:,1].astype(np.float32)
blue = redmatte * greenmatte * image[:,:,2].astype(np.float32)
new_color = np.stack((red, green, blue), axis=2)
return ((1-amount) * image + amount * new_color).astype(np.uint8)
class Pseudocolor(Augmentation):
tags = ['color', 'filterpedia', 'pseudocolor']
name = 'pseudocolor'
def sample_parameters(self):
smoothness = np.random.uniform(low=0.25, high=0.75)
color0 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color1 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color2 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color3 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color4 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
amount = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
return { 'smoothness' : smoothness, 'color0' : color0, 'color1': color1,
'color2': color2, 'color3' : color3, 'color4' : color4, 'amount' : amount }
def transform(self, image, color0, color1, color2, color3, color4, smoothness, amount):
color0 = color0.astype(np.uint8)
color1 = color1.astype(np.uint8)
color2 = color2.astype(np.uint8)
color3 = color3.astype(np.uint8)
color4 = color4.astype(np.uint8)
def get_color(color0, color1, edge0, edge1, luma, smoothness):
smooth_color = color0 + ((color1 - color0) * smoothstep(edge0, edge1, luma))
a = 4.0 * (luma - edge0)
linear_color = (1 - a) * color0 + a * color1
return (1 - smoothness) * linear_color + smoothness * smooth_color
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image.astype(np.float32)*vals, axis=2, keepdims=True)/255
c1 = get_color(color0, color1, 0.0, 0.25, luma, smoothness)
c2 = get_color(color1, color2, 0.25, 0.50, luma, smoothness)
c3 = get_color(color2, color3, 0.5, 0.75, luma, smoothness)
c4 = get_color(color3, color4, 0.75, 1.0, luma, smoothness)
out = (luma < 0.25) * c1 + ((luma >= 0.25)&(luma < 0.5)) * c2\
+ ((luma >= 0.5)&(luma < 0.75)) * c3 + (luma >= 0.75) * c4
return np.clip((1 - amount) * image + amount * out, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
colors = []
for i in range(5):
colors.extend(params['color'+str(i)].tolist())
return np.array([params['smoothness']] + colors + [params['amount']])
def convert_from_numpy(self, numpy_record):
params = {'smoothness' : numpy_record[0], 'amount' : numpy_record[16]}
for i in range(5):
params['color'+str(i)] = numpy_record[1+3*i:1+3*(i+1)]
return params
class HueShift(Augmentation):
tags = ['color']
name = 'hue_shift'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity, self.max_intensity), 0.5)
if np.random.uniform() < 0.5:
amount *= -1
return {'amount' : amount}
def transform(self, image, amount):
hsv_image = rgb2hsv(image.astype(np.float32)/255)
hsv_image[:,:,0] += (amount % 1.0)
return np.clip(hsv2rgb(hsv_image)*255, 0, 255).astype(np.uint8)
class ColorDither(Augmentation):
tags = ['color']
name = 'color_dither'
def sample_parameters(self):
#factor = float_parameter(sample_level(self.severity, self.max_intensity),6.0)+1.0
#levels = int(256 / (2**factor))
levels = int_parameter(sample_level(self.severity, self.max_intensity),10)
levels = 14-levels
return {'levels' : levels}
def transform(self, image, levels):
index = 0
color_map = [int(255 * i / (levels -1)) for i in range(levels)]
div = [int(levels*i / 256) for i in range(256)]
out = np.zeros_like(image)
image_copy = image.copy()
m = np.array([[0,0,0],[0,0,7],[3,5,1]])
for y in range(self.im_size):
            reverse = ((y % 2) == 1)  # serpentine scan: alternate direction on odd rows
if reverse:
index = y*self.im_size + self.im_size - 1
direction = -1
else:
index = y*self.im_size
direction = 1
for x in range(self.im_size):
curr_val = image_copy[index//self.im_size, index%self.im_size,:]
new_val = np.array([color_map[div[c]] for c in curr_val])
out[index//self.im_size, index%self.im_size,:] = new_val
e = curr_val - new_val
for i in [-1,0,1]:
iy = y+i
                    if 0 <= iy < self.im_size:
                        for j in [-1,0,1]:
                            jx = x+j
                            if 0 <= jx < self.im_size:
if reverse:
w = m[(i+1),-j+1]
else:
w = m[(i+1),j+1]
if w != 0:
k = index - j if reverse else index + j
curr_val = image_copy[k//self.im_size, k%self.im_size,:].astype(np.float32)
curr_val = np.clip(curr_val + e * w/np.sum(m),0,255).astype(np.uint8)
image_copy[k//self.im_size,k%self.im_size,:] = curr_val
index += direction
return np.clip(out, 0, 255).astype(np.uint8)
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/color.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import identity
from . import base
from . import pil
from . import obscure
from . import additive_noise
from . import color
from . import compositions
from . import blurs
from . import imagenetc
from . import utils
from . import distortion
from . import standard_augmentations
from . import patch_gaussian
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from .utils.severity import float_parameter, int_parameter, sample_level
from .utils.image import smoothstep
from .utils.noise import PerlinNoiseGenerator
from scipy.fftpack import ifft2
import numpy as np
class SingleFrequencyGreyscale(Augmentation):
tags = ['additive_noise', 'single_frequency_greyscale']
name = 'single_frequency_greyscale'
def sample_parameters(self):
freq_mag = np.random.uniform(low=-np.pi, high=np.pi)
freq_2 = np.random.uniform(low=-abs(freq_mag), high=abs(freq_mag))
freq = np.array([freq_mag, freq_2])[np.random.permutation(2)]
phase = np.random.uniform(low=0, high=2*np.pi)
intensity = float_parameter(sample_level(self.severity,self.max_intensity), 196)
return { 'freq' : freq, 'phase' : phase, 'intensity' : intensity}
def transform(self, image, freq, phase, intensity):
noise = np.array([[np.sin(x * freq[0] + y * freq[1] + phase)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['freq'].tolist() + [params['phase'], params['intensity']])
def convert_from_numpy(self, numpy_record):
return {'freq' : numpy_record[0:2],
'phase' : numpy_record[2],
'intensity' : numpy_record[3]
}
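# Hedged sketch (editor's addition, not in the original file): a vectorized
# equivalent of the per-pixel list comprehension in transform() above. With
# indexing='ij', noise[y, x] = sin(x * freq[0] + y * freq[1] + phase), the
# same plane wave, computed without Python loops.
def _single_frequency_noise(im_size, freq, phase):
    ys, xs = np.meshgrid(np.arange(im_size), np.arange(im_size), indexing='ij')
    noise = np.sin(xs * freq[0] + ys * freq[1] + phase)
    return np.stack((noise, noise, noise), axis=2)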
class SingleFrequencyColor(Augmentation):
tags = ['additive_noise', 'color', 'single_frequency_color']
name = 'single_frequency_color'
def sample_parameters(self):
freq = np.random.uniform(low=0, high=np.pi, size=2)
phase = np.random.uniform(low=0, high=2*np.pi)
intensity = [float_parameter(sample_level(self.severity,self.max_intensity), 196) for i in range(3)]
return { 'freq' : freq, 'phase' : phase, 'intensity' : intensity}
def transform(self, image, freq, phase, intensity):
noise = np.array([[np.sin(x * freq[0] + y * freq[1] + phase)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((intensity[0] * noise, intensity[1] * noise, intensity[2] * noise), axis=2)
return np.clip(image + noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['freq'].tolist() + [params['phase']] + params['intensity'])
def convert_from_numpy(self, numpy_record):
return {'freq' : numpy_record[0:2],
'phase' : numpy_record[2],
'intensity' : numpy_record[3:6].tolist()
}
class CocentricSineWaves(Augmentation):
tags = ['additive_noise', 'filterpedia', 'color', 'cocentric_sine_waves']
name = 'cocentric_sine_waves'
def sample_parameters(self):
offset = np.random.uniform(low=0, high=self.im_size, size=2)
freq = np.random.uniform(low=0, high=10)
amplitude = np.random.uniform(low=0, high=self.im_size/10)
ring_width = np.random.uniform(low=0, high=self.im_size/10)
intensity = [float_parameter(sample_level(self.severity,self.max_intensity), 128) for i in range(3)]
return { 'offset' : offset,
'freq' : freq,
'amplitude' : amplitude,
'ring_width' : ring_width,
'intensity' : intensity
}
def transform(self, image, offset, freq, amplitude, ring_width, intensity):
def calc_intensity(x, y, x0, y0, freq, amplitude, ring_width):
angle = np.arctan2(x-x0, y-y0) * freq
distance = ((np.sqrt((x-x0)**2 + (y-y0)**2) + np.sin(angle) * amplitude) % ring_width) / ring_width
distance -= 1/2
return distance
noise = np.array([[calc_intensity(x, y, offset[0], offset[1], freq, amplitude, ring_width)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((intensity[0] * noise, intensity[1] * noise, intensity[2] * noise), axis=2)
return np.clip(image + noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['offset'].tolist() + [params['freq'], params['amplitude'], params['ring_width']] + params['intensity'])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0:2].tolist(),
'freq' : numpy_record[2],
'amplitude' : numpy_record[3],
'ring_width' : numpy_record[4],
                'intensity' : numpy_record[5:8].tolist()
}
class PlasmaNoise(Augmentation):
tags = ['additive_noise', 'color', 'filterpedia', 'plasma_noise', 'slow']
name = 'plasma_noise'
def sample_parameters(self):
time = np.random.uniform(low=0.0, high=6*np.pi)
iterations = np.random.randint(low=4, high=7)
sharpness = np.random.uniform(low=0.5, high=1.0)
scale = np.random.uniform(low=0.075, high=0.2) * self.im_size
intensity = float_parameter(sample_level(self.severity,self.max_intensity),64)
return {'time' : time, 'iterations' : iterations, 'sharpness' : sharpness,
'scale' : scale, 'intensity' : intensity}
def transform(self, image, time, iterations, sharpness, scale, intensity):
def kernel(x, y, rand, iters, sharp, scale):
x /= scale
y /= scale
i = np.array([1.0, 1.0, 1.0, 0.0])
for s in range(iters):
r = np.array([np.cos(y * i[0] - i[3] + rand / i[1]), np.sin(x * i[0] - i[3] + rand / i[1])]) / i[2]
r += np.array([-r[1],r[0]]) * 0.3
x += r[0]
y += r[1]
i *= np.array([1.93, 1.15, (2.25 - sharp), rand * i[1]])
r = np.sin(x - rand)
b = np.sin(y + rand)
g = np.sin((x + y + np.sin(rand))*0.5)
return [r,g,b]
noise = np.array([[kernel(x,y, time, iterations, sharpness, scale)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((1-intensity/255) * image + intensity * noise, 0, 255).astype(np.uint8)
class VoronoiNoise(Augmentation):
tags = ['additive_noise', 'filterpedia', 'voronoi_noise', 'slow']
name = 'voronoi_noise'
def sample_parameters(self):
seed = np.random.uniform(low=0, high=10)
density = np.random.uniform(low=0.5, high=0.9)
size = np.random.uniform(low=0.05, high=0.2) * self.im_size
intensity = float_parameter(sample_level(self.severity,self.max_intensity),255)
if np.random.uniform() > 0.5:
intensity = -intensity
return {'seed' : seed, 'density' : density, 'size' : size, 'intensity' : intensity}
def transform(self, image, size, seed, density, intensity):
def voronoi_hash(v, time):
m = np.array([[13.85, 47.77], [99.41, 88.48]])
w = np.matmul(m,v)
return (np.sin(w) * np.sqrt(w) * time * 0.0025) % 1
def kernel(x, y, size, seed, density):
v = np.array([[x],[y]]) / size + 1
g = v // 1
f = v % 1
dist = 1.0
for i in [-1,0,1]:
for j in [-1,0,1]:
p = np.array([[i],[j]])
curr_dist = np.linalg.norm((p + voronoi_hash(g+p, seed) - f).flatten())
dist = min(curr_dist, dist)
r = smoothstep(0, 1, dist * density) - 0.5
return r
noise = np.array([[kernel(x,y, size, seed, density)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class CausticNoise(Augmentation):
tags = ['additive_noise', 'filterpedia']
name = 'caustic_noise'
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
intensity = float_parameter(sample_level(self.severity,self.max_intensity), 255)
return { 'time' : time, 'size' : size, 'intensity' : intensity}
def transform(self, image, time, size, intensity):
def kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
noise = np.array([[kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
#return np.clip(255 * noise, 0, 255).astype(np.uint8)
class Sparkles(Augmentation):
tags = ['additive_noise']
name = 'sparkles'
def sample_parameters(self):
centers = np.random.uniform(low=0, high=self.im_size, size=(5, 2))
radii = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 0.1)\
for i in range(5)]) * self.im_size
#radii = np.array([0.1 for i in range(5)]) * self.im_size
#amounts = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 50)\
# for i in range(5)])
amounts = np.array([50 for i in range(5)])
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
nrays = np.random.randint(low=50, high=200, size=5)
return {'centers' : centers, 'radii' : radii, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'nrays' : nrays, 'amounts' : amounts
}
def transform(self, image, centers, radii, nrays, amounts, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return value + f * (color - value)
random_state = np.random.RandomState(seed=seed)
for center, rays, amount, radius in zip(centers, nrays, amounts, radii):
ray_lengths = [max(1,radius + randomness / 100.0 * radius * random_state.randn())\
for i in range(rays)]
image = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class InverseSparkles(Augmentation):
tags = ['additive_noise']
name = 'inverse_sparkles'
def sample_parameters(self):
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
#radius = self.im_size // 4
#radius = float_parameter(sample_level(self.severity, self.max_intensity), 0.5)
#radius = (0.75 - radius) * self.im_size
radius = 0.25 * self.im_size
#amount = 25
        amount = float_parameter(sample_level(self.severity, self.max_intensity), 65)
        amount = 100 - amount
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
rays = np.random.randint(low=50, high=200)
return {'center' : center, 'radius' : radius, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'rays' : rays, 'amount' : amount
}
def transform(self, image, center, radius, rays, amount, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return color + f * (value - color)
random_state = np.random.RandomState(seed=seed)
ray_lengths = [radius + randomness / 100.0 * radius * random_state.randn()\
for i in range(rays)]
out = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
class PerlinNoise(Augmentation):
tags = ['additive_noise']
name = 'perlin_noise'
def sample_parameters(self):
m = np.array([[1,0],[0,1]]) / (32 * self.im_size / 224)
turbulence = 16.0
gain = 0.5
bias = 0.5
        alpha = float_parameter(sample_level(self.severity, self.max_intensity), 0.50)
seed = np.random.randint(low=0, high=2**32)
return {'m': m, 'turbulence' : turbulence, 'seed': seed,
'gain': gain, 'bias': bias, 'alpha': alpha}
def transform(self, image, m, turbulence, seed, gain, bias, alpha):
random_state = np.random.RandomState(seed=seed)
noise = PerlinNoiseGenerator(random_state)
def kernel(point, m, turbulence, gain, bias):
npoint = np.matmul(point, m)
f = noise.turbulence(npoint[0], npoint[1], turbulence)\
if turbulence != 1.0 else noise.noise(npoint[0], npoint[1])
f = gain * f + bias
return np.clip(np.array([f,f,f]),0,1.0)
noise = np.array([[kernel(np.array([y,x]),m,turbulence,gain, bias) for x in range(self.im_size)]\
for y in range(self.im_size)])
out = (1 - alpha) * image.astype(np.float32) + 255 * alpha * noise
return np.clip(out, 0, 255).astype(np.uint8)
class BlueNoise(Augmentation):
tags = ['additive_noise']
name = 'blue_noise'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(sample_level(self.severity, self.max_intensity), 196)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
#power = power / self.im_size
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
#luma_noise = noise.reshape(self.im_size, self.im_size, 1) * np.array([[[0.2126, 0.7152, 0.0722]]])
#return np.clip(image + intensity * luma_noise, 0, 255).astype(np.uint8)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
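# --- Added commentary: blue-noise synthesis above ---
# gen_noise works directly in Fourier space: amplitudes grow linearly with
# distance from the spectrum centre (power ~ |f|, a "blue" spectrum), phases
# are uniform random, and the inverse FFT normalised by its standard
# deviation gives the spatial noise field. A minimal standalone sketch:
#
#   n = 32
#   fy, fx = np.mgrid[0:n, 0:n] - n / 2
#   power = np.sqrt(fx**2 + fy**2)
#   phases = np.random.uniform(0, 2 * np.pi, (n, n))
#   noise = np.real(ifft2(np.fft.ifftshift(power * np.exp(1j * phases))))
#   noise /= noise.std()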
class BrownishNoise(Augmentation):
tags = ['additive_noise']
name = 'brownish_noise'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(sample_level(self.severity, self.max_intensity), 64)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[1/(np.linalg.norm(np.array([x,y])-center)**2+1)\
for x in range(self.im_size)] for y in range(self.im_size)])
#power = power / self.im_size
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
#luma_noise = noise.reshape(self.im_size, self.im_size, 1) * np.array([[[0.2126, 0.7152, 0.0722]]])
#return np.clip(image + intensity * luma_noise, 0, 255).astype(np.uint8)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/additive_noise.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
import numpy as np
class Cifar10CropAndFlip(Augmentation):
def sample_parameters(self):
crop_pos = np.random.randint(low=-4, high=5, size=2)
flip = (np.random.uniform() < 0.5)
return {'crop_pos': crop_pos, 'flip': flip}
def transform(self, image, crop_pos, flip):
image = np.pad(image, ((4,4),(4,4),(0,0)))
pos = crop_pos+4
image = image[pos[0]:pos[0]+self.im_size,pos[1]:pos[1]+self.im_size,:]
if flip:
image = np.ascontiguousarray(image[:,::-1,:])
return image.astype(np.uint8)
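# --- Added commentary (usage sketch; constructor arguments assumed) ---
# This is the standard CIFAR-10 training augmentation: pad by 4, take a
# random 32x32 crop, and flip horizontally with probability 0.5:
#
#   aug = Cifar10CropAndFlip(im_size=32)
#   params = aug.sample_parameters()  # e.g. {'crop_pos': [-2, 3], 'flip': True}
#   out = aug.transform(image, **params)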
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/standard_augmentations.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from .utils.severity import float_parameter, int_parameter, sample_level
from PIL import Image, ImageOps, ImageEnhance
import numpy as np
class AutoContrast(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'autocontrast']
name = 'autocontrast'
def sample_parameters(self):
return {}
def transform(self, image):
im = ImageOps.autocontrast(Image.fromarray(image))
return np.array(im)
class Equalize(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'equalize']
name = 'equalize'
def sample_parameters(self):
return {}
def transform(self, image):
im = ImageOps.equalize(Image.fromarray(image))
return np.array(im)
class Posterize(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'posterize']
name = 'posterize'
def sample_parameters(self):
bits = 4 - int_parameter(sample_level(self.severity,self.max_intensity), 4)
return {'bits' : bits}
def transform(self, image, bits):
im = ImageOps.posterize(Image.fromarray(image), int(bits))
return np.array(im)
class Solarize(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'solarize']
name = 'solarize'
def sample_parameters(self):
threshold = 256 - int_parameter(sample_level(self.severity,self.max_intensity), 256)
return {'threshold' : threshold}
def transform(self, image, threshold):
im = ImageOps.solarize(Image.fromarray(image), threshold)
return np.array(im)
class Affine(Augmentation):
tags = ['pil', 'spatial', 'affine']
name = 'affine'
def sample_parameters(self):
offset_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
affine_matrix = np.array([[factor_x, offset_x, shift_x],[offset_y, factor_y, shift_y]])
return {'affine_matrix' : affine_matrix}
def transform(self, image, affine_matrix):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.AFFINE,
affine_matrix.flatten(),
resample=Image.BILINEAR
)
return np.array(im)
def convert_to_numpy(self, params):
return params['affine_matrix'].flatten()
def convert_from_numpy(self, numpy_record):
return {'affine_matrix' : numpy_record.reshape(2,3)}
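# --- Added commentary: PIL affine convention ---
# Image.transform(..., Image.AFFINE, (a, b, c, d, e, f)) maps each *output*
# pixel (x, y) back to the input location (a*x + b*y + c, d*x + e*y + f), so
# the 2x3 matrix built above is an output-to-input (inverse) warp. The
# subclasses below fill in this matrix for shears, translations and scalings.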
class ShearX(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'shear_x']
name = 'shear_x'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, offset, 0],[0, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class ShearY(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'shear_y']
name = 'shear_y'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, 0, 0],[offset, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class TranslateX(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'translate_x']
name = 'translate_x'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, 0, offset],[0, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class TranslateY(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'translate_y']
name = 'translate_y'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, 0, 0],[0, 1, offset]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class Rotate(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'rotate']
name = 'rotate'
def sample_parameters(self):
degrees = float_parameter(sample_level(self.severity,self.max_intensity), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return {'degrees' : degrees}
def transform(self, image, degrees):
im = Image.fromarray(image)
im = im.rotate(degrees, resample=Image.BILINEAR)
return np.array(im)
class Invert(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'invert']
name = 'invert'
def sample_parameters(self):
return {}
def transform(self, image):
im = ImageOps.invert(Image.fromarray(image))
return np.array(im)
class ColorBalance(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'color_balance']
name = 'color_balance'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Color(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class Sharpness(Augmentation):
tags = ['autoaugment', 'pil', 'blur', 'sharpness']
name = 'sharpness'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Sharpness(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class Contrast(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'imagenet_c_overlap', 'contrast']
name = 'contrast_pil'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Contrast(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class Brightness(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'imagenet_c_overlap', 'brightness']
name = 'brightness_pil'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Brightness(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class ScaleX(Affine):
tags = ['pil', 'spatial', 'scale_x']
name = 'scale_x'
def sample_parameters(self):
factor = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor = -factor
factor = 2 ** factor
return {'factor' : factor}
def transform(self, image, factor):
affine_matrix = np.array([[factor, 0, (1-factor)*self.im_size/2],[0, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['factor']])
def convert_from_numpy(self, numpy_record):
return {'factor' : numpy_record[0]}
class ScaleY(Affine):
tags = ['pil', 'spatial', 'scale_y']
name = 'scale_y'
def sample_parameters(self):
factor = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor = -factor
factor = 2 ** factor
return {'factor' : factor}
def transform(self, image, factor):
affine_matrix = np.array([[1, 0, 0],[0, factor, (1-factor)*self.im_size/2]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['factor']])
def convert_from_numpy(self, numpy_record):
return {'factor' : numpy_record[0]}
class ScaleFixedAspectRatio(Affine):
tags = ['pil', 'spatial', 'scale_fixed_aspect_ratio']
name = 'scale_fixed_aspect_ratio'
def sample_parameters(self):
factor = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor = -factor
factor = 2 ** factor
return {'factor' : factor}
def transform(self, image, factor):
affine_matrix = np.array([[factor, 0, (1-factor)*self.im_size/2],[0, factor, (1-factor)*self.im_size/2]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['factor']])
def convert_from_numpy(self, numpy_record):
return {'factor' : numpy_record[0]}
class Quadrilateral(Augmentation):
tags = ['pil', 'spatial', 'quadrilateral']
name = 'quadrilateral'
def sample_parameters(self):
points = np.array([
[0,0],
[0, self.im_size],
[self.im_size, self.im_size],
[self.im_size, 0]
]).astype(np.float32)
shift = float_parameter(self.severity, self.im_size / 3) * np.random.uniform(low=-1,high=1, size=(4,2))
points += shift
return {'points' : points}
def transform(self, image, points):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
return np.array(im)
def convert_to_numpy(self, params):
return params['points'].flatten()
def convert_from_numpy(self, numpy_record):
return {'points' : numpy_record.reshape(4,2)}
class QuadrilateralNoBars(Augmentation):
tags = ['pil', 'spatial', 'quadrilateral_no_bars']
name = 'quadrilateral_no_bars'
def sample_parameters(self):
points = np.array([
[0,0],
[0, self.im_size],
[self.im_size, self.im_size],
[self.im_size, 0]
]).astype(np.float32)
shift = float_parameter(self.severity, self.im_size / 3) * np.random.uniform(low=-1,high=1, size=(4,2))
points += shift
return {'points' : points}
def transform(self, image, points):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['points'].flatten()
def convert_from_numpy(self, numpy_record):
return {'points' : numpy_record.reshape(4,2)}
class KeystoneH(Quadrilateral):
tags = ['pil', 'spatial', 'keystone_h']
name = 'keystone_h'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift = - shift
return {'shift' : shift}
def transform(self, image, shift):
points = np.array([
[0,shift],
[0, self.im_size-shift],
[self.im_size, self.im_size+shift],
[self.im_size, -shift],
])
return super().transform(image, points)
def convert_to_numpy(self, params):
return np.array([params['shift']])
def convert_from_numpy(self, numpy_record):
return {'shift' : numpy_record[0]}
class KeystoneV(Quadrilateral):
tags = ['pil', 'spatial', 'keystone_v']
name = 'keystone_v'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift = - shift
return {'shift' : shift}
def transform(self, image, shift):
points = np.array([
[shift,0],
[-shift, self.im_size],
[self.im_size+shift, self.im_size],
[self.im_size-shift, 0]
])
return super().transform(image, points)
def convert_to_numpy(self, params):
return np.array([params['shift']])
def convert_from_numpy(self, numpy_record):
return {'shift' : numpy_record[0]}
class Perspective(Augmentation):
tags = ['pil', 'spatial', 'perspective']
name = 'perspective'
def sample_parameters(self):
offset_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_x = 0.0
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_y = 0.0
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
denom_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
            denom_x = -denom_x
denom_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
            denom_y = -denom_y
perspective_params = np.array([factor_x, offset_x, shift_x,offset_y, factor_y, shift_y, denom_x, denom_y])
return {'perspective_params' : perspective_params}
def transform(self, image, perspective_params):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
return np.array(im)
def convert_to_numpy(self, params):
return params['perspective_params']
def convert_from_numpy(self, numpy_record):
return {'perspective_params' : numpy_record}
class PerspectiveNoBars(Augmentation):
tags = ['pil', 'spatial', 'perspective_no_bars']
name = 'perspective_no_bars'
def sample_parameters(self):
offset_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_x = 0.0
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_y = 0.0
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
denom_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
            denom_x = -denom_x
denom_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
            denom_y = -denom_y
perspective_params = np.array([factor_x, offset_x, shift_x,offset_y, factor_y, shift_y, denom_x, denom_y])
return {'perspective_params' : perspective_params}
def transform(self, image, perspective_params):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['perspective_params']
def convert_from_numpy(self, numpy_record):
return {'perspective_params' : numpy_record}
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/pil.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from math import floor, ceil
import numpy as np
from .base import Augmentation
from .utils.severity import int_parameter, sample_level, float_parameter
from skimage.draw import line_aa
from scipy.fftpack import ifft2
class CutOut(Augmentation):
tags = ['autoaugment', 'cutout']
name = 'cutout'
def sample_parameters(self):
center = np.random.randint(low=0, high=self.im_size, size=2)
size = int_parameter(sample_level(self.severity, self.max_intensity), 15)+1
return {'center' : center, 'size' : size}
def transform(self, image, center, size):
out = image.copy()
lu = np.clip(center-floor(size/2), 0, self.im_size)
rd = np.clip(center+ceil(size/2), 0, self.im_size)
out[lu[0]:rd[0],lu[1]:rd[1],:] = [128,128,128]
return out
def convert_to_numpy(self, params):
return np.array(params['center'].tolist() + [params['size']])
def convert_from_numpy(self, numpy_record):
        return {'center' : numpy_record[0:2].astype(int), 'size' : numpy_record[2]}
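# --- Added commentary (usage sketch; not part of the original file) ---
#   aug = CutOut(severity=3, im_size=32)
#   params = aug.sample_parameters()      # {'center': array([y, x]), 'size': k}
#   out = aug.transform(image, **params)  # grey square of side ~k at center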
'''
class CheckerBoardCutOut(Augmentation):
tags = ['checkerboard_cutout']
name = 'checkerboard_cutout'
def sample_parameters(self):
if self.max_intensity:
size = max(1, int(self.severity))
else:
size = np.random.randint(low=1, high=max(1,int(self.severity))+1)
offset = np.random.randint(low=0, high=size+1, size=2)
return { 'offset' : offset, 'size' : size}
def transform(self, image, offset, size):
out = image.copy()
num = self.im_size // size + 2
for i in range(num):
for j in range(num):
if (i+j) % 2 == 0:
continue
l = np.clip((i-1)*size+offset[0],0,self.im_size)
r = np.clip((i)*size+offset[0],0,self.im_size)
u = np.clip((j-1)*size+offset[1],0,self.im_size)
d = np.clip((j)*size+offset[1],0,self.im_size)
out[l:r,u:d,:] = [128,128,128]
return out
def convert_to_numpy(self, params):
return np.array(params['offset'].tolist() + [params['size']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0:2].astype(np.int), 'size' : numpy_record[2].astype(np.int)}
'''
'''
class CheckerBoardCutOut(Augmentation):
tags = ['obscure']
name = 'checkerboard_cutout'
def sample_parameters(self):
angle = np.random.uniform(low=0, high=2*np.pi)
#scales = np.array([0.5, 0.5]) * self.im_size
scales = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 1.0)\
for i in range(2)])
scales = np.maximum((1.1 - scales) * 0.25 * self.im_size, 1)
return {'angle' : angle, 'scales' : scales}
def transform(self, image, scales, angle):
def mask_kernel(point, scales, angle):
nx = (np.cos(angle) * point[0] + np.sin(angle) * point[1]) / scales[0]
ny = (-np.sin(angle) * point[0] + np.cos(angle) * point[1]) / scales[1]
return int(nx % 2) != int(ny % 2)
out = np.array([[image[y,x,:] if mask_kernel([y,x], scales, angle) else np.array([128,128,128])\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
'''
class CheckerBoardCutOut(Augmentation):
tags = ['obscure']
name = 'checkerboard_cutout'
def sample_parameters(self):
angle = np.random.uniform(low=0, high=2*np.pi)
scales = np.maximum(np.random.uniform(low=0.1, high=0.25) * self.im_size, 1)
scales = (scales, scales)
fraction = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
seed = np.random.randint(low=0, high=2**32)
return {'angle' : angle, 'scales' : scales, 'fraction' : fraction, 'seed' : seed}
def transform(self, image, scales, angle, fraction, seed):
random_state = np.random.RandomState(seed=seed)
grid = random_state.uniform(size=(int(4*self.im_size//scales[0]), int(4*self.im_size//scales[1]))) < fraction
def mask_kernel(point, scales, angle, grid):
nx = (np.cos(angle) * point[0] + np.sin(angle) * point[1]) / scales[0]
ny = (-np.sin(angle) * point[0] + np.cos(angle) * point[1]) / scales[1]
return (int(nx % 2) != int(ny % 2)) or not grid[int(nx),int(ny)]
out = np.array([[image[y,x,:] if mask_kernel([y,x], scales, angle, grid) else np.array([128,128,128])\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
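# --- Added commentary: the rotated checkerboard mask above can also be built
# without the per-pixel Python loop (NumPy-only sketch, same variables):
#
#   ys, xs = np.mgrid[0:im_size, 0:im_size]
#   nx = (np.cos(angle) * ys + np.sin(angle) * xs) / scales[0]
#   ny = (-np.sin(angle) * ys + np.cos(angle) * xs) / scales[1]
#   checker = (nx % 2).astype(int) != (ny % 2).astype(int)
#   keep = checker | ~grid[nx.astype(int), ny.astype(int)]
#   out = np.where(keep[..., None], image, 128)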
class Lines(Augmentation):
tags = ['obscure']
name = 'lines'
def sample_parameters(self):
length = 1.0
density = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
angle = np.random.uniform(low=0.0, high=2*np.pi)
angle_variation = np.random.uniform(low=0.1, high=1.0)
seed = np.random.randint(low=0, high=2**32)
return {'length' : length, 'density' : density, 'angle' : angle, 'angle_variation' : angle_variation, 'seed' : seed}
def transform(self, image, length, density, angle, angle_variation, seed):
num_lines = int(density * self.im_size)
l = length * self.im_size
random_state = np.random.RandomState(seed=seed)
out = image.copy()
for i in range(num_lines):
x = self.im_size * random_state.uniform()
y = self.im_size * random_state.uniform()
a = angle + 2 * np.pi * angle_variation * (random_state.uniform() - 0.5)
s = np.sin(a) * l
c = np.cos(a) * l
#x1 = max(min(int(x-c), self.im_size-1), 0)
#x2 = max(min(int(x+c), self.im_size-1), 0)
#y1 = max(min(int(y-s), self.im_size-1), 0)
#y2 = max(min(int(y+s), self.im_size-1), 0)
x1 = int(x-c)
x2 = int(x+c)
y1 = int(y-s)
y2 = int(y+s)
rxc, ryc, rval = line_aa(x1, y1, x2, y2)
xc, yc, val = [], [], []
for rx, ry, rv in zip(rxc, ryc, rval):
if rx >= 0 and ry >= 0 and rx < self.im_size and ry < self.im_size:
xc.append(rx)
yc.append(ry)
val.append(rv)
        xc, yc, val = np.array(xc, dtype=int), np.array(yc, dtype=int), np.array(val)
out[xc, yc, :] = (1.0 - val.reshape(-1,1)) * out[xc, yc, :].astype(np.float32) + val.reshape(-1,1)*128
return out.astype(np.uint8)
class RandomSample(Augmentation):
tags = ['obscure']
name = 'random_sample'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
density = 1.0 - float_parameter(sample_level(self.severity, self.max_intensity), 0.8)
return {'density' : density, 'seed' : seed}
def transform(self, image, density, seed):
random_state = np.random.RandomState(seed=seed)
num = int(density * self.im_size ** 2)
out = np.zeros_like(image)
#for i in range(num):
# point = np.random.randint(low=0, high=self.im_size, size=2)
# out[point[0], point[1], :] = image[point[0], point[1], :]
indices = random_state.choice(np.arange(self.im_size**2), size=num, replace=False)
for idx in indices:
out[idx//self.im_size, idx % self.im_size, :] = image[idx//self.im_size, idx % self.im_size, :]
return out
class BlueNoiseSample(Augmentation):
tags = ['obscure']
name = 'blue_noise_sample'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
threshold = float_parameter(sample_level(self.severity, self.max_intensity), 3.0) - 2.5
return {'seed' : seed, 'threshold' : threshold}
def transform(self, image, seed, threshold):
random_state = np.random.RandomState(seed=seed)
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
#power = power / self.im_size
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
mask = noise > threshold
out = image * mask.reshape(self.im_size, self.im_size, 1)
return np.clip(out, 0, 255).astype(np.uint8)
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/obscure.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
class Identity(Augmentation):
tags = ["identity"]
    name = 'identity'
def __init__(self, severity=None, record=False, **kwargs):
        super(Identity, self).__init__(severity=severity, record=record, **kwargs)
def sample_parameters(self):
return {}
def transform(self, image):
return image
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/identity.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from collections import namedtuple
import numpy as np
class Augmix(Augmentation):
tags = ['compositor', 'augmix_compose']
def __init__(self, severity=None, im_size=None, augmentation_list=[], width=3, max_depth=3, prob_coeff=1.0, random_depth=True, record=False, float_output=True):
super(Augmix, self).__init__(severity, im_size, record)
self.width = width
self.depth = max_depth
self.random_depth = random_depth
self.prob_coeff = prob_coeff
self.augs = augmentation_list
self.float_output = float_output
self.record_length = max([len(a.convert_to_numpy(a.sample_parameters())) for a in self.augs])\
if self.augs else 0
def transform(self, image, m, ws, aug_record):
if not self.augs:
return image
mix = np.zeros_like(image).astype(np.float32)
for i in range(self.width):
image_aug = image.copy()
for j in range(self.depth):
pos = self.depth * i + j
if aug_record[pos].idx < 0:
continue
op = self.augs[aug_record[pos].idx].transform
image_aug = op(image_aug, **(aug_record[pos].params))
mix += ws[i] * image_aug.astype(np.float32)
mixed = (1 - m) * image.astype(np.float32) + m * mix
if self.float_output:
return mixed
return mixed.astype(np.uint8)
def sample_parameters(self):
ws = np.float32(np.random.dirichlet([self.prob_coeff] * self.width))
m = np.float32(np.random.beta(self.prob_coeff, self.prob_coeff))
if not self.augs:
return { 'm' : m, 'ws' : ws, 'aug_record': []}
aug_idxs = np.random.randint(low=0, high=len(self.augs), size=self.width*self.depth)
if self.random_depth:
for i in range(self.width):
inverse_depth = np.random.randint(1,self.depth+1)
aug_idxs[self.depth*i+inverse_depth:self.depth*(i+1)] = -1
aug_params = [self.augs[i].sample_parameters() if i != -1 else {} for i in aug_idxs]
AugRecord = namedtuple('AugRecord', ('idx', 'params'))
return { 'm' : m, 'ws' : ws, 'aug_record' : [AugRecord(idx, params) for idx, params in zip(aug_idxs, aug_params)]}
def convert_from_numpy(self, record):
out = {}
out['m'] = record[0]
out['ws'] = record[1:self.width+1]
if not self.augs:
out['aug_record'] = {}
return out
idxs = record[self.width+1:self.width+1+self.width*self.depth]
params = []
for i,idx in enumerate(idxs):
offset = self.width+1+self.width*self.depth + i * self.record_length
if idx < 0:
params.append({})
continue
sub_params = self.augs[int(idx)].convert_from_numpy(record[offset:offset+self.record_length])
params.append(sub_params)
AugRecord = namedtuple('AugRecord', ('idx', 'params'))
out['aug_record'] = [AugRecord(int(idx), params) for idx, params in zip(idxs, params)]
return out
def convert_to_numpy(self, record):
out = np.zeros(1+self.width+(self.width*self.depth*(self.record_length+1)))
if not self.augs:
return out
out[0] = record['m']
out[1:self.width+1] = record['ws']
sub_record = record['aug_record']
out[self.width+1:self.width+1+self.width*self.depth] = [i.idx for i in sub_record]
param_list = []
for a in record['aug_record']:
if a.idx >= 0:
curr_params = self.augs[a.idx].convert_to_numpy(a.params)
if len(curr_params) < self.record_length:
curr_params = np.concatenate((curr_params, np.zeros(self.record_length-len(curr_params))))
else:
curr_params = np.zeros(self.record_length)
param_list.append(curr_params)
params = np.concatenate(param_list)
out[self.width+1+self.width*self.depth:] = params
return out
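# --- Added commentary: flat record layout produced by convert_to_numpy ---
#   index 0                      -> m (mixing weight)
#   indices 1 .. width           -> ws (Dirichlet branch weights)
#   next width*depth entries     -> augmentation indices (-1 = no-op)
#   remaining entries            -> per-op parameters, each zero-padded to
#                                   record_length
# Total length: 1 + width + width*depth*(record_length + 1).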
class AutoAugmentOld(Augmentation):
tags = ['compositor', 'autoaugment_compose']
def __init__(self, subpolicies, severity=None, im_size=None, record=False):
super(AutoAugmentOld, self).__init__(severity, im_size, record)
self.subpolicies = subpolicies
self.record_length = 1+2*max([len(policy) for policy in self.subpolicies])
def sample_parameters(self):
policy_idx = np.random.randint(low=0, high=len(self.subpolicies))
selected = np.random.uniform(low=0.0, high=1.0, size=len(self.subpolicies[policy_idx]))
thresholds = np.array([transform_tuple[1] for transform_tuple in self.subpolicies[policy_idx]])
selected = (selected < thresholds).tolist()
flipped = [(np.random.choice([1,-1]) if (selected[i]==True and p[2] is not None and p[2]<0) else 1) for i,p in enumerate(self.subpolicies[policy_idx])]
return { 'policy_idx' : policy_idx, 'selections' : selected, 'flipped' : flipped }
def transform(self, image, policy_idx, selections, flipped):
policy = self.subpolicies[policy_idx]
for i, transform_tuple in enumerate(policy):
if selections[i]:
transform = transform_tuple[0]
magnitude = transform_tuple[2]
if magnitude is not None:
image = transform.transform(image, magnitude * flipped[i])
else:
image = transform.transform(image)
return image
def convert_to_numpy(self, params):
out = np.zeros(self.record_length)
out[0] = params['policy_idx']
curr_len = len(self.subpolicies[params['policy_idx']])
out[1:curr_len+1] = params['selections']
out[1+curr_len:1+2*curr_len] = params['flipped']
return out
def convert_from_numpy(self, numpy_record):
params = {}
params['policy_idx'] = int(numpy_record[0])
curr_len = len(self.subpolicies[params['policy_idx']])
params['selections'] = [True if int(x)==1 else False for x in numpy_record[1:1+curr_len]]
params['flipped'] = [int(x) for x in numpy_record[1+curr_len:1+2*curr_len]]
return params
class AutoAugment(Augmentation):
tags = ['compositor', 'autoaugment_compose']
def __init__(self, subpolicies, severity=None, im_size=None, record=False):
super(AutoAugment, self).__init__(severity, im_size, record)
self.subpolicies = subpolicies
self.record_length = 1+2*max([len(policy) for policy in self.subpolicies])
def sample_parameters(self):
policy_idx = np.random.randint(low=0, high=len(self.subpolicies))
selected = np.random.uniform(low=0.0, high=1.0, size=len(self.subpolicies[policy_idx]))
thresholds = np.array([transform_tuple[1] for transform_tuple in self.subpolicies[policy_idx]])
selected = (selected < thresholds).tolist()
flipped = [(np.random.choice([1,-1]) if (selected[i]==True and p[3] is not None) else 1) for i,p in enumerate(self.subpolicies[policy_idx])]
return { 'policy_idx' : policy_idx, 'selections' : selected, 'flipped' : flipped }
def transform(self, image, policy_idx, selections, flipped):
policy = self.subpolicies[policy_idx]
for i, transform_tuple in enumerate(policy):
if selections[i]:
transform = transform_tuple[0]
magnitude = transform_tuple[2]
if magnitude is not None:
magnitude = (transform_tuple[3] if transform_tuple[3] is not None else 0) + magnitude * flipped[i]
image = transform.transform(image, magnitude)
else:
image = transform.transform(image)
return image
def convert_to_numpy(self, params):
out = np.zeros(self.record_length)
out[0] = params['policy_idx']
curr_len = len(self.subpolicies[params['policy_idx']])
out[1:curr_len+1] = params['selections']
out[1+curr_len:1+2*curr_len] = params['flipped']
return out
def convert_from_numpy(self, numpy_record):
params = {}
params['policy_idx'] = int(numpy_record[0])
curr_len = len(self.subpolicies[params['policy_idx']])
params['selections'] = [True if int(x)==1 else False for x in numpy_record[1:1+curr_len]]
params['flipped'] = [int(x) for x in numpy_record[1+curr_len:1+2*curr_len]]
return params
class RandomSample(Augmentation):
def __init__(self, augmentation_list, weights=None, severity=None, im_size=None, record=False):
super(RandomSample, self).__init__(severity=severity, im_size=im_size, record=record)
self.transforms = augmentation_list
self.weights = weights
assert weights is None or (len(weights)==len(augmentation_list)),\
"Must have equal number of weights as transforms."
assert weights is None or (np.sum(weights)==1.0),\
"Weights must sum to one."
self.record_length = max([len(a.convert_to_numpy(a.sample_parameters())) for a in self.transforms])\
if self.transforms else 0
def sample_parameters(self):
idx = np.random.choice(np.arange(len(self.transforms)), p=self.weights)
transform_params = self.transforms[idx].sample_parameters()
return {'idx': idx, 'transform_params': transform_params}
def transform(self, image, idx, transform_params):
return self.transforms[idx].transform(image, **transform_params)
def convert_from_numpy(self, record):
idx = int(record[0])
transform_params = self.transforms[idx].convert_from_numpy(record[1:])
return {'idx' : idx, 'transform_params': transform_params}
def convert_to_numpy(self, record):
numpy_record = np.zeros(1+self.record_length)
numpy_record[0] = record['idx']
numpy_params = self.transforms[record['idx']].convert_to_numpy(record['transform_params'])
numpy_record[1:1+len(numpy_params)] = numpy_params
return numpy_record
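# --- Added commentary (usage sketch; not part of the original file) ---
# RandomSample draws one augmentation per call, optionally weighted:
#
#   sampler = RandomSample([aug_a, aug_b], weights=[0.25, 0.75],
#                          severity=3, im_size=32)
#   params = sampler.sample_parameters()  # {'idx': ..., 'transform_params': ...}
#   out = sampler.transform(image, **params)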
class ComposeSerially(Augmentation):
    def __init__(self, augmentation_list, severity=None, im_size=None, record=False):
        super(ComposeSerially, self).__init__(severity=severity, im_size=im_size, record=record)
        self.augmentation_list = augmentation_list
        self.record_lengths = [len(a.convert_to_numpy(a.sample_parameters())) for a in augmentation_list]
def sample_parameters(self):
params = {'param_list' : [a.sample_parameters() for a in self.augmentation_list]}
return params
def transform(self, image, param_list):
for a, p in zip(self.augmentation_list, param_list):
image = a.transform(image, **p)
return image
def convert_to_numpy(self, params):
record = None
for a, p in zip(self.augmentation_list, params['param_list']):
record = np.concatenate((record, a.convert_to_numpy(p)), axis=0)\
if record is not None else a.convert_to_numpy(p)
return record
def convert_from_numpy(self, numpy_record):
        offset = 0
        params = {'param_list' : []}
        for a, d in zip(self.augmentation_list, self.record_lengths):
            # Decode each sub-record with its own augmentation so transform()
            # receives proper parameter dicts, matching sample_parameters().
            params['param_list'].append(a.convert_from_numpy(numpy_record[offset:offset+d]))
            offset += d
        return params
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/compositions.py
|
# This source code is adapted from code licensed under the license at
# third_party/imagenetc_license from the root directory of the repository
# Originally available: github.com/hendrycks/robustness
# Modifications Copyright (c) Facebook, Inc. and its affiliates,
# licensed under the MIT license found in the LICENSE file in the root
# directory of this source tree.
from .base import Augmentation
import pickle
import torch
import torch.utils.data
# Per-channel mean and SD values in BGR order
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
import os
import os.path
import time
from PIL import Image
import torchvision.datasets as dset
import torchvision.transforms as trn
import torch.utils.data as data
import numpy as np
# /////////////// Distortion Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage import map_coordinates
import warnings
warnings.simplefilter("ignore", UserWarning)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
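# Added commentary: disk() builds the normalised circular kernel used by
# defocus_blur below; the small Gaussian blur (alias_blur) softens the hard
# edge of the rasterised disk. E.g. disk(radius=3) samples L = -8..8 and
# returns a 17x17 kernel summing to 1.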
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(seed, mapsize, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
Return square 2d array, side length 'mapsize', of floats in range 0-255.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
    maparray = np.empty((mapsize, mapsize), dtype=np.float64)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
random_state = np.random.RandomState(seed=seed)
def wibbledmean(array):
return array / 4 + wibble * random_state.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / zoom_factor))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
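# Added commentary: clipped_zoom keeps the output the same size as the input
# by cropping a central ceil(h / zoom_factor) square first and then rescaling
# by zoom_factor. E.g. for h = 32 and zoom_factor = 1.25, the central 26x26
# crop is zoomed to roughly 32x32 and trimmed back to exactly 32x32.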
# /////////////// End Distortion Helpers ///////////////
# /////////////// Distortions ///////////////
def gaussian_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [0.04, 0.06, .08, .09, .10][int(severity) - 1]
else:
c = [.08, .12, 0.18, 0.26, 0.38][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x) / 255.
return np.clip(x + random_state.normal(size=x.shape, scale=c), 0, 1) * 255
def shot_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [500, 250, 100, 75, 50][int(severity) - 1]
else:
c = [60, 25, 12, 5, 3][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x) / 255.
return np.clip(random_state.poisson(x * c) / c, 0, 1) * 255
def impulse_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [.01, .02, .03, .05, .07][int(severity) - 1]
else:
c = [.03, .06, .09, 0.17, 0.27][int(severity) - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c, seed=seed)
return np.clip(x, 0, 1) * 255
def speckle_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [.06, .1, .12, .16, .2][int(severity) - 1]
else:
c = [.15, .2, 0.35, 0.45, 0.6][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x) / 255.
return np.clip(x + x * random_state.normal(size=x.shape, scale=c), 0, 1) * 255
def gaussian_blur(x, im_size, severity=1):
if im_size == 32:
c = [.4, .6, 0.7, .8, 1][int(severity) - 1]
else:
c = [1, 2, 3, 4, 6][int(severity) - 1]
x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)
return np.clip(x, 0, 1) * 255
def glass_blur(x, im_size, seed, severity=1):
# sigma, max_delta, iterations
if im_size == 32:
c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][int(severity) - 1]
else:
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for i in range(c[2]):
for h in range(im_size - c[1], c[1], -1):
for w in range(im_size - c[1], c[1], -1):
dx, dy = random_state.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
def defocus_blur(x, im_size, severity=1):
if im_size == 32:
c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][int(severity) - 1]
else:
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][int(severity) - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3
return np.clip(channels, 0, 1) * 255
def motion_blur(x, im_size, angle, severity=1):
if im_size == 32:
c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][int(severity) - 1]
else:
c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][int(severity) - 1]
output = BytesIO()
x = Image.fromarray(x)
x.save(output, format='PNG')
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=angle)
    x = cv2.imdecode(np.frombuffer(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)
if x.shape != (im_size, im_size):
return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, im_size, severity=1):
if im_size == 32:
c = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.01), np.arange(1, 1.26, 0.01)][int(severity) - 1]
else:
c = [np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)][int(severity) - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, im_size, seed, severity=1):
if im_size == 32:
c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][int(severity) - 1]
mapsize = 32
else:
c = [(1.5, 2), (2, 2), (2.5, 1.7), (2.5, 1.5), (3, 1.4)][int(severity) - 1]
mapsize = 256
x = np.array(x) / 255.
max_val = x.max()
x += c[0] * plasma_fractal(wibbledecay=c[1], seed=seed, mapsize=mapsize)[:im_size, :im_size][..., np.newaxis]
return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
def frost(x, im_size, frost_path, image_idx, crop_pos, severity=1):
if im_size == 32:
c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][int(severity) - 1]
else:
c = [(1, 0.4),
(0.8, 0.6),
(0.7, 0.7),
(0.65, 0.7),
(0.6, 0.75)][int(severity) - 1]
idx = image_idx
filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][idx]
filename = os.path.join(frost_path, filename)
frost = cv2.imread(filename)
if im_size == 32:
frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
# randomly crop and convert to rgb
#x_start, y_start = np.random.randint(0, frost.shape[0] - 32), np.random.randint(0, frost.shape[1] - 32)
x_start, y_start = crop_pos[0], crop_pos[1]
frost = frost[x_start:x_start + im_size, y_start:y_start + im_size][..., [2, 1, 0]]
return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
def snow(x, im_size, seed, severity=1):
if im_size == 32:
c = [(0.1,0.2,1,0.6,8,3,0.95),
(0.1,0.2,1,0.5,10,4,0.9),
(0.15,0.3,1.75,0.55,10,4,0.9),
(0.25,0.3,2.25,0.6,12,6,0.85),
(0.3,0.3,1.25,0.65,14,12,0.8)][int(severity) - 1]
else:
c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
(0.2, 0.3, 2, 0.5, 12, 4, 0.7),
(0.55, 0.3, 4, 0.9, 12, 8, 0.7),
(0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
(0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x, dtype=np.float32) / 255.
snow_layer = random_state.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=random_state.uniform(-135, -45))
    # np.fromstring was removed from NumPy; np.frombuffer is the drop-in replacement.
    snow_layer = cv2.imdecode(np.frombuffer(snow_layer.make_blob(), np.uint8),
                              cv2.IMREAD_UNCHANGED) / 255.
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(im_size, im_size, 1) * 1.5 + 0.5)
return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, im_size, seed, severity=1):
if im_size == 32:
c = [(0.62,0.1,0.7,0.7,0.5,0),
(0.65,0.1,0.8,0.7,0.5,0),
(0.65,0.3,1,0.69,0.5,0),
(0.65,0.1,0.7,0.69,0.6,1),
(0.65,0.1,0.5,0.68,0.6,1)][int(severity) - 1]
else:
c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
(0.65, 0.3, 3, 0.68, 0.6, 0),
(0.65, 0.3, 2, 0.68, 0.5, 0),
(0.65, 0.3, 1, 0.65, 1.5, 1),
(0.67, 0.4, 1, 0.65, 1.5, 1)][int(severity) - 1]
x = np.array(x, dtype=np.float32) / 255.
random_state = np.random.RandomState(seed=seed)
liquid_layer = random_state.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
# ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)
# ker -= np.mean(ker)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
        # water is pale turquoise
color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1])), axis=2)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
else:
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# m = np.abs(m) ** (1/c[4])
# mud brown
color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
42 / 255. * np.ones_like(x[..., :1]),
20 / 255. * np.ones_like(x[..., :1])), axis=2)
color *= m[..., np.newaxis]
x *= (1 - m[..., np.newaxis])
return np.clip(x + color, 0, 1) * 255
def contrast(x, im_size, severity=1):
if im_size == 32:
c = [.75, .5, .4, .3, 0.15][int(severity) - 1]
else:
c = [0.4, .3, .2, .1, .05][int(severity) - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
return np.clip((x - means) * c + means, 0, 1) * 255
def brightness(x, im_size, severity=1):
if im_size == 32:
c = [.05, .1, .15, .2, .3][int(severity) - 1]
else:
c = [.1, .2, .3, .4, .5][int(severity) - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def saturate(x, im_size, severity=1):
if im_size == 32:
c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][int(severity) - 1]
else:
c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][int(severity) - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def jpeg_compression(x, im_size, severity=1):
if im_size == 32:
c = [80, 65, 58, 50, 40][int(severity) - 1]
else:
c = [25, 18, 15, 10, 7][int(severity) - 1]
x = Image.fromarray(x)
output = BytesIO()
x.save(output, 'JPEG', quality=c)
x = PILImage.open(output)
return x
def pixelate(x, im_size, severity=1):
if im_size == 32:
c = [0.95, 0.9, 0.85, 0.75, 0.65][int(severity) - 1]
else:
c = [0.6, 0.5, 0.4, 0.3, 0.25][int(severity) - 1]
x = Image.fromarray(x)
x = x.resize((int(im_size * c), int(im_size * c)), PILImage.BOX)
x = x.resize((im_size, im_size), PILImage.BOX)
return x
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, im_size, seed, severity=1):
IMSIZE = im_size
if im_size == 32:
c = [(IMSIZE*0, IMSIZE*0, IMSIZE*0.08),
(IMSIZE*0.05, IMSIZE*0.2, IMSIZE*0.07),
(IMSIZE*0.08, IMSIZE*0.06, IMSIZE*0.06),
(IMSIZE*0.1, IMSIZE*0.04, IMSIZE*0.05),
(IMSIZE*0.1, IMSIZE*0.03, IMSIZE*0.03)][int(severity) - 1]
else:
c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 224, but ultimately nothing is incorrect
(244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02),
(244 * 0.07, 244 * 0.01, 244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
image = np.array(image, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = (gaussian(random_state.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dy = (gaussian(random_state.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
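# Illustrative sketch (not used elsewhere): the seeded corruptions above are
# deterministic for a fixed seed, which the Augmentation wrappers below rely
# on when recording parameters for later replay.
def _demo_seed_determinism(im_size=32):
    image = np.random.randint(0, 256, size=(im_size, im_size, 3), dtype=np.uint8)
    first = elastic_transform(image, im_size, seed=0, severity=3)
    second = elastic_transform(image, im_size, seed=0, severity=3)
    assert np.allclose(first, second)  # same seed -> identical corruption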
class GaussianNoise(Augmentation):
tags = ['imagenet_c', 'noise']
name = 'gaussian_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(gaussian_noise(image, self.im_size, seed, severity=self.severity))
class ShotNoise(Augmentation):
tags = ['imagenet_c', 'noise']
name = 'shot_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(shot_noise(image, self.im_size, seed, severity=self.severity))
class ImpulseNoise(Augmentation):
tags = ['imagenet_c', 'noise']
name = 'impulse_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(impulse_noise(image, self.im_size, seed, severity=self.severity))
class SpeckleNoise(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'speckle_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(speckle_noise(image, self.im_size, seed, severity=self.severity))
class ElasticTransform(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'elastic_transform'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(elastic_transform(image, self.im_size, seed, severity=self.severity))
class GlassBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'glass_blur'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(glass_blur(image, self.im_size, seed, severity=self.severity))
class Snow(Augmentation):
tags = ['imagenet_c', 'weather']
name = 'snow'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(snow(image, self.im_size, seed, severity=self.severity))
class Spatter(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'spatter'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(spatter(image, self.im_size, seed, severity=self.severity))
class Fog(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'fog'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(fog(image, self.im_size, seed, severity=self.severity))
class ZoomBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'zoom_blur'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(zoom_blur(image, self.im_size, severity=self.severity))
class Pixelate(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'pixelate'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(pixelate(image, self.im_size, severity=self.severity))
class JPEGCompression(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'jpeg_compression'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(jpeg_compression(image, self.im_size, severity=self.severity))
class Contrast(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'contrast'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(contrast(image, self.im_size, severity=self.severity))
class Brightness(Augmentation):
tags = ['imagenet_c', 'weather']
name = 'brightness'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(brightness(image, self.im_size, severity=self.severity))
class MotionBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'motion_blur'
def sample_parameters(self):
angle = np.random.uniform(-45,45)
return {'angle' : angle}
def transform(self, image, angle):
return np.uint8(motion_blur(image, self.im_size, angle=angle, severity=self.severity))
class GaussianBlur(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'gaussian_blur'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(gaussian_blur(image, self.im_size, severity=self.severity))
class Frost(Augmentation):
tags = ['imagenet_c', 'path_required', 'weather']
name = 'frost'
def __init__(self, severity, im_size, record=False, max_intensity=False, frost_path=None):
super().__init__(severity, im_size, record, max_intensity)
self.frost_path = frost_path
def sample_parameters(self):
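        # Note: matching the original ImageNet-C implementation, randint(5)
        # below means frost6.jpg is never selected.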
image_idx = np.random.randint(5)
filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][image_idx]
filename = os.path.join(self.frost_path, filename)
frost = cv2.imread(filename)
if self.im_size == 32:
frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
x_start, y_start = np.random.randint(0, frost.shape[0] - self.im_size), np.random.randint(0, frost.shape[1] - self.im_size)
return {'image_idx' : image_idx, 'crop_pos' : (x_start, y_start)}
def transform(self, image, image_idx, crop_pos):
return np.uint8(frost(image, self.im_size, frost_path=self.frost_path, image_idx=image_idx, crop_pos=crop_pos, severity=self.severity))
    def convert_to_numpy(self, params):
        return np.array([params['image_idx']] + list(params['crop_pos']))
    def convert_from_numpy(self, numpy_record):
        # np.int was removed from NumPy; the builtin int is the correct dtype here.
        return {'image_idx' : int(numpy_record[0]), 'crop_pos' : tuple(numpy_record[1:].astype(int).tolist())}
class DefocusBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'defocus_blur'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(defocus_blur(image, self.im_size, severity=self.severity))
class Saturate(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'saturate'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(saturate(image, self.im_size, severity=self.severity))
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/imagenetc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import numpy as np
def is_iterable(obj):
try:
iter(obj)
    except TypeError:
        return False
else:
return True
class Augmentation(abc.ABC):
tags = ["abstract_base_class"]
def __init__(self, severity, im_size, record=False, max_intensity=False, **kwargs):
self.im_size = im_size
self.severity = severity
self.record = record
self.max_intensity = max_intensity
@abc.abstractmethod
def transform(self, image, **kwargs):
...
@abc.abstractmethod
def sample_parameters(self):
...
def __call__(self, image):
params = self.sample_parameters()
out = self.transform(image, **params)
if self.record:
return out, params
return out
def convert_to_numpy(self, params):
out = []
for k, v in params.items():
if isinstance(v, np.ndarray):
out.extend(v.flatten().tolist())
elif is_iterable(v):
out.extend([x for x in v])
else:
out.append(v)
return np.array(out)
def convert_from_numpy(self, numpy_record):
param_signature = self.sample_parameters()
#assert len(param_signature.keys())<=len(numpy_record), "Mismatched numpy_record."
offset = 0
for k, v in param_signature.items():
if isinstance(v, np.ndarray):
num = len(v.flatten())
data = numpy_record[offset:offset+num]
                if np.issubdtype(v.dtype, np.integer):
data = np.round(data, 3)
data = data.astype(v.dtype)
param_signature[k] = data.reshape(v.shape)
offset += num
elif is_iterable(v):
data = []
for x in v:
                    # type(x) == 'int' compared a type against a string and was never true.
                    if isinstance(x, (int, np.integer)):
                        data.append(int(np.round(numpy_record[offset], 3)))
                    else:
                        data.append(type(x)(numpy_record[offset]))
offset += 1
param_signature[k] = data
else:
                if isinstance(v, (int, np.integer)):
                    param_signature[k] = int(np.round(numpy_record[offset], 3))
                else:
                    param_signature[k] = type(v)(numpy_record[offset])
offset += 1
return param_signature
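# Illustrative sketch: a minimal concrete subclass showing the record /
# convert_to_numpy / convert_from_numpy round trip. "_Shift" is a made-up
# augmentation for illustration only.
if __name__ == '__main__':
    class _Shift(Augmentation):
        tags = ['example']
        name = 'shift'
        def sample_parameters(self):
            return {'offset': float(np.random.uniform(0, 8))}
        def transform(self, image, offset):
            return np.clip(image + offset, 0, 255)
    aug = _Shift(severity=1, im_size=32, record=True)
    out, params = aug(np.zeros((32, 32, 3)))
    flat = aug.convert_to_numpy(params)      # -> array([offset])
    assert aug.convert_from_numpy(flat) == params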
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level) * maxval / 10.
def sample_level(n, fixed=False):
if fixed:
return n
return np.random.uniform(low=0.1, high=n)
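# Illustrative checks: int_parameter / float_parameter map a 0-10 severity
# level onto [0, maxval], and sample_level jitters the level unless fixed.
if __name__ == '__main__':
    assert int_parameter(10, 196) == 196
    assert abs(float_parameter(5, 0.5) - 0.25) < 1e-9
    assert 0.1 <= sample_level(3) <= 3
    assert sample_level(3, fixed=True) == 3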
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/utils/severity.py
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/utils/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ... import augmentations as aug
master_aug_list = [
aug.pil.AutoContrast,
aug.pil.Equalize,
aug.pil.Posterize,
aug.pil.Solarize,
aug.pil.Affine,
aug.pil.ShearX,
aug.pil.ShearY,
aug.pil.TranslateX,
aug.pil.TranslateY,
aug.pil.Rotate,
aug.pil.ScaleX,
aug.pil.ScaleY,
aug.pil.ScaleFixedAspectRatio,
aug.pil.Invert,
aug.pil.ColorBalance,
aug.pil.Sharpness,
aug.pil.Contrast,
aug.pil.Brightness,
aug.pil.Quadrilateral,
aug.pil.KeystoneH,
aug.pil.KeystoneV,
aug.pil.Perspective,
aug.pil.QuadrilateralNoBars,
aug.pil.PerspectiveNoBars,
aug.additive_noise.SingleFrequencyGreyscale,
aug.additive_noise.SingleFrequencyColor,
aug.additive_noise.CocentricSineWaves,
aug.additive_noise.PlasmaNoise,
aug.additive_noise.VoronoiNoise,
aug.additive_noise.CausticNoise,
aug.additive_noise.PerlinNoise,
aug.additive_noise.BlueNoise,
aug.additive_noise.BrownishNoise,
aug.blurs.Scatter,
aug.blurs.ChromaticAbberation,
aug.blurs.TransverseChromaticAbberation,
aug.blurs.HomogeneousColorBlur,
aug.blurs.Erosion,
aug.blurs.Dilation,
aug.blurs.CircularMotionBlur,
aug.color.BleachBypass,
aug.color.Technicolor,
aug.color.Pseudocolor,
aug.color.HueShift,
aug.color.ColorDither,
aug.obscure.CutOut,
aug.obscure.CheckerBoardCutOut,
aug.additive_noise.Sparkles,
aug.additive_noise.InverseSparkles,
aug.obscure.Lines,
aug.obscure.RandomSample,
aug.obscure.BlueNoiseSample,
aug.distortion.PinchAndTwirl,
aug.distortion.PinchAndTwirlV2,
aug.distortion.CausticRefraction,
aug.distortion.FishEyeV2,
aug.distortion.WaterDrop,
aug.distortion.Ripple,
aug.imagenetc.GaussianNoise,
aug.imagenetc.ShotNoise,
aug.imagenetc.ImpulseNoise,
aug.imagenetc.SpeckleNoise,
aug.imagenetc.MotionBlur,
aug.imagenetc.DefocusBlur,
aug.imagenetc.ZoomBlur,
aug.imagenetc.GlassBlur,
aug.imagenetc.GaussianBlur,
aug.imagenetc.Brightness,
aug.imagenetc.Fog,
aug.imagenetc.Frost,
aug.imagenetc.Snow,
aug.imagenetc.Spatter,
aug.imagenetc.Contrast,
aug.imagenetc.Pixelate,
aug.imagenetc.JPEGCompression,
aug.imagenetc.ElasticTransform,
aug.imagenetc.Saturate,
]
aug_dict = {a.name : a for a in master_aug_list}
def get_aug_by_name(name):
return aug_dict[name]
def get_augs_by_tag(inclusions, exclusions=[]):
augs = []
for a in master_aug_list:
skip = False
for e in exclusions:
if e in a.tags:
skip = True
if skip:
continue
include = False
for i in inclusions:
if i in a.tags:
include = True
break
if include:
augs.append(a)
return augs
def parse_aug_string(aug_string, im_size, max_intensity=False, record=False, **aug_kwargs):
augs = []
for s in aug_string.split("--"):
if not s:
continue
name, sev = s.split("-")
a = aug_dict[name]
        # The record flag was previously accepted but silently dropped; pass it through.
        augs.append(a(float(sev), im_size, record=record, max_intensity=max_intensity, **aug_kwargs))
return augs
def build_aug_string(augs):
aug_string = ''
for aug in augs:
if aug_string != '':
aug_string += "--"
aug_string = aug_string + aug.name + "-" + str(aug.severity)
if aug_string == '':
aug_string = '--'
return aug_string
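# Illustrative sketch: aug strings are "--"-separated "name-severity" pairs,
# and parse_aug_string / build_aug_string invert each other (up to the str()
# rendering of the severity).
if __name__ == '__main__':
    augs = parse_aug_string('gaussian_noise-3.0--fog-1.0', im_size=32)
    assert build_aug_string(augs) == 'gaussian_noise-3.0--fog-1.0'
    assert get_aug_by_name('fog') is aug.imagenetc.Fog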
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/utils/aug_finder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class PerlinNoiseGenerator(object):
def __init__(self, random_state=None):
self.rand = np.random if random_state is None else random_state
B = 256
N = 16*256
def normalize(arr):
return arr / np.linalg.norm(arr)
self.p = np.arange(2*B+2)
        # Use self.rand so the default (random_state=None) path does not crash.
        self.g = np.array([normalize((self.rand.randint(low=0, high=2**31, size=2) % (2*B) - B) / B)
                           for i in range(2*B+2)])
for i in np.arange(B-1,-1,-1):
k = self.p[i]
j = self.rand.randint(low=0, high=2**31) % B
self.p[i] = self.p[j]
self.p[j] = k
for i in range(B+2):
self.p[B+i] = self.p[i]
self.g[B+i,:] = self.g[i,:]
self.B = B
self.N = N
    @staticmethod
    def s_curve(t):
        return t**2 * (3.0 - 2.0 * t)
def noise(self, x, y):
t = x + self.N
bx0 = int(t) % self.B
bx1 = (bx0+1) % self.B
rx0 = t % 1
rx1 = rx0 - 1.0
t = y + self.N
by0 = int(t) % self.B
by1 = (by0+1) % self.B
ry0 = t % 1
ry1 = ry0 - 1.0
i = self.p[bx0]
j = self.p[bx1]
b00 = self.p[i + by0]
b10 = self.p[j + by0]
b01 = self.p[i + by1]
b11 = self.p[j + by1]
sx = PerlinNoiseGenerator.s_curve(rx0)
sy = PerlinNoiseGenerator.s_curve(ry0)
u = rx0 * self.g[b00,0] + ry0 * self.g[b00,1]
v = rx1 * self.g[b10,0] + ry0 * self.g[b10,1]
a = u + sx * (v - u)
u = rx0 * self.g[b01,0] + ry1 * self.g[b01,1]
v = rx1 * self.g[b11,0] + ry1 * self.g[b11,1]
b = u + sx * (v - u)
return 1.5 * (a + sy * (b - a))
def turbulence(self, x, y, octaves):
t = 0.0
f = 1.0
while f <= octaves:
t += np.abs(self.noise(f*x, f*y)) / f
f = f * 2
return t
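# Illustrative sketch: noise() returns one smoothed value per 2-D point and
# turbulence() sums octaves of its magnitude; results are deterministic for
# a fixed RandomState.
if __name__ == '__main__':
    gen = PerlinNoiseGenerator(np.random.RandomState(0))
    values = [gen.noise(x / 16.0, 0.0) for x in range(8)]
    assert np.all(np.isfinite(values))
    assert np.isfinite(gen.turbulence(0.3, 0.7, octaves=4))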
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/utils/noise.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from PIL import Image
import torch
class PilToNumpy(object):
def __init__(self, as_float=False, scaled_to_one=False):
self.as_float = as_float
self.scaled_to_one = scaled_to_one
assert (not scaled_to_one) or (as_float and scaled_to_one),\
"Must output a float if rescaling to one."
def __call__(self, image):
if not self.as_float:
return np.array(image).astype(np.uint8)
        elif not self.scaled_to_one:
return np.array(image).astype(np.float32)
else:
return np.array(image).astype(np.float32) / 255
class NumpyToPil(object):
def __init__(self):
pass
def __call__(self, image):
return Image.fromarray(image)
class NumpyToTensor(object):
    def __init__(self, HWC_to_CHW=True, bit_to_float=True):
        self.HWC_to_CHW = HWC_to_CHW
        self.bit_to_float = bit_to_float
def __call__(self, image):
image = image.astype(np.float32)
if self.bit_to_float:
image /= 255
if self.HWC_to_CHW:
image = image.transpose(2,0,1)
return torch.Tensor(image)
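# Illustrative sketch: these converters bracket numpy-based corruption
# transforms inside a torchvision pipeline (PIL -> HWC numpy -> corruption
# -> CHW float tensor in [0, 1]).
if __name__ == '__main__':
    image = Image.fromarray(np.zeros((8, 8, 3), dtype=np.uint8))
    array = PilToNumpy()(image)      # HWC uint8
    tensor = NumpyToTensor()(array)  # CHW float32, scaled to [0, 1]
    assert tuple(tensor.shape) == (3, 8, 8) and float(tensor.max()) <= 1.0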
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/utils/converters.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def smoothstep(low, high, x):
x = np.clip(x, low, high)
x = (x - low) / (high - low)
return np.clip(3 * (x ** 2) - 2 * (x ** 3), 0, 1)
def bilinear_interpolation(image, point):
l = int(np.floor(point[0]))
u = int(np.floor(point[1]))
r, d = l+1, u+1
lu = image[l,u,:] if l >= 0 and l < image.shape[0]\
and u >= 0 and u < image.shape[1] else np.array([0,0,0])
ld = image[l,d,:] if l >= 0 and l < image.shape[0]\
and d >= 0 and d < image.shape[1] else np.array([0,0,0])
ru = image[r,u,:] if r >= 0 and r < image.shape[0]\
and u >= 0 and u < image.shape[1] else np.array([0,0,0])
rd = image[r,d,:] if r >= 0 and r < image.shape[0]\
and d >= 0 and d < image.shape[1] else np.array([0,0,0])
al = lu * (1.0 - point[1] + u) + ld * (1.0 - d + point[1])
ar = ru * (1.0 - point[1] + u) + rd * (1.0 - d + point[1])
out = al * (1.0 - point[0] + l) + ar * (1.0 - r + point[0])
return out
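# Illustrative checks: bilinear_interpolation treats out-of-bounds neighbours
# as black, and smoothstep is the usual cubic Hermite ramp.
if __name__ == '__main__':
    image = np.arange(12, dtype=np.float32).reshape(2, 2, 3)
    assert np.allclose(bilinear_interpolation(image, np.array([1.0, 0.0])), image[1, 0, :])
    assert smoothstep(0.0, 1.0, 0.5) == 0.5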
|
augmentation-corruption-fbr_main
|
experiments/overlap/augmentations/utils/image.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os

from transform_finder import build_transform
import numpy as np
import torch
import torchvision as tv
from utils.converters import PilToNumpy, NumpyToTensor
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]
#This is in RGB order since that is the standard for PIL
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = []
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions.extend([(vals[0], float(v)) for v in vals[1:]])
return corruptions
@torch.no_grad()
def test_c_bar(
model,
dataset_type,
dataset_path,
batch_size,
corruption_string=None,
loader_kwargs={},
logger=None,
calculate_averages=True,
distributed=False,
num_gpus=1
):
assert dataset_type in ['imagenet', 'cifar'],\
"Only ImageNet and CIFAR-10 are supported."
if corruption_string is None:
corruption_filename = 'imagenet_c_bar.csv' if dataset_type=='imagenet'\
else 'cifar10_c_bar.csv'
corruptions = read_corruption_csv(corruption_filename)
else:
corruptions = [(c.split("-")[0], float(c.split("-")[1])) for c in corruption_string.split("--")]
results = {}
for name, severity in corruptions:
if dataset_type=='imagenet':
transform = tv.transforms.Compose([
tv.transforms.Resize(256),
tv.transforms.CenterCrop(224),
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type=dataset_type),
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)
])
path = os.path.join(dataset_path, 'val')
dataset = tv.datasets.ImageFolder(path, transform=transform)
elif dataset_type=='cifar':
transform = tv.transforms.Compose([
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type=dataset_type),
NumpyToTensor(),
tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
])
dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)\
if distributed and num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
drop_last=False,
**loader_kwargs
)
num_correct = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if distributed and num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(dataset))
corruption_string = "{}-{:.2f}".format(name, severity)
if logger:
logger.info("Top1 Error for {}: {:.2f}".format(corruption_string, err))
results[corruption_string] = err
if calculate_averages:
unique_corruption_names = list(set([c.split("-")[0] for c in results]))
avg_errs = {"{}-avg".format(u) : np.mean([results[c] for c in results if c.split("-")[0]==u])
for u in unique_corruption_names}
overall_err = np.mean(list(results.values()))
results.update(avg_errs)
results['overall-avg'] = overall_err
if logger:
for k,v in avg_errs.items():
logger.info("Top1 Error for {}: {:.2f}".format(k,v))
logger.info("Average Top1 Error: {}".format(overall_err))
return results
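# Illustrative usage (model and dataset path are placeholders):
#
#   model = tv.models.resnet50(pretrained=True).cuda().eval()
#   results = test_c_bar(model, dataset_type='imagenet',
#                        dataset_path='/path/to/imagenet', batch_size=128,
#                        corruption_string='blue_noise-1.0')
#   print(results['overall-avg'])  # present when calculate_averages=True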
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/test_c_bar.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import corrupt as corr
transform_list = [
corr.ColorBalance,
corr.QuadrilateralNoBars,
corr.PerspectiveNoBars,
corr.SingleFrequencyGreyscale,
corr.CocentricSineWaves,
corr.PlasmaNoise,
corr.VoronoiNoise,
corr.CausticNoise,
corr.PerlinNoise,
corr.BlueNoise,
corr.BrownishNoise,
corr.Scatter,
corr.ChromaticAbberation,
corr.TransverseChromaticAbberation,
corr.CircularMotionBlur,
corr.BleachBypass,
corr.Technicolor,
corr.Pseudocolor,
corr.HueShift,
corr.ColorDither,
corr.CheckerBoardCutOut,
corr.Sparkles,
corr.InverseSparkles,
corr.Lines,
corr.BlueNoiseSample,
corr.PinchAndTwirl,
corr.CausticRefraction,
corr.FishEye,
corr.WaterDrop,
corr.Ripple,
]
transform_dict = {t.name : t for t in transform_list}
def build_transform(name, severity, dataset_type):
assert dataset_type in ['cifar', 'imagenet'],\
"Only cifar and imagenet image resolutions are supported."
return transform_dict[name](severity=severity,
im_size=(32 if dataset_type=='cifar' else 224)
)
def build_transforms_from_string(string, dataset_type):
im_size = (32 if dataset_type=='cifar' else 224)
transforms = []
for s in string.split("--"):
if not s:
continue
name, sev = s.split("-")
t = transform_dict[name]
transforms.append(t(float(sev),im_size))
return transforms
def transform_string(transforms):
string = ''
for t in transforms:
if string != '':
string += "--"
string = string + t.name + "-" + str(t.severity)
if string == '':
string = '--'
return string
def get_transforms_by_tag(inclusions, exclusions=[]):
transforms = []
for t in transform_list:
if any([i in t.tags for i in inclusions])\
and not any([e in t.tags for e in exclusions]):
transforms.append(t)
return transforms
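# Illustrative sketch: transform strings use the same "name-severity" pairs
# joined by "--" as the aug strings elsewhere in this repo.
if __name__ == '__main__':
    ts = build_transforms_from_string('blue_noise-1.0--lines-2.0', 'cifar')
    assert transform_string(ts) == 'blue_noise-1.0--lines-2.0'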
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/transform_finder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchvision as tv
from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToPil
import os
import numpy as np
import torch
from PIL import Image
parser = argparse.ArgumentParser(description="Make CIFAR-10-C-Bar")
parser.add_argument('--imagenet_dir', type=str, required=True,
help='The path to the ImageNet dataset. This path should contain '
'the folder val/')
parser.add_argument('--out_dir', type=str, default='.',
help='The path to where ImageNet-C will be saved.')
parser.add_argument('--num_workers', type=int, default=10,
help='The number of workers to build images with.')
parser.add_argument('--batch_size', type=int, default=100,
help='Batch size of torch data loader used to parallelize '
'data processing.')
parser.add_argument('--corruption_file', type=str, default='imagenet_c_bar.csv',
help='A file that specifies which corruptions in which severities '
'to produce. Path is relative to the script.')
parser.add_argument('--seed', type=int, default=0,
help='The random seed used to generate corruptions.')
class SavingDataset(tv.datasets.ImageFolder):
def __init__(self, root, out_dir, transform=None):
super(SavingDataset, self).__init__(root, transform=transform)
self.out_dir = out_dir
def __getitem__(self, index):
image, label = super(SavingDataset, self).__getitem__(index)
class_name = self.classes[label]
out_dir = os.path.join(self.out_dir, class_name)
try:
os.mkdir(out_dir)
except FileExistsError:
pass
file_name = os.path.basename(self.samples[index][0])
save_path = os.path.join(out_dir, file_name)
Image.fromarray(np.uint8(image)).save(save_path, quality=85, optimize=True)
return image, label
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = {}
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions[vals[0]] = [float(v) for v in vals[1:]]
return corruptions
def main():
args = parser.parse_args()
dataset_path = args.imagenet_dir
corruption_file = args.corruption_file
out_dir = os.path.join(args.out_dir, 'ImageNet-C-Bar')
np.random.seed(args.seed)
bs = args.batch_size
if not os.path.exists(out_dir):
os.mkdir(out_dir)
file_dir = os.path.dirname(os.path.realpath(__file__))
corruption_csv = os.path.join(file_dir, corruption_file)
corruptions = read_corruption_csv(corruption_csv)
for name, severities in corruptions.items():
corruption_dir = os.path.join(out_dir, name)
if not os.path.exists(corruption_dir):
os.mkdir(corruption_dir)
for i, severity in enumerate(severities):
severity_dir = os.path.join(corruption_dir, "{:.2f}".format(severity))
if not os.path.exists(severity_dir):
os.mkdir(severity_dir)
print("Starting {}-{:.2f}...".format(name, severity))
transform = tv.transforms.Compose([
tv.transforms.Resize(256),
tv.transforms.CenterCrop(224),
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type='imagenet'),
])
path = os.path.join(dataset_path, 'val')
dataset = SavingDataset(path, severity_dir, transform=transform)
loader = torch.utils.data.DataLoader(
dataset,
shuffle=False,
sampler=None,
drop_last=False,
pin_memory=False,
num_workers=args.num_workers,
batch_size=bs
)
for j, (im, label) in enumerate(loader):
if (j+1) % 10 == 0:
print("Completed {}/{}".format(j, len(loader)))
if __name__=="__main__":
main()
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/make_imagenet_c_bar.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from math import floor, ceil
from PIL import Image
from scipy.fftpack import ifft2
from scipy.ndimage import gaussian_filter, rotate, shift, zoom
from skimage.draw import line_aa
from skimage.color import rgb2hsv, hsv2rgb
from utils.image import bilinear_interpolation, smoothstep
from utils.perlin_noise import PerlinNoiseGenerator
from base import Transform
import abc
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level) * maxval / 10.
class SingleFrequencyGreyscale(Transform):
name = 'single_frequency_greyscale'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
freq_mag = np.random.uniform(low=-np.pi, high=np.pi)
freq_2 = np.random.uniform(low=-abs(freq_mag), high=abs(freq_mag))
freq = np.array([freq_mag, freq_2])[np.random.permutation(2)]
phase = np.random.uniform(low=0, high=2*np.pi)
intensity = float_parameter(self.severity, 196)
return { 'freq' : freq, 'phase' : phase, 'intensity' : intensity}
def transform(self, image, freq, phase, intensity):
noise = np.array([[np.sin(x * freq[0] + y * freq[1] + phase)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['freq'].tolist() + [params['phase'], params['intensity']])
def convert_from_numpy(self, numpy_record):
return {'freq' : numpy_record[0:2],
'phase' : numpy_record[2],
'intensity' : numpy_record[3]
}
class CocentricSineWaves(Transform):
name = 'cocentric_sine_waves'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
offset = np.random.uniform(low=0, high=self.im_size, size=2)
freq = np.random.uniform(low=0, high=10)
amplitude = np.random.uniform(low=0, high=self.im_size/10)
ring_width = np.random.uniform(low=0, high=self.im_size/10)
intensity = [float_parameter(self.severity, 128) for i in range(3)]
return { 'offset' : offset,
'freq' : freq,
'amplitude' : amplitude,
'ring_width' : ring_width,
'intensity' : intensity
}
def transform(self, image, offset, freq, amplitude, ring_width, intensity):
def calc_intensity(x, y, x0, y0, freq, amplitude, ring_width):
angle = np.arctan2(x-x0, y-y0) * freq
distance = ((np.sqrt((x-x0)**2 + (y-y0)**2) + np.sin(angle) * amplitude) % ring_width) / ring_width
distance -= 1/2
return distance
noise = np.array([[calc_intensity(x, y, offset[0], offset[1], freq, amplitude, ring_width)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((intensity[0] * noise, intensity[1] * noise, intensity[2] * noise), axis=2)
return np.clip(image + noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['offset'].tolist() + [params['freq'], params['amplitude'], params['ring_width']] + params['intensity'])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0:2].tolist(),
'freq' : numpy_record[2],
'amplitude' : numpy_record[3],
'ring_width' : numpy_record[4],
                'intensity' : numpy_record[5:8].tolist()
}
class PlasmaNoise(Transform):
name = 'plasma_noise'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
time = np.random.uniform(low=0.0, high=6*np.pi)
iterations = np.random.randint(low=4, high=7)
sharpness = np.random.uniform(low=0.5, high=1.0)
scale = np.random.uniform(low=0.075, high=0.2) * self.im_size
intensity = float_parameter(self.severity,64)
return {'time' : time, 'iterations' : iterations, 'sharpness' : sharpness,
'scale' : scale, 'intensity' : intensity}
def transform(self, image, time, iterations, sharpness, scale, intensity):
def kernel(x, y, rand, iters, sharp, scale):
x /= scale
y /= scale
i = np.array([1.0, 1.0, 1.0, 0.0])
for s in range(iters):
r = np.array([np.cos(y * i[0] - i[3] + rand / i[1]), np.sin(x * i[0] - i[3] + rand / i[1])]) / i[2]
r += np.array([-r[1],r[0]]) * 0.3
x += r[0]
y += r[1]
i *= np.array([1.93, 1.15, (2.25 - sharp), rand * i[1]])
r = np.sin(x - rand)
b = np.sin(y + rand)
g = np.sin((x + y + np.sin(rand))*0.5)
return [r,g,b]
noise = np.array([[kernel(x,y, time, iterations, sharpness, scale)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((1-intensity/255) * image + intensity * noise, 0, 255).astype(np.uint8)
class VoronoiNoise(Transform):
name = 'voronoi_noise'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.uniform(low=0, high=10)
density = np.random.uniform(low=0.5, high=0.9)
size = np.random.uniform(low=0.05, high=0.2) * self.im_size
intensity = float_parameter(self.severity,255)
if np.random.uniform() > 0.5:
intensity = -intensity
return {'seed' : seed, 'density' : density, 'size' : size, 'intensity' : intensity}
def transform(self, image, size, seed, density, intensity):
def voronoi_hash(v, time):
m = np.array([[13.85, 47.77], [99.41, 88.48]])
w = np.matmul(m,v)
return (np.sin(w) * np.sqrt(w) * time * 0.0025) % 1
def kernel(x, y, size, seed, density):
v = np.array([[x],[y]]) / size + 1
g = v // 1
f = v % 1
dist = 1.0
for i in [-1,0,1]:
for j in [-1,0,1]:
p = np.array([[i],[j]])
curr_dist = np.linalg.norm((p + voronoi_hash(g+p, seed) - f).flatten())
dist = min(curr_dist, dist)
r = smoothstep(0, 1, dist * density) - 0.5
return r
noise = np.array([[kernel(x,y, size, seed, density)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class CausticNoise(Transform):
name = 'caustic_noise'
tags = ['new_corruption']
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
intensity = float_parameter(self.severity, 255)
return { 'time' : time, 'size' : size, 'intensity' : intensity}
def transform(self, image, time, size, intensity):
def kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
noise = np.array([[kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class Sparkles(Transform):
name = 'sparkles'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
centers = np.random.uniform(low=0, high=self.im_size, size=(5, 2))
radii = np.array([float_parameter(self.severity, 0.1)\
for i in range(5)]) * self.im_size
amounts = np.array([50 for i in range(5)])
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
nrays = np.random.randint(low=50, high=200, size=5)
return {'centers' : centers, 'radii' : radii, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'nrays' : nrays, 'amounts' : amounts
}
def transform(self, image, centers, radii, nrays, amounts, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return value + f * (color - value)
random_state = np.random.RandomState(seed=seed)
for center, rays, amount, radius in zip(centers, nrays, amounts, radii):
ray_lengths = [max(1,radius + randomness / 100.0 * radius * random_state.randn())\
for i in range(rays)]
image = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class InverseSparkles(Transform):
name = 'inverse_sparkles'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
radius = 0.25 * self.im_size
        amount = 100 - float_parameter(self.severity, 65)
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
rays = np.random.randint(low=50, high=200)
return {'center' : center, 'radius' : radius, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'rays' : rays, 'amount' : amount
}
def transform(self, image, center, radius, rays, amount, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return color + f * (value - color)
random_state = np.random.RandomState(seed=seed)
ray_lengths = [radius + randomness / 100.0 * radius * random_state.randn()\
for i in range(rays)]
out = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
class PerlinNoise(Transform):
name = 'perlin_noise'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
m = np.array([[1,0],[0,1]]) / (32 * self.im_size / 224)
turbulence = 16.0
gain = 0.5
bias = 0.5
alpha = float_parameter(self.severity, 0.50)
seed = np.random.randint(low=0, high=2**32)
return {'m': m, 'turbulence' : turbulence, 'seed': seed,
'gain': gain, 'bias': bias, 'alpha': alpha}
def transform(self, image, m, turbulence, seed, gain, bias, alpha):
random_state = np.random.RandomState(seed=seed)
noise = PerlinNoiseGenerator(random_state)
def kernel(point, m, turbulence, gain, bias):
npoint = np.matmul(point, m)
f = noise.turbulence(npoint[0], npoint[1], turbulence)\
if turbulence != 1.0 else noise.noise(npoint[0], npoint[1])
f = gain * f + bias
return np.clip(np.array([f,f,f]),0,1.0)
noise = np.array([[kernel(np.array([y,x]),m,turbulence,gain, bias) for x in range(self.im_size)]\
for y in range(self.im_size)])
out = (1 - alpha) * image.astype(np.float32) + 255 * alpha * noise
return np.clip(out, 0, 255).astype(np.uint8)
class BlueNoise(Transform):
name = 'blue_noise'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(self.severity, 196)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class BrownishNoise(Transform):
name = 'brownish_noise'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(self.severity, 64)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[1/(np.linalg.norm(np.array([x,y])-center)**2+1)\
for x in range(self.im_size)] for y in range(self.im_size)])
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
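# Note: BlueNoise and BrownishNoise share the same phase-randomisation
# machinery and differ only in the radial power profile (|f| versus
# 1 / (|f|^2 + 1)), so the former concentrates energy at high spatial
# frequencies and the latter at low ones.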
class BleachBypass(Transform):
name = 'bleach_bypass'
tags = ['new_corruption']
def sample_parameters(self):
amount = float_parameter(self.severity, 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image*vals, axis=2, keepdims=True)/255
l = np.clip(10.0 * (luma - 0.45), 0, 1.0)
result1 = 2 * image * luma / 255
result2 = 1.0 - 2.0 * (1.0 - luma) * (1.0 - image /255)
out = ((1-l) * result1 + l * result2) * 255
return ((1-amount) * image + amount * out).astype(np.uint8)
class Technicolor(Transform):
name = 'technicolor'
tags = ['new_corruption']
def sample_parameters(self):
amount = float_parameter(self.severity, 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
redmatte = 1.0 - (image[:,:,0]/255 - ((image[:,:,1]/2+image[:,:,2]/2))/255)
greenmatte = 1.0 - (image[:,:,1]/255 - ((image[:,:,0]/2+image[:,:,2]/2))/255)
bluematte = 1.0 - (image[:,:,2]/255 - ((image[:,:,0]/2+image[:,:,1]/2))/255)
red = greenmatte * bluematte * image[:,:,0].astype(np.float32)
green = redmatte * bluematte * image[:,:,1].astype(np.float32)
blue = redmatte * greenmatte * image[:,:,2].astype(np.float32)
new_color = np.stack((red, green, blue), axis=2)
return ((1-amount) * image + amount * new_color).astype(np.uint8)
class Pseudocolor(Transform):
name = 'pseudocolor'
tags = ['new_corruption']
def sample_parameters(self):
smoothness = np.random.uniform(low=0.25, high=0.75)
color0 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color1 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color2 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color3 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color4 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
amount = float_parameter(self.severity, 0.5)
return { 'smoothness' : smoothness, 'color0' : color0, 'color1': color1,
'color2': color2, 'color3' : color3, 'color4' : color4, 'amount' : amount }
def transform(self, image, color0, color1, color2, color3, color4, smoothness, amount):
color0 = color0.astype(np.uint8)
color1 = color1.astype(np.uint8)
color2 = color2.astype(np.uint8)
color3 = color3.astype(np.uint8)
color4 = color4.astype(np.uint8)
def get_color(color0, color1, edge0, edge1, luma, smoothness):
smooth_color = color0 + ((color1 - color0) * smoothstep(edge0, edge1, luma))
a = 4.0 * (luma - edge0)
linear_color = (1 - a) * color0 + a * color1
return (1 - smoothness) * linear_color + smoothness * smooth_color
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image.astype(np.float32)*vals, axis=2, keepdims=True)/255
c1 = get_color(color0, color1, 0.0, 0.25, luma, smoothness)
c2 = get_color(color1, color2, 0.25, 0.50, luma, smoothness)
c3 = get_color(color2, color3, 0.5, 0.75, luma, smoothness)
c4 = get_color(color3, color4, 0.75, 1.0, luma, smoothness)
out = (luma < 0.25) * c1 + ((luma >= 0.25)&(luma < 0.5)) * c2\
+ ((luma >= 0.5)&(luma < 0.75)) * c3 + (luma >= 0.75) * c4
return np.clip((1 - amount) * image + amount * out, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
colors = []
for i in range(5):
colors.extend(params['color'+str(i)].tolist())
return np.array([params['smoothness']] + colors + [params['amount']])
def convert_from_numpy(self, numpy_record):
params = {'smoothness' : numpy_record[0], 'amount' : numpy_record[16]}
for i in range(5):
params['color'+str(i)] = numpy_record[1+3*i:1+3*(i+1)]
return params
class HueShift(Transform):
name = 'hue_shift'
tags = ['new_corruption']
def sample_parameters(self):
amount = float_parameter(self.severity, 0.5)
if np.random.uniform() < 0.5:
amount *= -1
return {'amount' : amount}
def transform(self, image, amount):
hsv_image = rgb2hsv(image.astype(np.float32)/255)
hsv_image[:,:,0] += (amount % 1.0)
return np.clip(hsv2rgb(hsv_image)*255, 0, 255).astype(np.uint8)
class ColorDither(Transform):
name = 'color_dither'
tags = ['new_corruption']
def sample_parameters(self):
levels = int_parameter(self.severity,10)
levels = 14-levels
return {'levels' : levels}
def transform(self, image, levels):
index = 0
color_map = [int(255 * i / (levels -1)) for i in range(levels)]
div = [int(levels*i / 256) for i in range(256)]
out = np.zeros_like(image)
image_copy = image.copy()
m = np.array([[0,0,0],[0,0,7],[3,5,1]])
for y in range(self.im_size):
            reverse = ((y % 2) == 1)  # serpentine scan: alternate direction on odd rows
if reverse:
index = y*self.im_size + self.im_size - 1
direction = -1
else:
index = y*self.im_size
direction = 1
for x in range(self.im_size):
curr_val = image_copy[index//self.im_size, index%self.im_size,:]
new_val = np.array([color_map[div[c]] for c in curr_val])
out[index//self.im_size, index%self.im_size,:] = new_val
e = curr_val - new_val
for i in [-1,0,1]:
iy = y+i
if iy > 0 and iy < self.im_size:
for j in [-1,0,1]:
jx = x+j
if jx > 0 and jx < self.im_size:
if reverse:
w = m[(i+1),-j+1]
else:
w = m[(i+1),j+1]
if w != 0:
k = index - j if reverse else index + j
curr_val = image_copy[k//self.im_size, k%self.im_size,:].astype(np.float32)
curr_val = np.clip(curr_val + e * w/np.sum(m),0,255).astype(np.uint8)
image_copy[k//self.im_size,k%self.im_size,:] = curr_val
index += direction
return np.clip(out, 0, 255).astype(np.uint8)
class ColorBalance(Transform):
name = 'color_balance'
tags = ['new_corruption']
def sample_parameters(self):
shift = float_parameter(self.severity, 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Color(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class CheckerBoardCutOut(Transform):
name = 'checkerboard_cutout'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
angle = np.random.uniform(low=0, high=2*np.pi)
scales = np.maximum(np.random.uniform(low=0.1, high=0.25) * self.im_size, 1)
scales = (scales, scales)
fraction = float_parameter(self.severity, 1.0)
seed = np.random.randint(low=0, high=2**32)
return {'angle' : angle, 'scales' : scales, 'fraction' : fraction, 'seed' : seed}
def transform(self, image, scales, angle, fraction, seed):
random_state = np.random.RandomState(seed=seed)
grid = random_state.uniform(size=(int(4*self.im_size//scales[0]), int(4*self.im_size//scales[1]))) < fraction
def mask_kernel(point, scales, angle, grid):
nx = (np.cos(angle) * point[0] + np.sin(angle) * point[1]) / scales[0]
ny = (-np.sin(angle) * point[0] + np.cos(angle) * point[1]) / scales[1]
return (int(nx % 2) != int(ny % 2)) or not grid[int(nx),int(ny)]
out = np.array([[image[y,x,:] if mask_kernel([y,x], scales, angle, grid) else np.array([128,128,128])\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
class Lines(Transform):
name = 'lines'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
length = 1.0
density = float_parameter(self.severity, 1.0)
angle = np.random.uniform(low=0.0, high=2*np.pi)
angle_variation = np.random.uniform(low=0.1, high=1.0)
seed = np.random.randint(low=0, high=2**32)
return {'length' : length, 'density' : density, 'angle' : angle, 'angle_variation' : angle_variation, 'seed' : seed}
def transform(self, image, length, density, angle, angle_variation, seed):
num_lines = int(density * self.im_size)
l = length * self.im_size
random_state = np.random.RandomState(seed=seed)
out = image.copy()
for i in range(num_lines):
x = self.im_size * random_state.uniform()
y = self.im_size * random_state.uniform()
a = angle + 2 * np.pi * angle_variation * (random_state.uniform() - 0.5)
s = np.sin(a) * l
c = np.cos(a) * l
x1 = int(x-c)
x2 = int(x+c)
y1 = int(y-s)
y2 = int(y+s)
rxc, ryc, rval = line_aa(x1, y1, x2, y2)
xc, yc, val = [], [], []
for rx, ry, rv in zip(rxc, ryc, rval):
if rx >= 0 and ry >= 0 and rx < self.im_size and ry < self.im_size:
xc.append(rx)
yc.append(ry)
val.append(rv)
            xc, yc, val = np.array(xc, dtype=int), np.array(yc, dtype=int), np.array(val)
out[xc, yc, :] = (1.0 - val.reshape(-1,1)) * out[xc, yc, :].astype(np.float32) + val.reshape(-1,1)*128
return out.astype(np.uint8)
class BlueNoiseSample(Transform):
name = 'blue_noise_sample'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
threshold = float_parameter(self.severity, 3.0) - 2.5
return {'seed' : seed, 'threshold' : threshold}
def transform(self, image, seed, threshold):
random_state = np.random.RandomState(seed=seed)
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
mask = noise > threshold
out = image * mask.reshape(self.im_size, self.im_size, 1)
return np.clip(out, 0, 255).astype(np.uint8)
class CausticRefraction(Transform):
name = 'caustic_refraction'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
eta = 4.0
lens_scale = float_parameter(self.severity, 0.5*self.im_size)
lighting_amount = float_parameter(self.severity, 2.0)
softening = 1
return { 'time' : time, 'size' : size, 'eta' : eta, 'lens_scale' : lens_scale, 'lighting_amount': lighting_amount, 'softening' : softening}
def transform(self, image, time, size, eta, lens_scale, lighting_amount, softening):
def caustic_noise_kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
def refract(incident, normal, eta):
if np.abs(np.dot(incident, normal)) >= 1.0 - 1e-3:
return incident
angle = np.arccos(np.dot(incident, normal))
out_angle = np.arcsin(np.sin(angle) / eta)
out_unrotated = np.array([np.cos(out_angle), np.sin(out_angle), 0.0])
spectator_dim = np.cross(incident, normal)
spectator_dim /= np.linalg.norm(spectator_dim)
orthogonal_dim = np.cross(normal, spectator_dim)
rotation_matrix = np.stack((normal, orthogonal_dim, spectator_dim), axis=0)
return np.matmul(np.linalg.inv(rotation_matrix), out_unrotated)
def luma_at_offset(image, origin, offset):
pixel_value = image[origin[0]+offset[0], origin[1]+offset[1], :]\
if origin[0]+offset[0] >= 0 and origin[0]+offset[0] < image.shape[0]\
and origin[1]+offset[1] >= 0 and origin[1]+offset[1] < image.shape[1]\
else np.array([0.0,0.0,0])
return np.dot(pixel_value, np.array([0.2126, 0.7152, 0.0722]))
def luma_based_refract(point, image, caustics, eta, lens_scale, lighting_amount):
north_luma = luma_at_offset(caustics, point, np.array([0,-1]))
south_luma = luma_at_offset(caustics, point, np.array([0, 1]))
west_luma = luma_at_offset(caustics, point, np.array([-1, 0]))
east_luma = luma_at_offset(caustics, point, np.array([1,0]))
lens_normal = np.array([east_luma - west_luma, south_luma - north_luma, 1.0])
lens_normal = lens_normal / np.linalg.norm(lens_normal)
refract_vector = refract(np.array([0.0, 0.0, 1.0]), lens_normal, eta) * lens_scale
refract_vector = np.round(refract_vector, 3)
out_pixel = bilinear_interpolation(image, point+refract_vector[0:2])
out_pixel += (north_luma - south_luma) * lighting_amount
out_pixel += (east_luma - west_luma) * lighting_amount
return np.clip(out_pixel, 0, 1)
noise = np.array([[caustic_noise_kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = gaussian_filter(noise, sigma=softening)
image = image.astype(np.float32) / 255
out = np.array([[luma_based_refract(np.array([y,x]), image, noise, eta, lens_scale, lighting_amount)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((out * 255).astype(np.uint8), 0, 255)
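# Hypothetical usage sketch (not in the original file). The nested Python
# loops above evaluate the caustic shader and the refraction once per pixel,
# so cost grows as O(im_size**2); a small image keeps this quick. Assumes the
# module's float_parameter and bilinear_interpolation helpers are in scope.
def _caustic_refraction_demo():
    img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
    return CausticRefraction(severity=3, im_size=32)(img)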
class PinchAndTwirl(Transform):
name = 'pinch_and_twirl'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
num_per_axis = 5 if self.im_size==224 else 3
angles = np.array([np.random.choice([1,-1]) * float_parameter(self.severity, np.pi/2) for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
amount = float_parameter(self.severity, 0.4) + 0.1
return {'num_per_axis' : num_per_axis, 'angles' : angles, 'amount' : amount}
def transform(self, image, num_per_axis, angles, amount):
def warp_kernel(point, center, radius, amount, angle):
dx = point[0] - center[0]
dy = point[1] - center[1]
dist = np.linalg.norm(point - center)
if dist > radius or np.round(dist, 3) == 0.0:
return point
d = dist / radius
t = np.sin(np.pi * 0.5 * d) ** (- amount)
dx *= t
dy *= t
e = 1 - d
a = angle * (e ** 2)
out = center + np.array([dx*np.cos(a) - dy*np.sin(a), dx*np.sin(a) + dy*np.cos(a)])
return out
out = image.copy().astype(np.float32)
grid_size = self.im_size // num_per_axis
radius = grid_size / 2
for i in range(num_per_axis):
for j in range(num_per_axis):
l, r = i * grid_size, (i+1) * grid_size
u, d = j * grid_size, (j+1) * grid_size
center = np.array([u+radius, l+radius])
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, amount, angles[i,j]))\
for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
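# Not in the original file: a small sketch of the radial gain
# t = sin(pi/2 * d) ** (-amount) used by warp_kernel above. The gain is
# largest near the grid-cell center (strong pinch, sampling far from the
# pixel) and falls to exactly 1 at the cell edge (no displacement).
def _pinch_profile_sketch(amount=0.3):
    for d in (0.1, 0.25, 0.5, 0.75, 1.0):
        print(d, np.sin(np.pi * 0.5 * d) ** (-amount))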
class FishEye(Transform):
name = 'fish_eye'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
density = 0.01 * 224**2 / (self.im_size**2)
eta = float_parameter(self.severity, 2.0) + 1.0
radius = max(0.05 * self.im_size, 3)
return {'seed' : seed, 'density' : density, 'eta': eta, 'radius' : radius}
def transform(self, image, density, eta, radius, seed):
def warp_kernel(point, center, a, b, eta):
dx = point[0] - center[0]
dy = point[1] - center[1]
x2 = dx**2
y2 = dy**2
a2 = a**2
b2 = b**2
if (y2 >= (b2 - b2*x2/a2)):
return point
r = 1.0 / eta
z = np.sqrt((1.0 - x2/a2 - y2/b2) * (a*b))
z2 = z**2
x_angle = np.arccos(dx / np.sqrt(x2+z2))
angle_1 = np.pi/2 - x_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - x_angle - angle_2
out_x = point[0] - np.tan(angle_2)*z
y_angle = np.arccos(dy / np.sqrt(y2+z2))
angle_1 = np.pi/2 - y_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - y_angle - angle_2
out_y = point[1] - np.tan(angle_2)*z
return np.array([out_x, out_y])
random_state = np.random.RandomState(seed=seed)
num = int(density * self.im_size**2)
out = image.copy().astype(np.float32)
for i in range(num):
center = random_state.uniform(low=0, high=self.im_size, size=2)
            l = max(np.floor(center[1]-radius).astype(int), 0)
            r = min(np.ceil(center[1]+radius).astype(int), self.im_size)
            u = max(np.floor(center[0]-radius).astype(int), 0)
            d = min(np.ceil(center[0]+radius).astype(int), self.im_size)
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, radius, eta)) for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
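# Not in the original file: the density above is normalized so the expected
# bubble count, num = density * im_size**2 = 0.01 * 224**2, stays constant
# (about 501) across image sizes. A quick check:
def _fish_eye_count_sketch():
    for im_size in (32, 64, 224):
        density = 0.01 * 224 ** 2 / (im_size ** 2)
        print(im_size, int(density * im_size ** 2))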
class WaterDrop(Transform):
name = 'water_drop'
tags = ['new_corruption']
def sample_parameters(self):
        center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
radius = self.im_size//2
amplitude = float_parameter(self.severity, 0.25)
wavelength = np.random.uniform(low=0.05, high=0.2) * self.im_size
phase = np.random.uniform(low=0.0, high=2*np.pi)
return {'center': center, 'radius' : radius, 'amplitude' : amplitude, 'wavelength' : wavelength, 'phase': phase}
def transform(self, image, center, radius, amplitude, wavelength, phase):
def warp_kernel(point, center, radius, amplitude, wavelength, phase):
dx, dy = point - center
dist = np.linalg.norm(point-center)
if dist > radius:
return point
amount = amplitude * np.sin(dist / wavelength * np.pi * 2 - phase)
if dist != 0.0:
amount *= wavelength / dist
return point + amount * (point - center)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, amplitude, wavelength, phase))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
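# Not in the original file: the displacement above is purely radial, with a
# sinusoidal ripple in the distance from the drop center damped by
# wavelength/dist, so rings fade with distance from the impact point. The
# scalar gain at a few distances, for amplitude 0.25 and wavelength 10:
def _water_drop_gain_sketch():
    for dist in (2.5, 7.5, 12.5, 27.5):
        print(dist, 0.25 * np.sin(dist / 10.0 * np.pi * 2) * (10.0 / dist))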
class Ripple(Transform):
name = 'ripple'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
amplitudes = np.array([float_parameter(self.severity, 0.025)\
for i in range(2)]) * self.im_size
wavelengths = np.random.uniform(low=0.1, high=0.3, size=2) * self.im_size
phases = np.random.uniform(low=0, high=2*np.pi, size=2)
return {'amplitudes' : amplitudes, 'wavelengths' : wavelengths, 'phases' : phases}
def transform(self, image, wavelengths, phases, amplitudes):
def warp_kernel(point, wavelengths, phases, amplitudes):
return point + amplitudes * np.sin(2 * np.pi * point / wavelengths + phases)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), wavelengths, phases, amplitudes))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
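# Not in the original file: a worked example of the warp kernel above. Each
# axis of the sampled source coordinate is offset by an independent sinusoid
# of the pixel's own coordinate: point + A * sin(2*pi*point/lambda + phase).
def _ripple_kernel_sketch():
    point = np.array([10.0, 20.0])
    amplitudes = np.array([2.0, 2.0])
    wavelengths = np.array([8.0, 16.0])
    phases = np.array([0.0, 0.0])
    return point + amplitudes * np.sin(2 * np.pi * point / wavelengths + phases)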
class PerspectiveNoBars(Transform):
name = 'perspective_no_bars'
tags = ['new_corruption']
def sample_parameters(self):
offset_x = float_parameter(self.severity, 0.1)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(self.severity, 0.1)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(self.severity, self.im_size / 10)
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(self.severity, self.im_size / 10)
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(self.severity, 0.15)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(self.severity, 0.15)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
denom_x = float_parameter(self.severity, 0.2 / self.im_size)
if np.random.uniform() > 0.5:
            denom_x = -denom_x
denom_y = float_parameter(self.severity, 0.2 / self.im_size)
if np.random.uniform() > 0.5:
            denom_y = -denom_y
perspective_params = np.array([factor_x, offset_x, shift_x,offset_y, factor_y, shift_y, denom_x, denom_y])
return {'perspective_params' : perspective_params}
def transform(self, image, perspective_params):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['perspective_params']
def convert_from_numpy(self, numpy_record):
return {'perspective_params' : numpy_record}
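# Note (not in the original file): PIL's Image.transform with
# Image.PERSPECTIVE expects eight coefficients (a, b, c, d, e, f, g, h) that
# map each output pixel (x, y) to the source pixel
# ((a*x + b*y + c) / (g*x + h*y + 1), (d*x + e*y + f) / (g*x + h*y + 1)).
# perspective_params is built in exactly that order above: per-axis scale
# factors (a, e), cross-axis offsets (b, d), shifts (c, f), and the two
# denominator terms (g, h).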
class QuadrilateralNoBars(Transform):
name = 'quadrilateral_no_bars'
tags = ['new_corruption']
def sample_parameters(self):
points = np.array([
[0,0],
[0, self.im_size],
[self.im_size, self.im_size],
[self.im_size, 0]
]).astype(np.float32)
shift = float_parameter(self.severity, self.im_size / 3) * np.random.uniform(low=-1,high=1, size=(4,2))
points += shift
return {'points' : points}
def transform(self, image, points):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['points'].flatten()
def convert_from_numpy(self, numpy_record):
return {'points' : numpy_record.reshape(4,2)}
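# Note (not in the original file): the second transform pass above warps an
# all-white image with the same parameters, so mask is ~1 where the warp
# produced valid pixels and falls to 0 in the uncovered regions; blending
# back the original image there is what makes this the "no bars" variant
# (no black border fill).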
class Scatter(Transform):
name = 'scatter'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.uniform(low=0.0, high=10.0)
radius = float_parameter(self.severity, self.im_size/10)
return {'seed' : seed, 'radius' : radius}
def transform(self, image, seed, radius):
def noise(x, y, seed):
i, j = np.sin(x * seed), np.cos(y * seed)
return (np.sin(12.9898*i + 78.233*j) * 43758.5453) % 1
def warp_kernel(x, y, seed, radius):
x_offset = radius * (-1.0 + noise(x, y, seed) * 2)
y_offset = radius * (-1.0 + noise(y, x, seed) * 2)
x_new = min(max(0, x+x_offset), self.im_size-1)
y_new = min(max(0, y+y_offset), self.im_size-1)
return y_new, x_new
out = np.array([[bilinear_interpolation(image, warp_kernel(x, y, seed, radius))\
for x in range(self.im_size)] for y in range(self.im_size)])
return out.astype(np.uint8)
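# Note (not in the original file): noise() above is a port of the well-known
# GLSL one-liner fract(sin(dot(p, vec2(12.9898, 78.233))) * 43758.5453), a
# cheap deterministic hash into [0, 1); because it depends only on the
# coordinates and the sampled seed, the same seed always scatters pixels the
# same way.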
class ChromaticAbberation(Transform):
name = 'chromatic_abberation'
tags = ['new_corruption']
def sample_parameters(self):
angles = np.random.uniform(low=0, high=2*np.pi, size=3)
dists = np.array([float_parameter(self.severity, self.im_size / 10)\
for i in range(3)])
shifts = np.array([[np.cos(a)*d, np.sin(a)*d] for a, d in zip(angles, dists)])
return { 'rgb_shifts' : shifts}
def transform(self, image, rgb_shifts):
out = image.copy()
for i in range(3):
out[:,:,i] = shift(image[:,:,i], rgb_shifts[i], prefilter=False)
return out
def convert_to_numpy(self, params):
return params['rgb_shifts'].flatten()
def convert_from_numpy(self, numpy_record):
        return {'rgb_shifts' : numpy_record.reshape(3,2).astype(int)}
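# Note (not in the original file): shift here is assumed to be
# scipy.ndimage.shift, which translates each channel by a (dy, dx) offset and
# fills uncovered pixels with zeros, so each color plane slides in its own
# sampled direction to mimic lateral chromatic aberration.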
class TransverseChromaticAbberation(Transform):
name = 'transverse_chromatic_abberation'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
        scale = float_parameter(self.severity, 0.5)
        scales = np.array([1.0, 1.0+scale/2, 1.0+scale])
scales = scales[np.random.permutation(3)]
return { 'scales' : scales }
def transform(self, image, scales):
out = image.copy()
for c in range(3):
zoomed = zoom(image[:,:,c], scales[c], prefilter=False)
edge = (zoomed.shape[0]-self.im_size)//2
out[:,:,c] = zoomed[edge:edge+self.im_size, edge:edge+self.im_size]
return out.astype(np.uint8)
def convert_to_numpy(self, params):
return params['scales'].flatten()
def convert_from_numpy(self, numpy_record):
return {'scales' : numpy_record}
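# Note (not in the original file): the three scales above are all >= 1, so
# zoom (assumed to be scipy.ndimage.zoom) always yields a channel at least
# im_size wide, and the center crop recovers an im_size x im_size plane;
# permuting the scales randomizes which color channel magnifies most.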
class CircularMotionBlur(Transform):
name = 'circular_motion_blur'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
amount = float_parameter(self.severity,15)
return {'amount' : amount}
def transform(self, image, amount):
num = 21
factors = []
rotated = []
image = image.astype(np.float32) / 255
for i in range(num):
angle = (2*i/(num-1) - 1) * amount
rotated.append(rotate(image, angle, reshape=False))
factors.append(np.exp(- 2*(2*i/(num-1)-1)**2))
out = np.zeros_like(image)
for i, f in zip(rotated, factors):
out += f * i
out /= sum(factors)
return np.clip(out*255, 0, 255).astype(np.uint8)
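# Not in the original file: a small sketch of the weighting above. The 21
# rotation angles are spread uniformly over [-amount, amount] and averaged
# with Gaussian weights exp(-2*u**2) for u in [-1, 1], so the unrotated
# frame dominates and the extreme rotations contribute least.
def _circular_blur_weights_sketch(num=21):
    u = 2 * np.arange(num) / (num - 1) - 1
    w = np.exp(-2 * u ** 2)
    return w / w.sum()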
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/corrupt.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchvision as tv
from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToPil
import os
import numpy as np
import torch
parser = argparse.ArgumentParser(description="Make CIFAR-10-C-Bar")
parser.add_argument('--cifar_dir', type=str, required=True,
help='The path to the CIFAR-10 dataset. This path should contain '
'the folder cifar-10-batches-py/')
parser.add_argument('--out_dir', type=str, default='.',
    help='The path to where CIFAR-10-C-Bar will be saved.')
parser.add_argument('--num_workers', type=int, default=10,
help='The number of workers to build images with.')
parser.add_argument('--batch_size', type=int, default=200,
help='Batch size of torch data loader used to parallelize '
'data processing.')
parser.add_argument('--seed', type=int, default=0,
help='The random seed used to generate corruptions.')
parser.add_argument('--corruption_file', type=str, default='cifar10_c_bar.csv',
    help='A file that specifies which corruptions to produce at which '
    'severities. Path is relative to the script.')
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = {}
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions[vals[0]] = [float(v) for v in vals[1:]]
return corruptions
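# A hypothetical example (not in the original file) of the CSV format parsed
# above: each line holds a corruption name followed by its severities, e.g.
#   blue_noise_sample,0.5,1.0,2.0,3.0,4.0
# parses to {'blue_noise_sample': [0.5, 1.0, 2.0, 3.0, 4.0]}.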
def main():
args = parser.parse_args()
dataset_path = args.cifar_dir
out_dir = os.path.join(args.out_dir, 'CIFAR-10-C-Bar')
bs = args.batch_size
if not os.path.exists(out_dir):
os.mkdir(out_dir)
file_dir = os.path.dirname(os.path.realpath(__file__))
    corruption_csv = os.path.join(file_dir, args.corruption_file)
corruptions = read_corruption_csv(corruption_csv)
for name, severities in corruptions.items():
data = np.zeros((len(severities)*10000, 32, 32, 3)).astype(np.uint8)
        labels = np.zeros(len(severities)*10000).astype(int)
for i, severity in enumerate(severities):
print("Starting {}-{:.2f}...".format(name, severity))
transform = tv.transforms.Compose([
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type='cifar'),
])
dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
loader = torch.utils.data.DataLoader(
dataset,
shuffle=False,
sampler=None,
drop_last=False,
pin_memory=False,
num_workers=args.num_workers,
batch_size=bs
)
for j, (im, label) in enumerate(loader):
if im.size(0)==bs:
data[i*10000+j*bs:i*10000+bs*(j+1),:,:,:] = im.numpy().astype(np.uint8)
labels[i*10000+j*bs:i*10000+bs*(j+1)] = label.numpy()
                else:
                    data[i*10000+j*bs:(i+1)*10000,:,:,:] = im.numpy().astype(np.uint8)
                    labels[i*10000+j*bs:(i+1)*10000] = label.numpy()
out_file = os.path.join(out_dir, name + ".npy")
print("Saving {} to {}.".format(name, out_file))
np.save(out_file, data)
labels_file = os.path.join(out_dir, "labels.npy")
np.save(labels_file, labels)
if __name__=="__main__":
main()
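# Hypothetical invocation (not in the original file), assuming CIFAR-10 has
# already been downloaded so that <cifar_dir>/cifar-10-batches-py/ exists:
#   python make_cifar10_c_bar.py --cifar_dir ./data --out_dir ./out --num_workers 4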
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/make_cifar10_c_bar.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import numpy as np
def is_iterable(obj):
try:
iter(obj)
    except TypeError:
return False
else:
return True
class Transform(abc.ABC):
name = "abstract_transform"
def __init__(self, severity, im_size, record=False, max_intensity=False, **kwargs):
self.im_size = im_size
self.severity = severity
self.record = record
self.max_intensity = max_intensity
@abc.abstractmethod
def transform(self, image, **kwargs):
...
@abc.abstractmethod
def sample_parameters(self):
...
def __call__(self, image):
params = self.sample_parameters()
out = self.transform(image, **params)
if self.record:
return out, params
return out
def convert_to_numpy(self, params):
out = []
for k, v in params.items():
if isinstance(v, np.ndarray):
out.extend(v.flatten().tolist())
elif is_iterable(v):
out.extend([x for x in v])
else:
out.append(v)
return np.array(out)
def convert_from_numpy(self, numpy_record):
param_signature = self.sample_parameters()
#assert len(param_signature.keys())<=len(numpy_record), "Mismatched numpy_record."
offset = 0
for k, v in param_signature.items():
if isinstance(v, np.ndarray):
num = len(v.flatten())
data = numpy_record[offset:offset+num]
                if np.issubdtype(v.dtype, np.integer):
                    data = np.round(data, 3)
data = data.astype(v.dtype)
param_signature[k] = data.reshape(v.shape)
offset += num
elif is_iterable(v):
data = []
for x in v:
                    if isinstance(x, int):
                        data.append(int(np.round(numpy_record[offset],3)))
                    else:
                        data.append(type(x)(numpy_record[offset]))
offset += 1
param_signature[k] = data
else:
                if isinstance(v, int):
                    param_signature[k] = int(np.round(numpy_record[offset],3))
                else:
                    param_signature[k] = type(v)(numpy_record[offset])
offset += 1
return param_signature
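# A minimal sketch (not in the original file) of the contract above:
# sample_parameters() returns a dict that __call__ splats into transform().
# The class and its behavior here are hypothetical.
class _IdentityJitterSketch(Transform):
    name = 'identity_jitter_sketch'
    def sample_parameters(self):
        return {'offset': float(np.random.uniform(-1, 1) * self.severity)}
    def transform(self, image, offset):
        # Shift all pixel values by a severity-scaled constant.
        return np.clip(image.astype(np.float32) + offset, 0, 255).astype(np.uint8)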
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from PIL import Image
import torch
class PilToNumpy(object):
def __init__(self, as_float=False, scaled_to_one=False):
self.as_float = as_float
self.scaled_to_one = scaled_to_one
assert (not scaled_to_one) or (as_float and scaled_to_one),\
"Must output a float if rescaling to one."
def __call__(self, image):
if not self.as_float:
return np.array(image).astype(np.uint8)
        elif not self.scaled_to_one:
return np.array(image).astype(np.float32)
else:
return np.array(image).astype(np.float32) / 255
class NumpyToPil(object):
def __init__(self):
pass
def __call__(self, image):
return Image.fromarray(image)
class NumpyToTensor(object):
    def __init__(self, HWC_to_CHW=True, bit_to_float=True):
        self.HWC_to_CHW = HWC_to_CHW
        self.bit_to_float = bit_to_float
def __call__(self, image):
image = image.astype(np.float32)
if self.bit_to_float:
image /= 255
if self.HWC_to_CHW:
image = image.transpose(2,0,1)
return torch.Tensor(image)
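# A hypothetical composition sketch (not in the original file): these
# converters are meant to bracket numpy-based corruptions inside a
# torchvision pipeline, e.g.
#   import torchvision as tv
#   pipeline = tv.transforms.Compose([
#       PilToNumpy(),
#       some_numpy_corruption,  # hypothetical callable on uint8 HWC arrays
#       NumpyToTensor(),
#   ])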
|
augmentation-corruption-fbr_main
|
imagenet_c_bar/utils/converters.py
|