python_code | repo_name | file_path
---|---|---
import math
import torch
import torch.nn.functional as F
import opt_einsum as oe
from einops import repeat, rearrange
from model.functional.krylov import krylov
from model.ssm.companion import CompanionSSM
class ClosedLoopCompanionSSM(CompanionSSM):
"""
Closed-loop implementation of Companion SSM:
- Instantiate A, B, C; so we can compute both:
- Open-loop inference:
-> y_{n + h} = \sum_{i = 0}^{n + h - 1} CA^{n + h - 1 - i} B u_i
- Closed-loop inference:
-> y_{n + h} = C(A + BK)^{h} x_n
= C(A + BK)^{h} \sum_{j = 0}^{n - 1} A^{n - 1 - j} B u_j
= C(A + BK)^{n + h - 1} x_1
= C(A + BK)^{n + h - 1} B u_0
= \sum_{i = 0}^{n + h - 1} C(A + BK)^{n + h - 1 - i} B u_i, u_j = 0 for j > 0
"""
def __init__(self,
lag: int=1,
horizon: int=1,
use_initial: bool=False,
**kwargs):
self.lag = lag
self.horizon = horizon
self.use_initial = use_initial # When False, assumes initial hidden_state x_0 = 0. True not implemented
self.closed_loop = True # Toggle closed or open-loop forward pass, see self.forward
self.inference_only = False # Toggle different behavior during training and test
kwargs['kernel_repeat'] = 1
kwargs['kernel_weights'] = None
kwargs['kernel_train'] = True
kwargs['skip_connection'] = False
super().__init__(**kwargs)
def init_kernel_weights(self, kernel_init):
if kernel_init == 'normal':
kernel = torch.randn(self.n_kernels, self.kernel_dim)
elif kernel_init == 'xavier':
# Xavier-ish initialization
stdv = 1. / math.sqrt(self.kernel_dim)
kernel = torch.FloatTensor(
self.n_kernels, self.kernel_dim).uniform_(-stdv, stdv)
else:
raise NotImplementedError
return kernel
def init_weights(self):
super().init_weights() # Initializes skip connection, A, B, C
# K matrix
k = self.init_kernel_weights(self.kernel_init)
self.register("k", k, trainable=True, lr=None, wd=None)
def get_companion_matrix(self, p):
# Construct companion matrix
return self.shift_matrix.to(p.device) + (
oe.contract('h i, h j -> h j i',
self.p_padding.to(p.device), p)
)
def fft_conv_d(self, u, v):
L = u.shape[-1]
u_f = torch.fft.rfft(u, n=2*L, dim=2) # (B H L)
v_f = torch.fft.rfft(v, n=2*L, dim=2) # (H D L)
y_f = oe.contract('b h l, h d l -> b h l d', u_f, v_f)
y = torch.fft.irfft(y_f, n=2*L, dim=2)[:, :, :L, :] # (B H L D)
return y
def forward(self, u):
"""
During training, call this function twice to compute closed-loop and open-loop
-> minimize the closed-loop?
"""
u = rearrange(u, 'b l d -> b d l')
b, d, l = u.shape
l_horizon = self.horizon
# Normalize just the non-shift column,
# alternatively could normalize A + BK below
a = (self.norm(self.a, ord=self.norm_order)
if self.norm_order > 0 else self.a)
A = self.get_companion_matrix(a)
if self.closed_loop: # Compute closed-loop forecast
# Compute hidden state
# -> x_lag = \sum_{i = 0}^{lag - 1} A^{lag - 1 - i}B u_i
k_x = krylov(l, A, self.b, c=None).to(u.device)
x = self.fft_conv_d(u, k_x) # shape: B x H x L x D
# Compute A + BK matrix
b = (self.norm(self.b, ord=self.norm_order)
if self.norm_order > 0 else self.b)
k = (self.norm(self.k, ord=self.norm_order)
     if self.norm_order > 0 else self.k)
A_BK = A + oe.contract('h i, h j -> h i j', b, k)
# Rollout: Compute C(A + BK)^{h} * x_lag and K(A + BK)^{h} * x_lag
# First compute hidden state
x = krylov(l_horizon, A_BK, x[:, :, -1, :], c=None)
# Compute predictions for layer output
c = self.norm(self.c, ord=self.norm_order) if self.norm_order > 0 else self.c
y = torch.einsum('...nl, ...n -> ...l', x, c).contiguous()
y = rearrange(y, 'b d l -> b l d')
# Compute predictions for layer next-time-step input (prior layer next-time-step output)
if not self.inference_only:  # self.closed_loop already holds in this branch
u = torch.einsum('...nl, ...n -> ...l', x, self.k).contiguous()
u = rearrange(u, 'b d l -> b l d')
else:
u = None
# Layer outputs, and next-time-step layer inputs
return y, u
else: # Compute open-loop forecast up to L
# A = self.norm(A, ord=self.norm_order)
# Return CA^{n}B where A = a is computed companion matrix from self.a
b = (self.norm(self.b, ord=self.norm_order)
if self.norm_order > 0 else self.b)
c = self.norm(self.c, ord=self.norm_order) if self.norm_order > 0 else self.c
k = krylov(l, A, b, c).to(u.device)
k = repeat(k, 'nk kd -> (kr nk nh hd) kd',
kr=self.kernel_repeat, nh=self.n_heads, hd=self.head_dim)
y = rearrange(self.fft_conv(u, k), 'b d l -> b l d')
if not self.inference_only:
_k = (self.norm(self.k, ord=self.norm_order)
      if self.norm_order > 0 else self.k)
k_u = krylov(l, A, b, _k).to(u.device)
k_u = repeat(k_u, 'nk kd -> (kr nk nh hd) kd',
kr=self.kernel_repeat, nh=self.n_heads, hd=self.head_dim)
y_u = rearrange(self.fft_conv(u, k_u), 'b d l -> b l d')
else:
y_u = None
return y, y_u
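# --- Illustrative sketch (not part of the repo) ----------------------------
# A tiny dense-matrix check of the closed-loop identity from the class
# docstring: y_{n+h} = C (A + BK)^h x_n, where x_n = \sum_j A^{n-1-j} B u_j.
# All names below (d_state, n_lag, h_horizon, ...) are made up for this demo.
if __name__ == '__main__':
    torch.manual_seed(0)
    d_state, n_lag, h_horizon = 4, 6, 3
    A = torch.randn(d_state, d_state) * 0.3
    B = torch.randn(d_state)
    C = torch.randn(d_state)
    K = torch.randn(d_state) * 0.1
    u = torch.randn(n_lag)
    # Open-loop hidden state after n_lag steps (x_0 = 0)
    x = torch.zeros(d_state)
    for t in range(n_lag):
        x = A @ x + B * u[t]
    # Closed-loop rollout x_{t+1} = (A + BK) x_t, read out y = C x
    A_BK = A + torch.outer(B, K)
    x_h = x.clone()
    for _ in range(h_horizon):
        x_h = A_BK @ x_h
    y_step = C @ x_h
    # Same forecast via the matrix power C (A + BK)^h x_n
    y_pow = C @ torch.linalg.matrix_power(A_BK, h_horizon) @ x
    assert torch.allclose(y_step, y_pow, atol=1e-5)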
|
spacetime-main
|
model/ssm/closed_loop/companion.py
|
from .companion import ClosedLoopCompanionSSM
from .shift import ClosedLoopShiftSSM
|
spacetime-main
|
model/ssm/closed_loop/__init__.py
|
import torch
import torch.nn.functional as F
import opt_einsum as oe
from einops import repeat, rearrange
from model.functional.krylov import krylov
from model.ssm.closed_loop.companion import ClosedLoopCompanionSSM
class ClosedLoopShiftSSM(ClosedLoopCompanionSSM):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def init_weights(self):
super().init_weights() # Initializes skip connection, A, B, C
# A Matrix
a = torch.zeros(self.n_kernels, self.kernel_dim)
self.register("a", a, trainable=False, lr=None, wd=None)
# B Matrix - make it not learnable
b = torch.zeros(self.n_kernels, self.kernel_dim)
b[:, 0] = 1
self.register("b", b, trainable=False, lr=None, wd=None)
# C matrix
c = self.init_kernel_weights(self.kernel_init)
self.register("c", c, trainable=True, lr=None, wd=None)
# K matrix
k = self.init_kernel_weights(self.kernel_init)
self.register("k", k, trainable=True, lr=None, wd=None)
def get_companion_matrix(self, p):
# Construct "companion" matrix
return self.shift_matrix.to(p.device)
|
spacetime-main
|
model/ssm/closed_loop/shift.py
|
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from model.ssm.base import SSM
from model.ssm.preprocess.differencing import get_pascal
class ResidualSSM(SSM):
"""
Computes both order-N differencing and moving average residuals over input sequence
"""
def __init__(self,
max_diff_order: int=4,
min_avg_window: int=4,
max_avg_window: int=720,
n_kernels: int=8,
kernel_repeat: int=16,
**kwargs):
self.max_diff_order = max_diff_order
self.min_avg_window = min_avg_window
self.max_avg_window = max_avg_window
self.n_ma_kernels = (n_kernels - self.max_diff_order) * kernel_repeat
kwargs['n_heads'] = 1
kwargs['kernel_weights'] = None
kwargs['kernel_train'] = False
kwargs['skip_connection'] = False
# Set kwargs['kernel_repeat'] to number of model n_kernels
super().__init__(n_kernels=n_kernels, kernel_repeat=kernel_repeat, **kwargs)
def init_weights(self):
diff_kernel = repeat(self.init_differencing_weights(), 'nk kd -> (kr nk) kd',
kr=self.kernel_repeat)
ma_r_kernel = self.init_moving_average_weights() # Shape: (kr x nk) x hd
self.register('diff_kernel', diff_kernel, trainable=False, lr=None, wd=None)
self.register('ma_r_kernel', ma_r_kernel, trainable=False, lr=None, wd=None)
def init_differencing_weights(self):
kernel = torch.zeros(self.max_diff_order, self.max_diff_order).float()
diff_coeffs = get_pascal(self.max_diff_order, self.max_diff_order).float()
kernel[:, :self.max_diff_order] += diff_coeffs
return kernel
def init_moving_average_weights(self):
ma_window = torch.randint(low=self.min_avg_window,
high=self.max_avg_window,
size=(1, self.n_ma_kernels))
# Compute moving average kernel
max_window = self.max_avg_window
kernel = torch.zeros(self.n_ma_kernels, max_window)
kernel[:, 0] = 1.
moving_avg = (1. / torch.clamp(ma_window, min=self.min_avg_window, max=max_window))
for ix, window in enumerate(ma_window[0]):
kernel[ix, :window] -= moving_avg[:1, ix]
return kernel
def get_kernel(self, u):
"""
Initialize weights for differencing kernel
- Assume u is shape B x D x L
"""
b, d, l = u.shape
l = max(l, self.diff_kernel.shape[1])
# Pad kernels to input length
diff_kernel = F.pad(self.diff_kernel, (0, l - self.diff_kernel.shape[1]), 'constant', 0)
ma_r_kernel = F.pad(self.ma_r_kernel, (0, l - self.ma_r_kernel.shape[1]), 'constant', 0)
# Combine kernels
diff_kernel = rearrange(diff_kernel, '(kr nk) kd -> kr nk kd',
kr=self.kernel_repeat)
ma_r_kernel = rearrange(ma_r_kernel, '(kr nk) kd -> kr nk kd',
kr=self.kernel_repeat)
kernel = torch.cat([diff_kernel, ma_r_kernel], dim=1)
kernel = repeat(kernel, 'kr nk kd -> (kr nk hd) kd', hd=self.head_dim)
return kernel
def forward(self, u):
# Same as base SSM forward, but kernel repeating already taken care of
u = rearrange(u, 'b l d -> b d l')
k = self.get_kernel(u)
y = self.fft_conv(u, k)
return rearrange(y, 'b d l -> b l d')
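# --- Illustrative sketch (not part of the repo) ----------------------------
# What the moving-average residual kernel computes: causally convolving u
# with k = [1 - 1/w, -1/w, ..., -1/w, 0, ...] gives u[t] - mean(u[t-w+1:t+1]).
if __name__ == '__main__':
    torch.manual_seed(0)
    w, l = 4, 10
    u = torch.randn(l)
    k = torch.zeros(l)
    k[0] = 1.
    k[:w] -= 1. / w
    y = torch.stack([sum(k[j] * u[t - j] for j in range(t + 1)) for t in range(l)])
    t = 7  # any t >= w - 1
    assert torch.allclose(y[t], u[t] - u[t - w + 1: t + 1].mean(), atol=1e-5)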
|
spacetime-main
|
model/ssm/preprocess/residual.py
|
import torch.nn as nn
from .differencing import DifferencingSSM
from .ma_residual import MovingAvgResidualSSM
from .residual import ResidualSSM
def init_preprocess_ssm(config):
if config['method'] == 'differencing':
ssm = DifferencingSSM
elif config['method'] == 'ma_residual':
ssm = MovingAvgResidualSSM
elif config['method'] == 'residual':
ssm = ResidualSSM
elif config['method'] in ['identity', None]:
return nn.Identity()
else:
raise NotImplementedError(f"Preprocessing config method {config['method']} not implemented!")
return ssm(**config['kwargs'])
|
spacetime-main
|
model/ssm/preprocess/__init__.py
|
import torch
import torch.nn.functional as F
from model.ssm.base import SSM
class DifferencingSSM(SSM):
"""
Computes order-N differencing over input sequence
"""
def __init__(self, max_diff_order=4, **kwargs):
self.max_diff_order = max_diff_order
kwargs['n_heads'] = 1
kwargs['kernel_weights'] = None
kwargs['kernel_train'] = False
kwargs['skip_connection'] = False
# Set kwargs['kernel_repeat'] to number of model n_kernels
super().__init__(**kwargs)
def init_weights(self):
kernel = torch.zeros(self.n_kernels, self.kernel_dim).float()
# Hard-coded up to 4 orders, but just the binomial coeffs / Pascal's triangle (with negatives)
diff_coeffs = get_pascal(self.max_diff_order)
# Could be slow, but just done once at initialization
for ix in range(self.n_kernels):
    # Cycle through the difference orders across kernels
    kernel[ix, :self.max_diff_order] += diff_coeffs[ix % len(diff_coeffs)].float()
self.register('kernel', kernel, trainable=False, lr=None, wd=None)
def get_kernel(self, u):
"""
Initialize weights for differencing kernel
- Assume u is shape B x D x L
"""
b, d, l = u.shape
return F.pad(self.kernel, (0, l-self.kernel_dim, 0, 0), 'constant', 0)
def get_pascal(n, total_rows=None):
total_rows = n if total_rows is None else total_rows
# Compute binomial coeffs for all rows up to n
line = torch.zeros(total_rows, n).float()
line[:, 0] = 1.
for j in range(1, n): # For all rows,
for k in range(0, j): # Compute C(j, k)
# Coefficients are binomial coeffs,
# C(n, k + 1) = C(n, k) * (n - k) / (k + 1)
negate = -1  # alternating signs: row j ends up holding the coefficients of (1 - x)^j
line[j][k+1] += (line[j][k] * (j - k) / (k + 1)) * negate
return line
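# Illustrative sanity check: each row j of get_pascal holds the signed
# binomial coefficients of (1 - x)^j, i.e. the order-j differencing weights.
if __name__ == '__main__':
    print(get_pascal(4))
    # tensor([[ 1.,  0.,  0.,  0.],
    #         [ 1., -1.,  0.,  0.],
    #         [ 1., -2.,  1.,  0.],
    #         [ 1., -3.,  3., -1.]])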
|
spacetime-main
|
model/ssm/preprocess/differencing.py
|
import torch
import torch.nn.functional as F
from model.ssm.base import SSM
class MovingAvgResidualSSM(SSM):
"""
Computes moving average residuals over input sequence
"""
def __init__(self, min_avg_window=4, max_avg_window=720, **kwargs):
self.min_avg_window = min_avg_window
self.max_avg_window = max_avg_window
kwargs['n_heads'] = 1
kwargs['kernel_weights'] = None
kwargs['kernel_train'] = False
kwargs['skip_connection'] = False
# Set kwargs['kernel_repeat'] to number of model n_kernels
super().__init__(**kwargs)
def init_weights(self):
# Moving average window kernels
kernel = torch.zeros(self.n_kernels, self.kernel_dim).float()
kernel[:, 0] = 1.
# Low is a heuristic for now
ma_window = torch.randint(low=self.min_avg_window,
high=self.max_avg_window, # self.kernel_dim
size=(1, self.n_kernels)).float()
self.register('ma_window', ma_window, trainable=True, lr=None, wd=None)
self.register('kernel', kernel, trainable=False, lr=None, wd=None)
def get_kernel(self, u):
"""
Initialize weights for differencing kernel
- Assume u is shape B x D x L
"""
b, d, l = u.shape
# Set kernel values s.t. convolution computes residuals
# from moving average, i.e., y[t] - mean(y[t:t - m])
max_window = min(self.max_avg_window, l)
kernel = self.kernel - (1. / torch.clamp(torch.round(self.ma_window),
                                         min=self.min_avg_window,
                                         max=max_window).T)
return F.pad(kernel, (0, l - self.kernel_dim, 0, 0), 'constant', 0)
|
spacetime-main
|
model/ssm/preprocess/ma_residual.py
|
import torch.nn as nn
from .base import Embedding
class LinearEmbedding(Embedding):
def __init__(self, input_dim, embedding_dim):
super().__init__(input_dim, embedding_dim)
def initialize_layers(self):
self.layers = nn.Linear(self.input_dim, self.embedding_dim)
|
spacetime-main
|
model/embedding/linear.py
|
from .base import Embedding
from .linear import LinearEmbedding
from .repeat import RepeatEmbedding
def init_embedding(config):
methods = ['linear', 'identity', 'repeat']
if config['method'] == 'linear':
return LinearEmbedding(**config['kwargs'])
elif config['method'] == 'repeat':
return RepeatEmbedding(**config['kwargs'])
elif config['method'] == 'identity' or config['method'] is None:
    return Embedding(**config['kwargs'])
else:
    raise NotImplementedError(f"Embedding method {config['method']} not implemented. Please select among {methods}")
|
spacetime-main
|
model/embedding/__init__.py
|
from einops import repeat
from .base import Embedding
class RepeatEmbedding(Embedding):
def __init__(self,
input_dim: int,
embedding_dim: int=None,
n_heads: int=None,
n_kernels: int=None):
if embedding_dim is None:
    if n_heads is None or n_kernels is None:
        raise ValueError('If embedding_dim not specified, must specify n_kernels and n_heads')
    embedding_dim = input_dim * n_heads * n_kernels
else:
assert embedding_dim % input_dim == 0, 'Embedding_dim should be multiple of input_dim'
super().__init__(input_dim, embedding_dim)
def repeat(self, x):
return repeat(x, 'b l d -> b l (r d)',
r=self.embedding_dim // self.input_dim)
def initialize_layers(self):
self.layers = self.repeat
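# Usage sketch (illustrative values; run via the package since this module
# uses relative imports): with input_dim=2 and embedding_dim=6, each feature
# vector is tiled 3 times along the channel dimension.
if __name__ == '__main__':
    import torch
    emb = RepeatEmbedding(input_dim=2, embedding_dim=6)
    x = torch.randn(1, 5, 2)  # (batch, length, dim)
    assert emb(x).shape == (1, 5, 6)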
|
spacetime-main
|
model/embedding/repeat.py
|
import torch.nn as nn
class Embedding(nn.Module):
def __init__(self,
input_dim: int,
embedding_dim: int):
"""
Generic class for encoding
"""
super().__init__()
self.input_dim = input_dim
self.embedding_dim = embedding_dim
self.initialize_layers()
def initialize_layers(self):
self.layers = nn.Identity()
def forward(self, x):
return self.layers(x)
|
spacetime-main
|
model/embedding/base.py
|
import math
import torch
import torch.nn.functional as F
from einops import rearrange, reduce
def companion_from_p(p):
"""
Arguments:
p: (..., d)
Return:
A: (..., d, d)
"""
batch_size, d = p.shape[:-1], p.shape[-1]
A = torch.zeros(*batch_size, d, d, dtype=p.dtype, device=p.device)
A[..., 1:, :-1] = torch.eye(d - 1, dtype=p.dtype, device=p.device)
A[..., -1] = p
return A
def companion_krylov(L, p, b, c=None, c_tilde=None):
"""
Compute the Krylov matrix (c^T b, c^T A b, c^T A^2 b, ...), where A = shift + p e_d^T.
Arguments:
p: (..., d), real
b: (..., d), real
c: (..., d), real. One can instead supply c_tilde (below).
c_tilde: (..., d), real, where c_tilde = c^T (I - A^L)
At least c or c_tilde must be supplied.
"""
d = p.shape[-1]
batch_size = p.shape[:-1]
e_d = torch.zeros(*batch_size, d, device=p.device, dtype=p.dtype)
e_d[..., -1] = 1.0
assert e_d.shape == p.shape
assert b.shape == p.shape
if c_tilde is None:
assert c is not None, 'at least c or c_tilde must be supplied'
assert c.shape == p.shape
A = companion_from_p(p)
c_tilde = c - torch.einsum('...m,...mn->...n', c, torch.linalg.matrix_power(A, L).to(dtype=c.dtype))
else:
assert c_tilde.shape == p.shape
def fft_conv(u, v): # This is actually convolution and not cross-correlation
d = u.shape[-1]
u_f = torch.fft.rfft(u, n=2 * d)
v_f = torch.fft.rfft(v, n=2 * d)
return torch.fft.irfft(u_f * v_f.conj(), n=2 * d)[..., :d]
def quadratic_form(u, v):
d_rounded = math.ceil(d / L) * L
# The reduce is to deal with the case where d > L
return torch.fft.rfft(reduce(F.pad(fft_conv(u, v), (0, d_rounded - d)),
'... (m L) -> ... L', L=L, reduction='sum'), n=L)
Zconj = torch.exp(1j * 2 * math.pi * torch.arange(L // 2 + 1, dtype=torch.float32, device=p.device) / L)
# woodbury = quadratic_form(c_tilde, b) + quadratic_form(c_tilde, p) * quadratic_form(e_d, b) / (Zconj - quadratic_form(e_d, p))
quad = quadratic_form(rearrange(torch.stack([c_tilde, e_d], dim=-2), '... two d -> ... two 1 d'),
rearrange(torch.stack([b, p], dim=-2), '... two d -> ... 1 two d'))
woodbury = quad[..., 0, 0, :] + quad[..., 0, 1, :] * quad[..., 1, 0, :] / (Zconj - quad[..., 1, 1, :])
woodbury_irfft = torch.fft.irfft(woodbury, n=L)
return woodbury_irfft
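# Note on the fast path above (explanatory comment, not from the original):
# the kernel k_l = c_tilde^T A^l b, l = 0..L-1, is recovered from its length-L
# DFT. With A = shift + p e_d^T, the resolvent quadratic form
# c_tilde^T (I - z A)^{-1} b splits into the shift part, which reduces to a
# causal convolution (what quadratic_form evaluates via rfft), plus a rank-one
# correction folded in with the Woodbury identity -- the `woodbury` line above
# mirrors the commented-out scalar formula.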
if __name__ == '__main__':
torch.manual_seed(0)
d = 25
L = 9
H = 2
p = torch.randn(H, d)
p /= torch.linalg.norm(p, ord=1, dim=-1, keepdim=True)
b = torch.randn(H, d)
c = torch.randn(H, d)
A = companion_from_p(p)
from model.functional.krylov import krylov
K = krylov(L, A, b, c)
K_fast = companion_krylov(L, p, b, c=c)
print((K - K_fast).abs().max())
from benchmarks.utils import benchmark_all
torch.manual_seed(0)
d = 512
L = 1024
H = 256
p = torch.randn(H, d, device='cuda', requires_grad=True)
p = p / torch.linalg.norm(p, ord=1, dim=-1, keepdim=True)
b = torch.randn(H, d, device='cuda', requires_grad=True)
c = torch.randn(H, d, device='cuda', requires_grad=True)
A = companion_from_p(p)
benchmark_all(krylov, L, A, b, c, desc='krylov')
benchmark_all(companion_krylov, L, p, b, c, desc='companion fast krylov')
benchmark_all(companion_krylov, L, p, b, c_tilde=c, desc='companion fast krylov c_tilde')
|
spacetime-main
|
model/functional/companion_krylov.py
|
"""
Fast and helpful functions in the style of torch.nn.functional.
Credit to Albert Hungry Hippo Gu and Tri Flying Butterfly Dao.
- companion_krylov.py from Tri
- Others from Albert: https://github.com/HazyResearch/state-spaces/tree/main/src/models/functional
"""
|
spacetime-main
|
model/functional/__init__.py
|
""" pykeops implementations of the core Cauchy kernel used in the S3 algorithm.
The interface of the Cauchy multiplication is:
v: (N)
z: (L)
w: (N)
Return: y (L)
y_k = \sum_i v_i / (z_k - w_i)
"""
if __name__ == '__main__':
import sys
import pathlib
p = pathlib.Path().absolute()
print("Adding path: ", p)
sys.path.append(str(p))
import math
import torch
from einops import rearrange
import os
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def cauchy_slow(v, z, w, conj=True):
"""
v: (..., N)
z: (..., L)
w: (..., N)
returns: (..., L) \sum v/(z-w)
"""
if conj:
v = _conj(v)
w = _conj(w)
cauchy_matrix = v.unsqueeze(-1) / (z.unsqueeze(-2) - w.unsqueeze(-1)) # (... N L)
return torch.sum(cauchy_matrix, dim=-2)
def cauchy_lazy(v, z, w, conj=True):
if conj:
v = _conj(v)
w = _conj(w)
v, z, w = _broadcast_dims(v, z, w)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
w_l = LazyTensor(rearrange(w, '... N -> ... N 1 1'))
z_l = LazyTensor(rearrange(z, '... L -> ... 1 L 1'))
sub = z_l - w_l # (b N L 1), for some reason it doesn't display the last dimension
div = v_l / sub
s = div.sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def cauchy(v, z, w, conj=False):
expr = 'ComplexDivide(v, z-w)'
cauchy_mult = Genred(
expr,
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
if conj:
v = _conj(v)
w = _conj(w)
v, z, w = _broadcast_dims(v, z, w)
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
def cauchy_real(v, z, w):
expr = 'v / (z - w)'
cauchy_mult = Genred(
expr,
[
'v = Vj(1)',
'z = Vi(1)',
'w = Vj(1)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = v.unsqueeze(-1)
z = z.unsqueeze(-1)
w = w.unsqueeze(-1)
r = cauchy_mult(v, z, w, backend='GPU')
return r
def cauchy_conj(v, z, w, num=2, denom=2):
if num == 1:
expr_num = 'z * ComplexReal(v) - Real2Complex(ComplexReal(v)*ComplexReal(w) + ComplexImag(v)*ComplexImag(w))'
elif num == 2:
expr_num = 'z * ComplexReal(v) - Real2Complex(Sum(v * w))'
else: raise NotImplementedError
if denom == 1:
expr_denom = 'ComplexMult(z-Real2Complex(ComplexReal(w)), z-Real2Complex(ComplexReal(w))) + Real2Complex(Square(ComplexImag(w)))'
elif denom == 2:
expr_denom = 'ComplexMult(z-w, z-Conj(w))'
else: raise NotImplementedError
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = 2*cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
def cauchy_conj_components(v, z, w):
""" Assumes z is pure imaginary (as in S4 with bilinear) """
expr_num = 'Imag2Complex(zi*vr) - Real2Complex(vr*wr + vi*wi)'
expr_denom = 'Real2Complex(Square(wr)+Square(wi)-Square(zi)) - Imag2Complex(IntCst(2)*zi*wr)'
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
[
'vr = Vj(1)',
'vi = Vj(1)',
'wr = Vj(1)',
'wi = Vj(1)',
'zi = Vi(1)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = v.unsqueeze(-1)
z = z.unsqueeze(-1)
w = w.unsqueeze(-1)
v_r, v_i = v.real.contiguous(), v.imag.contiguous()
w_r, w_i = w.real.contiguous(), w.imag.contiguous()
z_i = z.imag.contiguous()
r = 2*cauchy_mult(v_r, v_i, w_r, w_i, z_i, backend='GPU')
return _r2c(r)
def cauchy_conj_components_lazy(v, z, w, type=1):
v, z, w = _broadcast_dims(v, z, w)
v_r, v_i = v.real.contiguous(), v.imag.contiguous()
w_r, w_i = w.real.contiguous(), w.imag.contiguous()
z_i = z.imag.contiguous()
v_r = LazyTensor(rearrange(v_r, '... N -> ... 1 N 1'))
v_i = LazyTensor(rearrange(v_i, '... N -> ... 1 N 1'))
w_r = LazyTensor(rearrange(w_r, '... N -> ... 1 N 1'))
w_i = LazyTensor(rearrange(w_i, '... N -> ... 1 N 1'))
z_i = LazyTensor(rearrange(z_i, '... L -> ... L 1 1'))
if type == 1:
num = -v_r*w_r-v_i*w_i + 1j* z_i*v_r
denom = w_r**2+w_i**2-z_i**2 - 2j*w_r*z_i
else:
# z = torch.complex(-w_r, z_i) # Not supported
z = -w_r + 1j* z_i
num = v_r * z - v_i*w_i
denom = z*z + w_i**2 # z**2 is bugged for complex
r = num / denom
r = 2*r.sum(dim=len(z_i.shape)-1)
return r.squeeze(-1)
def cauchy_conj2(v, z, w):
expr = 'ComplexDivide(v, z-w) + ComplexDivide(Conj(v), z-Conj(w))'
# expr = 'ComplexDivide(v, z-w)'
cauchy_mult = Genred(
expr,
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
# Inputs are assumed complex; always convert to real (..., 2) views for pykeops
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
def trigger_compilation():
""" Small function to trigger the compilation of a pykeops kernel
Used in scenarios where we must manually control compilation, e.g. the multi-gpu case (https://github.com/getkeops/keops/issues/168) """
B = 2
N = 4
L = 16
w = torch.randn(B, N//2, dtype=torch.cfloat, device='cuda')
v = torch.randn(B, N//2, dtype=torch.cfloat, device='cuda')
z = torch.randn(B, L, dtype=torch.cfloat, device='cuda')
w.requires_grad = True
v.requires_grad = True
cauchy_conj(v, z, w)
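# CPU-only sanity check for the reference implementation (illustrative; the
# pykeops variants above need a GPU and compiled kernels).
if __name__ == '__main__':
    torch.manual_seed(0)
    N, L = 8, 5
    v = torch.randn(N, dtype=torch.cfloat)
    w = torch.randn(N, dtype=torch.cfloat)
    z = torch.randn(L, dtype=torch.cfloat)
    y = cauchy_slow(v, z, w, conj=False)
    y_ref = torch.stack([(v / (zk - w)).sum() for zk in z])
    assert torch.allclose(y, y_ref, atol=1e-5)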
|
spacetime-main
|
model/functional/cauchy.py
|
""" Compute a Krylov function efficiently. (S3 renames the Krylov function to a "state space kernel")
A : (N, N)
b : (N,)
c : (N,)
Return: [c^T A^i b for i in [L]]
"""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from model.functional.toeplitz import causal_convolution
def krylov_sequential(L, A, b, c=None):
""" Constant matrix A
A : (..., N, N)
b : (..., N)
c : (..., N)
Returns
if c:
x : (..., L)
x[i, l] = c[i] @ A^l @ b[i]
else:
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
# Check which of dim b and c is smaller to save memory
if c is not None and c.numel() < b.numel():
return krylov_sequential(L, A.transpose(-1, -2), c, b)
b_ = b
x = []
for _ in range(L):
if c is not None:
x_ = torch.sum(c*b_, dim=-1) # (...) # could be faster with matmul or einsum?
else:
x_ = b_
x.append(x_)
b_ = (A @ b_.unsqueeze(-1)).squeeze(-1)
x = torch.stack(x, dim=-1)
return x
def krylov(L, A, b, c=None, return_power=False):
"""
Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L, f'expected kernel length {L}, got {x.shape[-1]}'
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
x = x.contiguous() # WOW!!
if return_power:
return x, AL
else:
return x
def power(L, A, v=None):
""" Compute A^L and the scan sum_i A^i v_i
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
def krylov_toeplitz(L, A, b, c=None):
""" Specializes to lower triangular Toeplitz matrix A represented by its diagonals
A : (..., N)
b : (..., N)
c : (..., N)
Returns
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
x = b.unsqueeze(0) # (1, ..., N)
A_ = A
while x.shape[0] < L:
xx = causal_convolution(A_, x)
x = torch.cat([x, xx], dim=0) # there might be a more efficient way of ordering axes
A_ = causal_convolution(A_, A_)
x = x[:L, ...] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
def krylov_toeplitz_(L, A, b, c=None):
""" Padded version of krylov_toeplitz that saves some fft's
TODO currently not faster than original version, not sure why
"""
N = A.shape[-1]
x = b.unsqueeze(0) # (1, ..., N)
x = F.pad(x, (0, N))
A = F.pad(A, (0, N))
done = L == 1
while not done:
l = x.shape[0]
# Save memory on last iteration
if L - l <= l:
done = True
_x = x[:L-l]
else: _x = x
Af = torch.fft.rfft(A, n=2*N, dim=-1)
xf = torch.fft.rfft(_x, n=2*N, dim=-1)
xf_ = Af * xf
x_ = torch.fft.irfft(xf_, n=2*N, dim=-1)
x_[..., N:] = 0
x = torch.cat([x, x_], dim=0) # there might be a more efficient way of ordering axes
if not done:
A = torch.fft.irfft(Af*Af, n=2*N, dim=-1)
A[..., N:] = 0
x = x[:L, ..., :N] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
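# Sanity check (illustrative): the squaring-trick krylov matches the
# sequential reference on a small random instance.
if __name__ == '__main__':
    torch.manual_seed(0)
    N, L = 6, 10
    A = torch.randn(N, N) / N
    b = torch.randn(N)
    c = torch.randn(N)
    assert torch.allclose(krylov(L, A, b, c),
                          krylov_sequential(L, A, b, c), atol=1e-4)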
|
spacetime-main
|
model/functional/krylov.py
|
""" Custom implementation of fast complex operations.
This was written during earlier versions of Pytorch.
Later versions have native support for complex numbers and much of this is no longer necessary.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.dlpack import to_dlpack, from_dlpack
use_cupy = True
try:
import cupy as cp
except:
use_cupy = False
use_pt_native = hasattr(torch, 'view_as_complex')
def complex_mul_native(X, Y):
return torch.view_as_real(torch.view_as_complex(X) * torch.view_as_complex(Y))
def conjugate_native(X):
return torch.view_as_real(torch.view_as_complex(X).conj())
def torch2numpy(X):
"""Convert a torch float32 tensor to a numpy array, sharing the same memory.
"""
return X.detach().numpy()
def torch2cupy(tensor):
return cp.fromDlpack(to_dlpack(tensor.cuda()))
def cupy2torch(tensor):
return from_dlpack(tensor.toDlpack())
def real_to_complex(X):
"""A version of X that's complex (i.e., last dimension is 2).
Parameters:
X: (...) tensor
Return:
X_complex: (..., 2) tensor
"""
return torch.stack((X, torch.zeros_like(X)), dim=-1)
def conjugate_torch(X):
assert X.shape[-1] == 2, 'Last dimension must be 2'
return X * torch.tensor((1, -1), dtype=X.dtype, device=X.device)
class Conjugate(torch.autograd.Function):
'''X is a complex64 tensors but stored as float32 tensors, with last dimension = 2.
'''
@staticmethod
def forward(ctx, X):
assert X.shape[-1] == 2, 'Last dimension must be 2'
if X.is_cuda:
if use_cupy:
# TODO: do we need .contiguous here? I think it doesn't work if the last dimension isn't contiguous
return cupy2torch(torch2cupy(X).view('complex64').conj().view('float32'))
else:
return conjugate_torch(X)
else:
return torch.from_numpy(np.ascontiguousarray(torch2numpy(X)).view('complex64').conj().view('float32'))
@staticmethod
def backward(ctx, grad):
return Conjugate.apply(grad)
conjugate = conjugate_native if use_pt_native else Conjugate.apply
def complex_mul_torch(X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
return torch.stack(
(X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
dim=-1)
def complex_mul_numpy(X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
X_np = np.ascontiguousarray(torch2numpy(X)).view('complex64')
Y_np = np.ascontiguousarray(torch2numpy(Y)).view('complex64')
return torch.from_numpy((X_np * Y_np).view('float32'))
class ComplexMul(torch.autograd.Function):
'''X and Y are complex64 tensors but stored as float32 tensors, with last dimension = 2.
'''
@staticmethod
def forward(ctx, X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
ctx.save_for_backward(X, Y)
if X.is_cuda:
assert Y.is_cuda, 'X and Y must both be torch.cuda.FloatTensor'
if use_cupy:
# TODO: do we need .contiguous here? I think it doesn't work if the last dimension isn't contiguous
return cupy2torch((torch2cupy(X).view('complex64') * torch2cupy(Y).view('complex64')).view('float32'))
else:
return complex_mul_torch(X, Y)
else:
assert not Y.is_cuda, 'X and Y must both be torch.FloatTensor'
X_np = np.ascontiguousarray(torch2numpy(X)).view('complex64')
Y_np = np.ascontiguousarray(torch2numpy(Y)).view('complex64')
return torch.from_numpy((X_np * Y_np).view('float32'))
@staticmethod
def backward(ctx, grad):
X, Y = ctx.saved_tensors
grad_X, grad_Y = None, None
if ctx.needs_input_grad[0]:
grad_X = ComplexMul.apply(grad, conjugate(Y)).sum_to_size(*X.shape)
if ctx.needs_input_grad[1]:
grad_Y = ComplexMul.apply(grad, conjugate(X)).sum_to_size(*Y.shape)
# grad_X, grad_Y = ComplexMul.apply(grad, conjugate(Y)), ComplexMul.apply(grad, conjugate(X))
# # Need to sum over dimensions that were broadcasted
# grad_X = grad_X.sum_to_size(*X.shape)
# grad_Y = grad_Y.sum_to_size(*Y.shape)
# dims_to_sum_X = [-i for i in range(1, X.dim() + 1) if X.shape[-i] != grad.shape[-i]]
# dims_to_sum_Y = [-i for i in range(1, Y.dim() + 1) if Y.shape[-i] != grad.shape[-i]]
# if dims_to_sum_X: # If empty list is passed to sum, it sums all the dimensions
# grad_X = grad_X.sum(dim=dims_to_sum_X, keepdim=True)
# if dims_to_sum_Y: # If empty list is passed to sum, it sums all the dimensions
# grad_Y = grad_Y.sum(dim=dims_to_sum_Y, keepdim=True)
# if grad.dim() > X.dim():
# grad_X = grad_X.sum(tuple(range(grad.dim() - X.dim())))
# if grad.dim() > Y.dim():
# grad_Y = grad_Y.sum(tuple(range(grad.dim() - Y.dim())))
return grad_X, grad_Y
complex_mul = ComplexMul.apply if use_cupy else complex_mul_torch
if use_pt_native:
complex_mul = complex_mul_native
# @profile
# def complex_mul(X, Y):
# assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
# prod = X.unsqueeze(-1) * Y.unsqueeze(-2)
# real = prod[..., 0, 0] - prod[..., 1, 1]
# imag = prod[..., 0, 1] + prod[..., 1, 0]
# return torch.stack( (real, imag), dim=-1)
# TODO maybe optimizations to be had by wrapping this into a function
# real = X.select(-1, 0) * Y.select(-1, 0) - X.select(-1, 1) * Y.select(-1, 1)
# imag = X.select(-1, 0) * Y.select(-1, 1) + X.select(-1, 1) * Y.select(-1, 0)
# return torch.stack( (real, imag), dim=-1)
# return torch.stack(
# (X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
# X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
# dim=-1)
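# Sanity check (illustrative): the hand-rolled multiply matches native
# complex multiplication on tensors stored as (..., 2) real pairs.
if __name__ == '__main__':
    X = torch.randn(3, 4, 2)
    Y = torch.randn(3, 4, 2)
    ref = torch.view_as_real(torch.view_as_complex(X) * torch.view_as_complex(Y))
    assert torch.allclose(complex_mul_torch(X, Y), ref, atol=1e-6)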
|
spacetime-main
|
model/functional/complex.py
|
""" Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
# import torch.nn as nn
import torch.nn.functional as F
# from model.complex import complex_mul
# from pytorch_memlab import profile
def construct_toeplitz(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] *= f
return K
def triangular_toeplitz_multiply_(u, v, sum=None):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = u_f * v_f
if sum is not None:
uv_f = uv_f.sum(dim=sum)
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
def triangular_toeplitz_multiply_padded_(u, v):
""" Same as triangular_toeplitz_multiply but inputs and output assume to be 0-padded already. """
n = u.shape[-1]
assert n % 2 == 0
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n:] = 0
return output
class TriangularToeplitzMult(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
return triangular_toeplitz_multiply_(u, v)
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultFast(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad.flip(-1), (0, n))
g_f = torch.fft.rfft(g_expand, n=2*n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=2*n, dim=-1)[..., :n]
d_v = torch.fft.irfft(gu_f, n=2*n, dim=-1)[..., :n]
d_u = d_u.flip(-1)
d_v = d_v.flip(-1)
return d_u, d_v
class TriangularToeplitzMultPadded(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
output = triangular_toeplitz_multiply_(u, v)
return output
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_padded_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_padded_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultPaddedFast(torch.autograd.Function):
""" Trade off speed (20-25% faster) for more memory (20-25%) """
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n//2:].zero_()
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad[..., :n//2].flip(-1), (0, n//2))
g_f = torch.fft.rfft(g_expand, n=n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=n, dim=-1)
d_v = torch.fft.irfft(gu_f, n=n, dim=-1)
d_u[..., n//2:].zero_()
d_v[..., n//2:].zero_()
d_u[..., :n//2] = d_u[..., :n//2].flip(-1) # TODO
d_v[..., :n//2] = d_v[..., :n//2].flip(-1) # TODO
return d_u, d_v
# triangular_toeplitz_multiply = triangular_toeplitz_multiply_
triangular_toeplitz_multiply = TriangularToeplitzMult.apply
triangular_toeplitz_multiply_fast = TriangularToeplitzMultFast.apply
triangular_toeplitz_multiply_padded = TriangularToeplitzMultPadded.apply
triangular_toeplitz_multiply_padded_fast = TriangularToeplitzMultPaddedFast.apply
def causal_convolution(u, v, fast=True, pad=False):
if not pad and not fast:
return triangular_toeplitz_multiply(u, v)
if not pad and fast:
return triangular_toeplitz_multiply_fast(u, v)
if pad and not fast:
return triangular_toeplitz_multiply_padded(u, v)
if pad and fast:
return triangular_toeplitz_multiply_padded_fast(u, v)
def _fft(x, N): return torch.fft.rfft(F.pad(x, (0, 2*N-x.shape[-1])), n=2*N, dim=-1)
def _ifft(x, N): return torch.fft.irfft(x, n=2*N, dim=-1)[..., :N]
def causal_convolution_inverse(u):
""" Invert the causal convolution/polynomial/triangular Toeplitz matrix represented by u.
This is easiest in the polynomial view:
https://www.csa.iisc.ac.in/~chandan/courses/CNT/notes/lec5.pdf
The idea is that
h = g^{-1} (mod x^m) => 2h - gh^2 = g^{-1} (mod x^{2m})
# TODO this can be numerically unstable if input is "poorly conditioned",
# for example if u[0] is magnitudes different from the rest of u
"""
N = u.shape[-1]
v = u[..., :1].reciprocal()
while v.shape[-1] < N:
M = v.shape[-1]
v_f = _fft(v, 2*M)
u_f = _fft(u[..., :2*M], 2*M)
_v = -_ifft(u_f * v_f**2, 2*M)
_v[..., :M] = _v[..., :M] + 2*v
v = _v
# TODO contiguous?
v = v[..., :N]
return v
""" Below are experimental functions for improving the stability of LSSL/S3 algorithm. Currently not used anywhere. """
def causal_convolution_inverse_wrong(u, v):
""" Solve u * x = v. Initial attempt by inverting the multiplication algorithm, which I think doesn't work. """
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = v_f / u_f
x = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return x
def construct_toeplitz_log(v):
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] = -100.0
return K
def _logsumexp(x, dim=-1):
""" logsumexp for complex """
m = torch.max(torch.real(x), dim=dim, keepdim=True)[0]
x = x - m
x = torch.log(torch.sum(torch.exp(x), dim=dim))
x = x + m.squeeze(dim)
return x
def causal_convolution_inverse_log(u, N=-1):
""" Invert the causal convolution/polynomial/triangular Toeplitz matrix represented by u.
This is easiest in the polynomial view:
https://www.csa.iisc.ac.in/~chandan/courses/CNT/notes/lec5.pdf
The idea is that
h = g^{-1} (mod x^m) => 2h - gh^2 = g^{-1} (mod x^{2m})
# TODO this can be numerically unstable if input is "poorly conditioned",
# for example if u[0] is magnitudes different from the rest of u
"""
if N < 0:
N = u.shape[-1]
v = - u[..., :1]
while v.shape[-1] < N:
M = v.shape[-1]
_v = F.pad(v, (0, M), value=-100.0)
_v_ = construct_toeplitz_log(_v)
u_ = u[..., :2*M] if u.shape[-1] >= 2*M else F.pad(u, (0, 2*M-u.shape[-1]), value=-100.0)
_u = _logsumexp(_v_ + u_, dim=-1)
_u = _logsumexp(_v_ + _u, dim=-1)
_u = _u + torch.log(-torch.ones_like(_u))
_v = _v + torch.log(2.0 * torch.ones_like(_u))
v = _logsumexp(torch.stack([_v, _u], dim=-1), dim=-1)
# TODO contiguous?
v = v[..., :N]
check = _logsumexp(construct_toeplitz_log(v) + F.pad(u, (0, N-u.shape[-1]), value=-100.0))
print("check", check, torch.exp(check))
return v
if __name__ == '__main__':
a = torch.tensor([1., 2, 3, 4], requires_grad=True)
b = torch.tensor([5., 6, 7, 8], requires_grad=True)
a.retain_grad()
b.retain_grad()
x = triangular_toeplitz_multiply_padded(F.pad(a, (0, 4)), F.pad(b, (0, 4)))[:4]
print(x) # [5 16 34 60]
x = x.sum()
x.backward()
print(x, a.grad, b.grad) # [26 18 11 5] [10 6 3 1]
if __name__ == '__main__':
N = 4
a = torch.randn(N)
construct_toeplitz(a)
print(a)
b = causal_convolution_inverse(a)
print("inverse", b)
print("check", causal_convolution(a, b))
i = torch.zeros(N)
i[0] = 1.0
b = causal_convolution_inverse_wrong(a, i)
print(b)
print(causal_convolution(a, b))
|
spacetime-main
|
model/functional/toeplitz.py
|
from .train import train_model
from .evaluate import evaluate_model, plot_forecasts
|
spacetime-main
|
train/__init__.py
|
"""
Shared functions called during each epoch
"""
import importlib
import torch
from utils.logging import type_of_script
def initialize_shared_step(config):
step_module = importlib.import_module(f'train.step.{config.dataset_type}')
return getattr(step_module, 'shared_step')
def run_epoch(model, dataloaders, optimizer, scheduler, criterions,
config, epoch, input_transform=None, output_transform=None,
val_metric='loss', wandb=None, train=True):
# dataloaders is {'train': train_loader, 'val': val_loader, 'test': test_loader}
metrics = {split: None for split in dataloaders.keys()}
total_y = {split: None for split in dataloaders.keys()}
shared_step = initialize_shared_step(config)
for split, dataloader in dataloaders.items():
model, _metrics, y = shared_step(model, dataloader, optimizer, scheduler,
criterions, epoch, config, split,
input_transform=input_transform,
output_transform=output_transform)
metrics[split] = _metrics
total_y[split] = y
if train:
# Save checkpoints if metric better than before
save_checkpoint(model, optimizer, config, epoch, 'val', val_metric,
metrics['val'][val_metric], config.best_val_metric)
save_checkpoint(model, optimizer, config, epoch, 'train', val_metric,
metrics['train'][val_metric], config.best_train_metric)
# Update optimizer
if config.scheduler == 'plateau':
scheduler.step(metrics['val'][val_metric])
elif config.scheduler == 'timm_cosine':
scheduler.step(epoch)
return model, metrics, total_y
def better_metric(metric_a, metric_b, metric_name):
if metric_name == 'acc':
return metric_a > metric_b
else:
return metric_a < metric_b
def save_checkpoint(model, optimizer, config, epoch, split,
val_metric, run_val_metric, best_val_metric):
checkpoint_path = getattr(config, f'best_{split}_checkpoint_path')
try: # try-except here because checkpoint fname could be too long
if (better_metric(run_val_metric, best_val_metric, val_metric) or epoch == 0):
setattr(config, f'best_{split}_metric', run_val_metric)
setattr(config, f'best_{split}_metric_epoch', epoch)
torch.save({'epoch': epoch,
'val_metric': run_val_metric,
'state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, checkpoint_path)
if split == 'val' and type_of_script() == 'terminal':
print(f'-> New best {split} {val_metric} at epoch {epoch}! ({split} {val_metric}: {run_val_metric:.4f})')
except Exception as e:
print(e)
|
spacetime-main
|
train/epoch.py
|
"""
Training functions and helpers
"""
import importlib
import torch
import numpy as np
import pandas as pd # Local logging
from tqdm.auto import tqdm
from .epoch import run_epoch
def print_epoch_metrics(metrics):
for split in metrics.keys():
print('-'*4, f'{split}', '-'*4)
for k, v in metrics[split].items():
if k != 'total':
print(f'- {k}: {v:.3f}')
else:
print(f'- {k}: {int(v)}')
def train_model(model, optimizer, scheduler, dataloaders_by_split,
criterions, max_epochs, config,
input_transform=None, output_transform=None,
val_metric='loss', wandb=None, args=None,
return_best=False, early_stopping_epochs=100):
results_dict = config.log_results_dict
config.best_val_metric = 0 if val_metric == 'acc' else 1e10
config.best_val_metric_epoch = -1
config.best_train_metric = 1e10 # Interpolation / fitting also good to test
config.best_train_metric_epoch = -1
# Experiment with C coeffs
config.learned_c_weights = []
pbar = tqdm(range(max_epochs))
if input_transform is None:
input_transform = lambda x: x
if output_transform is None:
output_transform = lambda y: y
early_stopping_count = 0
for epoch in pbar:
if epoch == 0:
pbar.set_description(f'├── Epoch {epoch}')
else:
description = f'├── Epoch: {epoch}'
description += f' | Best val {val_metric}: {config.best_val_metric:.3f} (epoch = {config.best_val_metric_epoch:3d})'
for split in metrics:
if split != 'test': # Don't display test metrics during training
for metric_name, metric in metrics[split].items():
if metric_name != 'total':
description += f' | {split}/{metric_name}: {metric:.3f}'
pbar.set_description(description)
_, metrics, y = run_epoch(model, dataloaders_by_split, optimizer, scheduler,
criterions, config, epoch, input_transform, output_transform,
val_metric, wandb)
# Reset early stopping count if epoch improved
if config.best_val_metric_epoch == epoch:
early_stopping_count = 0
else:
early_stopping_count += 1
if (epoch + 1) % config.log_epoch == 0:
print_epoch_metrics(metrics)
dataset_name = config.dataset if config.variant is None else f'{config.dataset}{config.variant}'
print(f'Dataset: {dataset_name}')
print(f'Experiment: {config.experiment_name}')
if wandb is not None:
log_metrics = {}
for split in metrics.keys():
for k, v in metrics[split].items():
log_metrics[f'{split}/{k}'] = v
wandb.log(log_metrics, step=epoch)
# Initialize logging dict
for split, _metrics in metrics.items():
for k, v in _metrics.items():
if k not in results_dict:
results_dict[k] = []
break
# Actually save results
for split in metrics.keys():
results_dict['epoch'].append(epoch)
results_dict['split'].append(split)
for k, v in metrics[split].items():
results_dict[k].append(v)
# Save results locally
pd.DataFrame.from_dict(results_dict).to_csv(config.log_results_path)
if early_stopping_count == early_stopping_epochs:
print(f'Early stopping at epoch {epoch}...')
break # Exit for loop and do early stopping
print(f'-> Saved best val model checkpoint at epoch {config.best_val_metric_epoch}!')
print(f' - Saved to: {config.best_val_checkpoint_path}')
print(f'-> Saved best train model checkpoint at epoch {config.best_train_metric_epoch}!')
print(f' - Saved to: {config.best_train_checkpoint_path}')
if return_best:
best_model_dict = torch.load(config.best_val_checkpoint_path)
best_epoch = best_model_dict['epoch']
print(f'Returning best val model from epoch {best_epoch}')
model.load_state_dict(best_model_dict['state_dict'])
return model
|
spacetime-main
|
train/train.py
|
"""
Functions for evaluating trained models and plotting forecasts
"""
import torch
import matplotlib.pyplot as plt
from loss import get_loss
from .epoch import run_epoch
from utils.logging import print_header
def evaluate_model(model, **kwargs):
model.eval()
log_metrics = {}
with torch.no_grad():
_, metrics, total_y = run_epoch(model, **kwargs)
# Print out and log evaluation metrics
for split in metrics.keys():
print_header(f'Best validation model: {split} metrics')
for k, v in metrics[split].items():
print(f'- {k}: {v}')
log_metrics[f'best_val/{split}/{k}'] = v
if kwargs['wandb'] is not None:
kwargs['wandb'].log(log_metrics)
return model, log_metrics, total_y
def plot_forecasts(y_by_splits, splits, feature_dim=0, axes=None):
# Save / plot predictions
n_plots = len(splits) # hard-coded for now
if axes is None:
fig, axes = plt.subplots(1, n_plots,
figsize=(6.4 * n_plots, 4.8 * n_plots))
for split_ix, split in enumerate(splits):
y = y_by_splits[split]
# Visualization
samples_to_plot = get_plotting_samples(y)
pred_ix = 0
for pred_type, pred_samples in samples_to_plot.items():
if pred_type != 'true':
axis = axes[split_ix]
axis.plot(samples_to_plot['true'][..., feature_dim],
label='true', color='tab:orange')
axis.plot(pred_samples[..., feature_dim],
label=pred_type, color='tab:blue', linestyle='--')
pred_ix += 1
axis.legend()
axis.set_title(f'{split} forecasts', size=15)
def get_plotting_samples(y):
"""
y = {'true': torch.stack(total_y_true)
'pred': torch.stack(total_y_pred),
'true_informer': total_y_true_informer
'pred_informer': total_y_pred_informer}
Assumes that samples are not shuffled, strided
"""
samples = {}
for k, _y in y.items():
if 'informer' not in k and 'true' not in k: # Only plot raw-scale samples
samples[k] = average_horizons(_y)
elif k == 'true':
samples[k] = average_horizons(_y)
return samples
def average_horizons(y):
"""
y.shape is B x L x D
"""
b, l, d = y.shape
total_len = b + l - 1
total_pred = torch.zeros(b, total_len, d)
total_pred[total_pred == 0] = float('nan')
for ix, y_preds in enumerate(y):
total_pred[ix][ix:ix+len(y_preds)] = y_preds
return torch.nanmean(total_pred, dim=0)
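# Illustrative check of average_horizons: two length-2 forecasts from
# consecutive windows are nan-padded onto a shared timeline of length
# B + L - 1 and averaged where they overlap.
if __name__ == '__main__':
    y = torch.tensor([[[1.], [2.]],
                      [[3.], [4.]]])  # B=2, L=2, D=1
    out = average_horizons(y)
    assert torch.allclose(out, torch.tensor([[1.], [2.5], [4.]]))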
|
spacetime-main
|
train/evaluate.py
|
spacetime-main
|
train/step/__init__.py
|
|
import torch
from tqdm import tqdm
from loss import get_loss
from utils.logging import type_of_script
def compute_informer_metrics(y_pred, y_true):
metrics = {}
criterions = {f'informer_{name}': get_loss(f'informer_{name}')
for name in ['rmse', 'mse', 'mae']}
for k, criterion in criterions.items():
    metrics[k] = criterion(y_pred, y_true)
return metrics
def shared_step(model, dataloader, optimizer, scheduler, criterions, epoch,
config, split, input_transform=None, output_transform=None):
if input_transform is None:
input_transform = lambda x: x
if output_transform is None:
output_transform = lambda y: y
# Save step-wise metrics
metrics = {'total': 0.}
for k in criterions.keys():
metrics[k] = 0.
if split == 'train':
try: model.set_train()
except AttributeError: model.train()
model.zero_grad()
grad_enabled = True
else:
try:
model.set_eval()
except Exception as e:
print(e)
model.eval()
grad_enabled = False
# Save predictions
total_y_true = []
total_y_pred = []
total_y_true_informer = []
total_y_pred_informer = []
with torch.set_grad_enabled(grad_enabled):
if type_of_script() == 'terminal':
pbar = tqdm(dataloader, leave=False)
else:
pbar = dataloader
model.to(config.device)
for batch_ix, data in enumerate(pbar):
x, y, *z = data
# Only take in lag terms for input
x = x[:, :model.lag, :]
# Transform batch data
u = input_transform(x)
u = u.to(config.device)
# Return (model outputs), (model last-layer next-step inputs)
y_pred, z_pred = model(u)
# y_pred is closed-loop (y_c) and open-loop (y_o) model output predictions
# i.e., (y_c, y_o) = y_pred
y_pred = [output_transform(_y) if _y is not None else _y
for _y in y_pred]
y_c, y_o = y_pred
y_t = torch.cat([x, y], dim=1) # Supervise all time-steps
# config.criterion_weights specifies relative contribution for each loss component
# - w0 weights loss over model predictions for horizon (future) terms
# - w1 weights loss over model predictions for lag (historical) terms
# - w2 weights loss over closed-loop layer predictions for that layer's next time-step inputs
w0, w1, w2 = config.criterion_weights
# Closed-loop supervision
loss = torch.mean(w0 * criterions[config.loss](
y_c, y_t[:, model.lag:, :].to(config.device)))
if not model.inference_only:
# Open-loop output supervision,
# -> Offset by 1 bc next time-step prediction
loss += torch.mean(w1 * criterions[config.loss](y_o[:, model.kernel_dim-1:, :],
y_t[:, model.kernel_dim:model.lag+1, :].to(config.device)))
# Closed-loop next-time-step input supervision
# -> Offset by 1 bc next time-step prediction
z_p, z_t = z_pred # z_pred is prediction (z_p) and "ground-truth" (z_t) for next-time-step layer input
loss += torch.mean(w2 * criterions[config.loss](z_p[:, model.kernel_dim-1:-1, :],
z_t[:, model.kernel_dim:, :]))
if grad_enabled:
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Save metrics
y_c = y_c.detach().cpu()
y_t = y_t.detach().cpu()
u = u.detach().cpu()
# Compute metrics only for horizon terms
y_c_horizon = y_c
y_t_horizon = y_t[:, model.lag:, :]
for k, criterion in criterions.items():
    metrics[k] += criterion(y_c_horizon, y_t_horizon).sum().item()
metrics['total'] += (y_t_horizon.shape[0] * y_t_horizon.shape[1]) # Batch size * horizon
description = f'└── {split} batch {int(batch_ix)}/{len(pbar)}'
for metric_name, metric in metrics.items():
if metric_name == 'correct':
description += f' | {metric_name} (acc. %): {int(metric):>5d}/{int(metrics["total"])} = {metric / metrics["total"] * 100:.3f}%'
elif 'informer' in metric_name:
description += f' | {metric_name}: {metric / (batch_ix + 1):.3f}'
elif metric_name != 'total':
description += f' | {metric_name}: {metric / metrics["total"]:.3f}'
if type_of_script() == 'terminal':
pbar.set_description(description)
if y_o is not None:
    y_o = y_o.detach().cpu()
# Save these for Informer metrics (standardized)
total_y_pred_informer.append(y_c_horizon)
total_y_true_informer.append(y_t_horizon)
# Now save raw-scale metrics
total_y_pred.append(dataloader.dataset.inverse_transform(y_c_horizon))
total_y_true.append(dataloader.dataset.inverse_transform(y_t_horizon))
# Save predictions and compute aggregate metrics
total_y_true_informer = torch.cat(total_y_true_informer, dim=0)
total_y_pred_informer = torch.cat(total_y_pred_informer, dim=0)
total_y = {'true': torch.cat(total_y_true, dim=0),
'pred': torch.cat(total_y_pred, dim=0),
'true_informer': total_y_true_informer,
'pred_informer': total_y_pred_informer}
informer_metrics = compute_informer_metrics(total_y_pred_informer,
                                            total_y_true_informer)
for k, v in informer_metrics.items():
metrics[f'no_reduce_{k}'] = v
for k, metric in metrics.items():
if k != 'total' and 'informer' not in k:
metrics[k] = metric / metrics['total']
elif 'informer' in k and 'no_reduce' not in k:
metrics[k] = metric / (batch_ix + 1)
else:
metrics[k] = metric
model.cpu()
return model, metrics, total_y
|
spacetime-main
|
train/step/informer.py
|
import torch.nn as nn
class AffineTransform(nn.Module):
def __init__(self, lag=None):
"""
Transform data: f(x) = ax - b
"""
super().__init__()
self.lag = lag
def forward(self, x):
# Assume x.shape is B x L x D
raise NotImplementedError
class InverseAffineTransform(nn.Module):
def __init__(self, transform):
super().__init__()
self.transform = transform # AffineTransform object
def forward(self, x):
return ((x + self.transform.b.to(x.device)) /
self.transform.a.to(x.device))
|
spacetime-main
|
data_transforms/affine.py
|
from .mean import MeanTransform, MeanInputTransform
from .standardize import StandardizeTransform
from .affine import InverseAffineTransform
from .last import LastAffineTransform
def get_data_transforms(method, lag):
supported_methods = ['mean', 'mean_input', 'last',
'standardize', 'none']
if method == 'mean':
input_transform = MeanTransform(lag)
output_transform = InverseAffineTransform(input_transform)
elif method == 'mean_input':
input_transform = MeanInputTransform(lag)
output_transform = InverseAffineTransform(input_transform)
elif method == 'last':
input_transform = LastAffineTransform(lag)
output_transform = InverseAffineTransform(input_transform)
elif method == 'standardize':
input_transform = StandardizeTransform(lag)
output_transform = InverseAffineTransform(input_transform)
elif method == 'none':
input_transform = lambda x: x
output_transform = lambda x: x
else:
raise NotImplementedError(f"Data transform method '{method}' not supported. Please choose from {supported_methods}.")
return input_transform, output_transform
|
spacetime-main
|
data_transforms/__init__.py
|
import torch
from .affine import AffineTransform
class StandardizeTransform(AffineTransform):
"""
Standardize lag terms, i.e., z = (x - mean(x)) / std(x)
- Computed as (1 / std(x)) * x - mean(x) * (1 / std(x)) to fit with inverse call,
which does (z + (mean(x) / std(x))) * std(x) = z * std(x) + mean(x)
"""
def __init__(self, lag):
super().__init__(lag=lag)
def forward(self, x):
self.a = 1. / torch.std(x[:, :self.lag, :], dim=1)[:, None, :]
self.b = torch.mean(x[:, :self.lag, :], dim=1)[:, None, :] * self.a
return self.a * x - self.b
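# Illustrative round-trip sketch (run via the package, since this module uses
# relative imports): standardizing and then applying the inverse affine map
# (z + b) / a -- what InverseAffineTransform computes -- recovers x.
if __name__ == '__main__':
    t = StandardizeTransform(lag=8)
    x = torch.randn(2, 10, 3)
    z = t(x)
    assert torch.allclose((z + t.b) / t.a, x, atol=1e-5)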
|
spacetime-main
|
data_transforms/standardize.py
|
import torch
from .affine import AffineTransform
class MeanTransform(AffineTransform):
"""
Zero-center values
"""
def __init__(self, lag):
super().__init__(lag=lag)
def forward(self, x):
self.a = torch.ones(1)
self.b = x[:, :self.lag, :].mean(dim=1)[:, None, :]
return self.a * x - self.b
class MeanInputTransform(AffineTransform):
"""
Same as mean, but compute mean over entire input
"""
def __init__(self, lag): # ignore lag here
super().__init__(lag=None)
def forward(self, x):
self.a = torch.ones(1)
        self.b = x.mean(dim=1)[:, None, :]  # lag=None, so the mean spans the entire input sequence
return self.a * x - self.b
|
spacetime-main
|
data_transforms/mean.py
|
from .affine import AffineTransform
class LastAffineTransform(AffineTransform):
def __init__(self, lag):
super().__init__(lag=lag)
def forward(self, x):
self.a = 1.
self.b = x[:, self.lag - 1, :][:, None, :]
return self.a * x - self.b
|
spacetime-main
|
data_transforms/last.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from setuptools import setup, find_packages
setup(name='cmr', version='1.0', packages=find_packages())
|
CMR-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# %%
import json
import argparse
import pandas as pd
from argparse import Namespace
import numpy as np
import glob, os
from pandas.core import base
os.chdir("/private/home/yuchenlin/SemanticDebugger")
base_dir = "experiments/results/qa/"
split = "test"
num_streams = 6
def sma(values):
    # simple mean of the per-step scores (named after "simple moving average")
    return float(np.mean(values))
def show_result(path):
if path == "experiments/results/qa/qa_er_lr=3e-5_ep=10_l2w=0_rs=64_rf=1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8-test[5]_seed=42_result.json":
print()
o = json.load(open(path))
r = {}
debugger_args = eval(o["debugger_args"])
data_args = eval(o["data_args"])
r["stream_id"] = data_args.stream_id
path = path.replace(base_dir, "")
r["path"] = path.replace(",", "|")
path_info = path.replace(",", "|").replace("_", "|").replace("-", "|").split("|")
    if path_info[-2].startswith("seed="):
        r["seed"] = path_info[-2][len("seed="):]
elif "nonecl" in path:
r["seed"] = "N/A (42)"
else:
return None
r["standard_path"] = "|".join(path_info[:-2])
for _ind in range(10):
txt = f"test[{_ind}]"
if txt in r["standard_path"]:
r["standard_path"] = r["standard_path"].replace(txt, "test[]")
break
# r["prefix"] = prefix
r["method_class"] = o["method_class"]
r["cl_method"] = o["method_class"]
if r["cl_method"] == "simple_cl":
if hasattr(debugger_args, "diff_loss_weight"):
r["cl_method"] = f'{r["cl_method"]}-l2w={debugger_args.diff_loss_weight}'
elif r["cl_method"] == "online_ewc":
ewc_lambda= debugger_args.ewc_lambda
ewc_gamma= debugger_args.ewc_gamma
r["cl_method"] = f'{r["cl_method"]}-{ewc_lambda}-{ewc_gamma}'
elif r["cl_method"] == "er":
replay_size = debugger_args.replay_size
replay_freq = debugger_args.replay_frequency
r["cl_method"] = f'{r["cl_method"]}-{replay_size}-{replay_freq}'
if hasattr(debugger_args, "diff_loss_weight"):
r["cl_method"] = f'{r["cl_method"]}-l2w={debugger_args.diff_loss_weight}'
elif r["cl_method"] == "mir":
replay_size = debugger_args.replay_size
replay_freq = debugger_args.replay_frequency
replay_candidate_size = debugger_args.replay_candidate_size
mir_abalation_args = debugger_args.mir_abalation_args
r["cl_method"] = f'{r["cl_method"]}-{replay_size}/{replay_candidate_size}-{replay_freq}-{mir_abalation_args}'
if hasattr(debugger_args, "diff_loss_weight"):
r["cl_method"] = f'{r["cl_method"]}-l2w={debugger_args.diff_loss_weight}'
# replay_size = debugger_args.replay_size
elif r["cl_method"] == "index_cl_bart_io_index":
replay_size = debugger_args.replay_size
replay_freq = debugger_args.replay_frequency
r["cl_method"] = f'{r["cl_method"]}-{replay_size}-{replay_freq}'
r["steps"] = o["model_update_steps"]
r["lr"] = 0 if r["cl_method"]=="none_cl" else debugger_args.learning_rate
r["num_epochs"] = 0 if r["cl_method"]=="none_cl" else debugger_args.num_epochs
start = data_args.submission_stream_data.index("submission_stream.") + len("submission_stream.")
end = data_args.submission_stream_data.index(".json")
# if "-test.json" in data_args.submission_stream_data:
# end = data_args.submission_stream_data.index("-test.json")
# elif "-val.json" in data_args.submission_stream_data:
# end = data_args.submission_stream_data.index("-val.json")
ns_config_str = data_args.submission_stream_data[start:end]
r["ns_config"] = ns_config_str
if "-val" in ns_config_str:
ns_config_str = ns_config_str.replace("-val", "")
mode = "val"
elif "-test" in ns_config_str:
ns_config_str = ns_config_str.replace("-test", "")
mode = "test"
ns_config = eval(f"dict({ns_config_str})")
r.update(ns_config)
online = o["online_eval_results"]
EFRs = [item["EFR"] for item in online]
UKRs = [item["UKR"] for item in online if "UKR" in item]
OKRs = [item["OKR"] for item in online if "OKR" in item]
KGs = [item["KG"] for item in online if "KG" in item]
CSRs = [item["CSR"] for item in online if "CSR" in item]
if mode!="val" and len(EFRs) != ns_config["T"]:
print(f"Error: ----> path={path}; len(EFRs)={len(EFRs)}")
return None
last_step = online[-1]
if last_step["timecode"] != ns_config["T"] -1:
print(f'Error: ----> path={path}; last_step["timecode"]={last_step["timecode"]} the results does not match the length')
return None
r["AEFR(T)"] = float(np.mean(EFRs))
r["AUKR"] = sma(UKRs)
r["AOKR"] = sma(OKRs)
r["ACSR"] = sma(CSRs)
r["AKG"] = sma(KGs)
r["AOEC"] = float(np.mean([r["AUKR"], r["AOKR"], r["ACSR"], r["AKG"]]))
r["UKR(T)"] = UKRs[-1]
r["OKR(T)"] = OKRs[-1]
r["CSR(T)"] = CSRs[-1]
r["KG(T)"] = KGs[-1]
r["OEC(T)"] = float(np.mean([r["UKR(T)"], r["OKR(T)"], r["CSR(T)"], r["KG(T)"]]))
return r
# %%
def _sort(column):
# def tm_sorter(column):
"""Sort function"""
cl_methods = ['none_cl', "simple_cl", "online_ewc", "er", "mir", "index_cl_bart_io_index"]
correspondence = {team: order for order, team in enumerate(cl_methods)}
return column.map(correspondence)
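# Note: _sort maps each method_class to a fixed ordinal, so that
# `sort_values(key=_sort, kind="mergesort")` below orders methods canonically
# while the stable mergesort preserves the earlier within-method ordering.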
# %%
if __name__ == '__main__':
# %%
os.makedirs(f"{base_dir}/csvs/", exist_ok=True)
result_files = []
for file in glob.glob(f'{base_dir}/*.json'):
if split not in file:
continue
result_files.append(file)
print("\n".join(result_files))
# %%
results = []
for r_file in result_files:
# print(r_file)
r = show_result(r_file)
if r:
results.append(r)
# print(results)
results.sort(key=lambda x:x["cl_method"])
results = pd.DataFrame(results)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
# %%
results.to_csv(f"{base_dir}/csvs/full_results.csv", index=False, sep=",")
for ns_config in results.ns_config.unique():
# print(ns_config)
r = results[results["ns_config"]==ns_config]
# r = r[((r["lr"]==3.5e-5) & (r["num_epochs"]==10)) | (r["cl_method"] == "none_cl") | (r["cl_method"] == "none_cl_offline_eval")]
items = []
for standard_path in results.standard_path.unique():
r_r = results[results["standard_path"]==standard_path]
# if len(r_r) != num_streams:
# print(f"{standard_path} does not have {num_streams} runs, so we skip it.")
# continue
# %%
# print(r_r)
do_average_across_stream_id = False
do_average_across_seed = True
shown_item = dict()
if do_average_across_stream_id:
records = r_r.to_dict("records")
shown_item = records[0]
shown_item["stream_id"] = -1
# print(shown_item)
if do_average_across_seed:
r_r = r_r[(r_r["stream_id"] == 5) & (r_r["stream_id"] != "N/A (42)")]
if r_r.empty:
continue
records = r_r.to_dict("records")
shown_item = records[0]
shown_item["seed"] = -1
print(f"len(r_r)={len(r_r)}")
keys = ["AEFR(T)", "AUKR", "AOKR", "ACSR", "AKG", "UKR(T)", "AOEC", "OKR(T)", "CSR(T)", "KG(T)", "OEC(T)"]
for key in keys:
shown_item[key] = r_r[key].mean()
shown_item["OEC(T)-std"] = r_r["OEC(T)"].std()
shown_item["OEC(T)-min"] = r_r["OEC(T)"].min()
shown_item["OEC(T)-median"] = r_r["OEC(T)"].median()
shown_item["OEC(T)-max"] = r_r["OEC(T)"].max()
items.append(shown_item)
r = pd.DataFrame(items)
if "AEFR(T)" not in r:
print()
r = r[(r["AEFR(T)"]>=0.9) | (r["cl_method"]=="none_cl")]
r = r.sort_values(by=["steps", "lr", "num_epochs", "cl_method"])
r = r.sort_values(by=["cl_method"], key = lambda x: x.str.len())
r = r.sort_values(by="method_class", key=_sort, kind="mergesort")
r = r.drop(columns=["ns_config", "method_class", "path", "standard_path", "ACSR", "AOEC", "AKG", "AUKR", "AOKR"])
# r = r.drop(columns=["lr", "num_epochs"])
r.to_csv(f"{base_dir}/csvs/{ns_config}.csv", index=False, sep=",")
print("-"*50)
print(f'ns_config="{ns_config.replace(",", " & ")}",')
print(open(f"{base_dir}/csvs/{ns_config}.csv").read())
# %%
|
CMR-main
|
experiments/report_results.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import pandas as pd
import os
import glob
from io import StringIO
import altair as alt
from cmr.notebooks.draw_utils import draw_grouped_bars
# os.chdir()
# os.makedirs("csvs/", exist_ok=True)
# result_files = []
all_data = []
header = ""
for file in glob.glob("experiments/results/qa/csvs/*.csv"):
print(file)
lines = open(file).read().splitlines()
header = lines[0]
data = lines[1:]
all_data += data
# result_files.append(file)
# data = pd.read_csv(file)
# print(all_data)
# print(len(all_data))
all_data.insert(0, header)
df = pd.read_csv(StringIO("\n".join(all_data)))
cl_methods = ['none_cl', 'simple_cl-l2w=0.0', 'online_ewc-250.0-0.9', 'er-32-3-l2w=0.0', 'mir-32/256-3-none-l2w=0.0', 'mir-32/256-3-largest_afterloss-l2w=0.0']
cl_prefix = ["Frozen", "CFT", "OnEWC", "ER", "MIR", "MaxLoss"]
for a,b in zip(cl_methods, cl_prefix):
df = df.replace(a, b)
df.rename(columns={'OEC(T)':'OECT'}, inplace=True)
# df = df[df.cl_method != "Frozen"]
settings = [(0.9, 0.5, 0.8), (0.9, 0.1, 0.8), (0.9, 0.9, 0.8), (0.9, 0.5, 0.2), (0.9, 0.5, 0.5), (0.1, 0.5, 0.8)]
# (0.9, 0.1, 0.8), (0.9, 0.9, 0.8)
table = []
for alpha, beta, gamma in settings:
data = df[(df["alpha"]==alpha) & (df["beta"]==beta) & (df["gamma"]==gamma)]
prefix = f"$alpha$={alpha},$beta$={beta},$gamma$={gamma}"
# print()
OECTs = {c: data[data.cl_method==c].iloc[0]["OECT"] for c in cl_prefix[:]}
OECTs["prefix"] = prefix
table.append(OECTs)
# print(data)
# print(OECTs)
# color_dom = cl_prefix
# color_range = ["gray", "blue", "orange", "green", "black"]
# fig = draw_grouped_bars(df=data, fig_title=f"{alpha}-{beta}-{gamma}", y_scale=[0.6, 0.68], x_key="cl_method", y_key="OECT", y_title="", height=250, width=175, color_dom=color_dom, color_range=color_range, bin_width=30)
# color=alt.Color("cl_method", scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom, legend=None)
# print(data)
# y=alt.Y("OECT:Q", scale=alt.Scale(domain=[0.3, 0.7]), axis=alt.Axis(grid=False))
# fig = alt.Chart(data).mark_bar(clip=True).encode(x="cl_method", y=y)
# fig.save(f'figures/settings/{alpha}-{beta}-{gamma}.png', scale_factor=3.0)
# fig.save("")
table = pd.DataFrame(table)
print(table.to_csv(index=False,))
|
CMR-main
|
experiments/bakcup/report_all_settings.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
maps = """MIR # 67.40 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
CFT # 61.58 # QA_simplecl_lr=3e-5_ep=10_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
ER # 66.62 # QA_er_lr=3e-5_ep=10_rs=32_rf=3_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
MaxLoss# 66.55 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_largest_afterloss_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnL2Reg # 65.09 # qa_simplecl_lr=3e-5_ep=10_l2w=1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnEWC # 65.31 # qa_oewc_lr=3e-5_ep=10_lbd=250_gm=9e-1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
*Frozen # 45.77 # QA_nonecl_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
"""
*Frozen # 45.77 # QA_nonecl_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
import os
import json
import pandas as pd
import altair as alt
from cmr.notebooks.draw_utils import draw_stacked_bars, draw_curve
# os.chdir("experiments/results/qa/")
all_data = []
for line in maps.splitlines():
name, OECT, path = [i.strip() for i in line.split("#")]
# print(name, OECT, path)
o = json.load(open("experiments/results/qa_backup_1113/" + path))
# debugger_args = eval(o["debugger_args"])
# data_args = eval(o["data_args"])
r = o["online_eval_results"]
for item in r:
item["prefix"] = name
if item["timecode"] == 99:
item["timecode"] += 1
all_data += r
# print(o)
# EFRs = [item["EFR"] for item in online]
# UKRs = [item["UKR"] for item in online if "UKR" in item]
# OKRs = [item["OKR"] for item in online if "OKR" in item]
# KGs = [item["KG"] for item in online if "KG" in item]
# CSRs = [item["CSR"] for item in online if "CSR" in item]
# for item in all_data:
# if item["name"] == "*Frozne":
# item["OKR"] = 0
# else:
# if item["OKR"]
all_data = pd.DataFrame(all_data)
# all_data = all_data.drop(columns=["before_eval_results", "before_error_ids", "mir_buffer_ids", "retrieved_ids", "OKR_sampled_ids"])
def flatten_list(lst):
lst = list(lst)
flst = []
for l in lst:
flst += l
return flst
def flatten_predictions(before_eval_results):
lst = list(before_eval_results)
flst = []
scores = []
for l in lst:
flst += l["predictions"]
# scores += l["metric_results"]["EM"]
return flst, scores
def jaccard(list1, list2):
list1 = set(list1)
list2 = set(list2)
intersection = len(list1 & list2)
union = (len(list1) + len(list2)) - intersection
return float(intersection) / union
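# e.g., jaccard([1, 2, 3], [2, 3, 4]) == 2 / 4 == 0.5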
def error_sim(all_data, span=[0, 10], methods=["CFT", "OnL2Reg", "OnEWC", "ER", "MaxLoss", "MIR"]):
df = all_data[(all_data.timecode<=span[1]) & (all_data.timecode>=span[0])]
sims = []
for method_1 in methods:
for method_2 in methods:
if method_1 == method_2:
continue
# errors1 = flatten_list(df[df.prefix==method_1]["before_error_ids"])
# errors2 = flatten_list(df[df.prefix==method_2]["before_error_ids"])
errors1, scores1 = flatten_predictions(df[df.prefix==method_1]["before_eval_results"])
errors2, scores2 = flatten_predictions(df[df.prefix==method_2]["before_eval_results"])
# if len(errors1) == 0:
# continue
assert len(errors1) == len(errors2)
            sim = sum([p1 != p2 for p1, p2 in zip(errors1, errors2)]) / len(errors1)  # disagreement rate between the two methods' predictions
# sim = jaccard(errors1, errors2)
sims.append({"method1": method_1, "method2": method_2, "sim": sim})
print(f"{method_1}-{method_2}: {sim}")
sims = pd.DataFrame(sims)
fig = alt.Chart(sims).mark_rect().encode(
x=alt.X('method1:O', sort=methods),
y=alt.Y('method2:O', sort=methods),
# color='sim:Q'
color = alt.Color('sim:Q',scale=alt.Scale(domain=[0.35, 0.45]))
)
fig = fig.properties(width=500, height=500).configure_title(fontSize=0,
).configure_axis(
labelFontSize=30,
titleFontSize=0,
)
fig.save(f"figures/heatmaps/{span[0]}-{span[1]}.png", scale=5.0)
error_sim(all_data, span=[0,10])
error_sim(all_data, span=[10,20])
error_sim(all_data, span=[20,30])
error_sim(all_data, span=[30,40])
error_sim(all_data, span=[50,60])
error_sim(all_data, span=[90,100])
error_sim(all_data, span=[0,20])
error_sim(all_data, span=[40,60])
error_sim(all_data, span=[80,100])
# all_data = all_data.dropna()
# all_data['OEC'] = all_data.drop(columns=["timecode", "EFR", "SR", "Overall"]).mean(numeric_only=True, axis=1)
# print(all_data)
# exit()
# print(all_data)
# fig = draw_curve(df=all_data[all_data["EFR"].notnull()], fig_title=f"EFR", y_scale=[0.7, 1], x_key="timecode:O", y_key="EFR:Q", y_title="EFR")
# fig.save('figures/curves/EFRs.png', scale_factor=2.0)
# color_dom = ["CFT", "OnL2Reg", "OnEWC", "ER", "MaxLoss", "MIR"]
# color_range = ['gray', 'brown', '#7fc97f', '#D35400', 'purple', '#386cb0']
# fig = draw_curve(df=all_data[all_data["UKR"].notnull()], fig_title=f"UKR", y_scale=[0.66, 0.82], x_key="timecode:O", y_key="UKR:Q", y_title="UKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/UKRs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["OKR"].notnull()], fig_title=f"OKR", y_scale=[0.77, 0.96], x_key="timecode:O", y_key="OKR:Q", y_title="OKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/OKRs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["CSR"].notnull()], fig_title=f"CSR", y_scale=[0.52, 0.67], x_key="timecode:O", y_key="CSR:Q", y_title="CSR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/CSRs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["KG"].notnull()], fig_title=f"KG", y_scale=[0.43, 0.54], x_key="timecode:O", y_key="KG:Q", y_title="KG", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/KGs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["OEC"].notnull()], fig_title=f"OEC", y_scale=[0.61, 0.72], x_key="timecode:O", y_key="OEC:Q", y_title="OEC", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/OECs.png', scale_factor=3.0)
|
CMR-main
|
experiments/bakcup/report_heatmap.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
maps = """MIR # 67.40 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
CFT # 61.58 # QA_simplecl_lr=3e-5_ep=10_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
ER # 66.62 # QA_er_lr=3e-5_ep=10_rs=32_rf=3_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
MaxLoss# 66.55 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_largest_afterloss_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnL2Reg # 65.09 # qa_simplecl_lr=3e-5_ep=10_l2w=1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnEWC # 65.31 # qa_oewc_lr=3e-5_ep=10_lbd=250_gm=9e-1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
"""
*Frozen # 45.77 # QA_nonecl_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
import os
import json
import pandas as pd
from cmr.notebooks.draw_utils import draw_stacked_bars, draw_curve
# os.chdir("experiments/results/qa/")
all_data = []
for line in maps.splitlines():
name, OECT, path = [i.strip() for i in line.split("#")]
# print(name, OECT, path)
o = json.load(open("experiments/results/qa/" + path))
# debugger_args = eval(o["debugger_args"])
# data_args = eval(o["data_args"])
r = o["online_eval_results"]
for item in r:
item["prefix"] = name
if item["timecode"] == 99:
item["timecode"] += 1
all_data += r
# print(o)
# EFRs = [item["EFR"] for item in online]
# UKRs = [item["UKR"] for item in online if "UKR" in item]
# OKRs = [item["OKR"] for item in online if "OKR" in item]
# KGs = [item["KG"] for item in online if "KG" in item]
# CSRs = [item["CSR"] for item in online if "CSR" in item]
# for item in all_data:
# if item["name"] == "*Frozne":
# item["OKR"] = 0
# else:
# if item["OKR"]
all_data = pd.DataFrame(all_data)
all_data = all_data.drop(columns=["before_eval_results", "before_error_ids", "mir_buffer_ids", "retrieved_ids", "OKR_sampled_ids"])
all_data = all_data.dropna()
all_data['OEC'] = all_data.drop(columns=["timecode", "EFR", "SR", "Overall"]).mean(numeric_only=True, axis=1)
# print(all_data)
# exit()
# print(all_data)
# fig = draw_curve(df=all_data[all_data["EFR"].notnull()], fig_title=f"EFR", y_scale=[0.7, 1], x_key="timecode:O", y_key="EFR:Q", y_title="EFR")
# fig.save('figures/curves/EFRs.png', scale_factor=2.0)
color_dom = ["CFT", "OnL2Reg", "OnEWC", "ER", "MaxLoss", "MIR"]
color_range = ['gray', 'brown', '#7fc97f', '#D35400', 'purple', '#386cb0']
fig = draw_curve(df=all_data[all_data["UKR"].notnull()], fig_title=f"UKR", y_scale=[0.66, 0.82], x_key="timecode:O", y_key="UKR:Q", y_title="UKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/UKRs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["OKR"].notnull()], fig_title=f"OKR", y_scale=[0.77, 0.96], x_key="timecode:O", y_key="OKR:Q", y_title="OKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/OKRs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["CSR"].notnull()], fig_title=f"CSR", y_scale=[0.52, 0.67], x_key="timecode:O", y_key="CSR:Q", y_title="CSR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/CSRs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["KG"].notnull()], fig_title=f"KG", y_scale=[0.43, 0.54], x_key="timecode:O", y_key="KG:Q", y_title="KG", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/KGs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["OEC"].notnull()], fig_title=f"OEC", y_scale=[0.61, 0.72], x_key="timecode:O", y_key="OEC:Q", y_title="OEC", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/OECs.png', scale_factor=3.0)
|
CMR-main
|
experiments/bakcup/report_curves.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from cmr.models.utils import set_seeds
import sys
import argparse
import logging
import random
import numpy as np
import torch
from cmr.models.run_bart import run
def get_parser():
parser = argparse.ArgumentParser()
# Basic parameters
parser.add_argument("--train_file", default="data", required=False)
parser.add_argument("--dev_file", default="data", required=False)
parser.add_argument("--test_file", default="data", required=False)
parser.add_argument("--dataset", default="None", required=False)
parser.add_argument("--model", default="facebook/bart-base", required=False)
parser.add_argument("--output_dir", default=None, type=str, required=False)
parser.add_argument("--do_train", action='store_true')
parser.add_argument("--do_predict", action='store_true')
parser.add_argument("--predict_checkpoint", type=str,
default="best-model.pt")
# Model parameters
parser.add_argument("--checkpoint", type=str)
parser.add_argument("--do_lowercase", action='store_true', default=False)
parser.add_argument("--freeze_embeds", action='store_true', default=False)
# Preprocessing/decoding-related parameters
parser.add_argument('--max_input_length', type=int, default=128)
parser.add_argument('--max_output_length', type=int, default=32)
parser.add_argument('--num_beams', type=int, default=4)
parser.add_argument("--append_another_bos",
action='store_true', default=False)
# Training-related parameters
parser.add_argument("--train_batch_size", default=64, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--predict_batch_size", default=32, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
# parser.add_argument("--warmup_proportion", default=0.01, type=float,
# help="Weight decay if we apply some.") # Not used
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=0.1, type=float,
help="Max gradient norm.")
parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1000.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_steps", default=300, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--total_steps", default=-1, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--wait_step', type=int, default=10)
# Other parameters
parser.add_argument("--quiet", action='store_true',
help="If true, tqdm will not show progress bar")
parser.add_argument('--eval_period', type=int, default=100,
help="Evaluate & save model")
parser.add_argument('--prefix', type=str, default='',
help="Prefix for saving predictions")
parser.add_argument('--debug', action='store_true',
help="Use a subset of data for debugging")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
return parser
def main():
args = get_parser().parse_args()
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
# print("Output directory () already exists and is not empty.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
# Start writing logs
log_filename = "{}log.txt".format("train_" if args.do_train else "eval_")
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
handlers=[logging.FileHandler(os.path.join(args.output_dir, log_filename)),
logging.StreamHandler()])
logger = logging.getLogger(__name__)
logger.info(args)
logger.info(args.output_dir)
set_seeds(args.seed)
args.n_gpu = torch.cuda.device_count()
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_predict:
raise ValueError(
"At least one of `do_train` or `do_predict` must be True.")
if args.do_train:
        if not args.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
        if not args.dev_file:
            raise ValueError(
                "If `do_train` is True, then `dev_file` must be specified.")
    if args.do_predict:
        if not args.test_file:
            raise ValueError(
                "If `do_predict` is True, then `test_file` must be specified.")
logger.info("Using {} gpus".format(args.n_gpu))
run(args, logger)
if __name__ == '__main__':
main()
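"""
Example invocation (hedged: the data paths and output_dir below are illustrative):
python cmr/cli_bart.py \
    --do_train \
    --train_file data/mrqa_squad/mrqa_squad_train.jsonl \
    --dev_file data/mrqa_squad/mrqa_squad_dev.jsonl \
    --model facebook/bart-base \
    --output_dir out/bart_base_squad
"""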
|
CMR-main
|
cmr/cli_bart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.notebooks.draw_utils import draw_stacked_bars
import pandas as pd
import json
def visualize_stream(submission_stream, data_names, cfg):
task_name = cfg["task_name"]
episode_size = cfg["b"]
submission_stat = []
init_error_stat = []
for time_step, episode_data in enumerate(list(submission_stream)):
for dn in data_names:
            examples = [ex for ex in episode_data if ex["data_name"] == dn]
            init_errors = [ex for ex in examples if ex["init_status"] == "error"]
            if dn == data_names[0]:
                dn = "*" + dn
            submission_stat.append(dict(time_step=time_step, num_examples=len(examples), prefix=dn))
            init_error_stat.append(dict(time_step=time_step, num_examples=len(init_errors), prefix=dn))
submission_stat_pd = pd.DataFrame(submission_stat)
filename_str = f"T={cfg['T']},b={cfg['b']},alpha={cfg['alpha']},beta={cfg['beta']},gamma={cfg['gamma']}|[{cfg['stream_id']}]"
title_str = f"alpha={cfg['alpha']}, beta={cfg['beta']}, gamma={cfg['gamma']}"
fig1 = draw_stacked_bars(df=submission_stat_pd, fig_title=f"Submission Stream ({title_str})", y_scale=[0., episode_size+1], x_key="time_step", y_key="sum(num_examples)", y_title="# of Examples")
fig1.save(f'figures/{task_name}.submission.{filename_str}.png', scale_factor=2.0)
init_error_stat_pd = pd.DataFrame(init_error_stat)
fig2 = draw_stacked_bars(df=init_error_stat_pd, fig_title=f"(Initial) Error Stream ({title_str})", y_scale=[0., episode_size+1], x_key="time_step", y_key="sum(num_examples)", y_title="# of Errors")
fig2.save(f'figures/{task_name}.init_error.{filename_str}.png', scale_factor=2.0)
# 50-version
# color_dom = ["*squad", "hotpot", "news", "nq", "search", "trivia"]
# color_range = ["gray", "blue", "orange", "green", "black", "brown"]
# color_range = ['#bab0ac', '#f0027f', '#7fc97f', '#D35400', '#9c9ede', '#386cb0']
# color_dom=None; color_range=None
# fig1 = draw_stacked_bars(df=submission_stat_pd[submission_stat_pd["time_step"]<=50], x_scale=[0, 50], fig_title=f"Submission Stream ({title_str})", y_scale=[0., 65], x_key="time_step", y_key="sum(num_examples)", y_title="# of Examples", width=1000, bin_width=18, color_dom=color_dom, color_range=color_range)
# fig1.save(f'figures/{task_name}.submission.{filename_str}.50.png', scale_factor=2.0)
# init_error_stat_pd = pd.DataFrame(init_error_stat)
# fig2 = draw_stacked_bars(df=init_error_stat_pd[init_error_stat_pd["time_step"]<=50], x_scale=[0, 50], fig_title=f"(Initial) Error Stream ({title_str})", y_scale=[0., 65], x_key="time_step", y_key="sum(num_examples)", y_title="# of Errors", width=1000, bin_width=18, color_dom=color_dom, color_range=color_range)
# fig2.save(f'figures/{task_name}.init_error.{filename_str}.50.png', scale_factor=2.0)
return
if __name__ == '__main__':
qa_data_names = ["squad", "nq", "trivia", "hotpot", "news", "search"]
cfg = dict(task_name="qa", upstream="squad") # T=100, b=64, alpha=0.9, beta=0.5, gamma=0.8
with open("experiments/eval_data/qa/submission_stream.T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8-test.json") as f:
streams = json.load(f)
str_start = f.name.index("submission_stream.") + len("submission_stream.")
str_end = f.name.index("-")
ns_config_str = f.name[str_start:str_end]
ns_config = eval(f"dict({ns_config_str})")
cfg.update(ns_config)
print(cfg)
for stream_id, stream in enumerate(streams):
cfg["stream_id"] = stream_id
visualize_stream(stream, qa_data_names, cfg)
|
CMR-main
|
cmr/benchmark_gen/visualize_streams.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import argparse
import random
from cmr.models.utils import set_seeds
from cmr.models.utils import set_seeds
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import numpy as np
from tqdm import tqdm
import spacy, nltk
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_stream_path",
default="exp_results/data_streams/mrqa_naturalquestions_dev.data_stream.test.wr.json", type=str)
parser.add_argument(
"--data_stream_path_with_paraphrases",
default="exp_results/data_streams/mrqa_naturalquestions_dev.data_stream.test.wr.wpara.json", type=str)
parser.add_argument(
"--data_paraphrased_dict",
default="exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json", type=str)
parser.add_argument("--mode", default="paraphrasing", type=str)
parser.add_argument('--num_shards', type=int, default=4)
parser.add_argument('--shard_id', type=int, default=0)
def get_duplicate_ids(data_stream):
seen_ids = set()
examples_to_paraphrase = {}
for episode in data_stream:
for item in episode:
if item["id"] not in seen_ids:
seen_ids.add(item["id"])
else:
examples_to_paraphrase[item["id"]] = item
return examples_to_paraphrase
_NLP = None  # lazily-loaded spaCy pipeline, cached across calls
def split_sentences(text):
    global _NLP
    if _NLP is None:
        _NLP = spacy.load('en_core_web_sm')  # python -m spacy download en_core_web_sm
    docs = _NLP(text)
    sents = []
    for sent in docs.sents:
        sents.append(str(sent).strip())
    return sents
def inference(tokenizer, model, inputs, K=5, max_input_length=100):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# print("Device: ", device)
inputs = add_prompt(inputs)
tokenized_input = tokenizer.batch_encode_plus(inputs,
pad_to_max_length=True,
max_length=max_input_length, return_tensors="pt")
batch_input_ids, batch_attention_masks = tokenized_input["input_ids"], tokenized_input["attention_mask"]
batch_input_ids = batch_input_ids.to(device)
batch_attention_masks = batch_attention_masks.to(device)
# batch_outputs = model.generate(
# input_ids=batch_input_ids, attention_mask=batch_attention_masks,
# max_length=max_input_length,
# do_sample=True,
# top_k=100,
# top_p=0.8,
# early_stopping=True,
# num_return_sequences=K
# )
batch_outputs = model.generate(
input_ids=batch_input_ids, attention_mask=batch_attention_masks,
max_length=max_input_length,
num_beams=5,
no_repeat_ngram_size=2,
early_stopping=True,
num_return_sequences=min(K, 5)
)
results = []
for output in batch_outputs:
line = tokenizer.decode(output, skip_special_tokens=True,
clean_up_tokenization_spaces=True)
results.append(line)
splitted_results = np.array_split(results, len(inputs))
# assert len(splitted_results[0]) == K
for id in range(len(splitted_results)):
splitted_results[id] = list(splitted_results[id])
splitted_results = list(splitted_results)
def is_too_similar(s1, s2):
return nltk.edit_distance(s1.lower().replace(" ", ""), s2.lower().replace(" ", "")) <= 10
for id in range(len(inputs)):
s = inputs[id]
splitted_results[id] = [p for p in splitted_results[id] if not is_too_similar(p, s)]
return splitted_results
def add_prompt(sentences):
return [f"paraphrase: {s} </s>" for s in sentences]
def get_paraphrased_example(model, tokenizer, example):
context, question = example["input"].split("|")
context = context.replace("Context: ", "")
question = question.replace("Question: ", "")
context_sentences = split_sentences(context)
context_paraphrases = inference(tokenizer, model, context_sentences, K=5)
question_paraphrases = inference(tokenizer, model, [question], K=7)
return context_sentences, context_paraphrases, question_paraphrases
# print(len(sentences), len(paraphrases), len(paraphrases[0]))
# print(sentences)
# print(paraphrases)
def init_para_model():
tokenizer = T5Tokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
model = T5ForConditionalGeneration.from_pretrained(
"Vamsi/T5_Paraphrase_Paws")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device: ", device)
model = model.to(device)
return model, tokenizer
def sample_from_paras(example):
    not_contain_answer = True
    loop_times = 0
    while not_contain_answer:
        context_paraphrases_sampled = []  # reset each attempt so retries do not accumulate sentences
        for ind, candidates in enumerate(example["context_paraphrases"]):
            if loop_times >= 5 and random.randint(0, 10) <= loop_times and not_contain_answer:
                # after repeated failures, increasingly fall back to the original sentence
                context_paraphrases_sampled.append(example["context_sentences"][ind])
            else:
                context_paraphrases_sampled.append(random.choice(candidates))
        context = " ".join(context_paraphrases_sampled)
        if any([a in context for a in example["truth"]]):
            not_contain_answer = False
        loop_times += 1
question = random.choice(example["question_paraphrases"][0])
assert len(question.strip()) >= 5
input_text = f"Context: {context} | Question: {question}"
return input_text
if __name__ == '__main__':
# init the paraphrasing model.
set_seeds(42)
args = parser.parse_args()
with open(args.data_stream_path, "r") as f :
data_stream = json.load(f)
examples_to_paraphrase = get_duplicate_ids(data_stream)
print(len(examples_to_paraphrase))
if args.mode == "paraphrasing":
model, tokenizer = init_para_model()
paraphrased_examples = {}
all_ids = sorted(list(examples_to_paraphrase.keys()))
current_ids = np.array_split(all_ids, args.num_shards)[args.shard_id]
for _id in tqdm(current_ids, desc=f"shard_id: {args.shard_id}"):
example = examples_to_paraphrase[_id]
context_sentences, context_paraphrases, question_paraphrases = get_paraphrased_example(model, tokenizer, example)
example["context_sentences"] = context_sentences
example["context_paraphrases"] = context_paraphrases
example["question_paraphrases"] = question_paraphrases
paraphrased_examples[_id] = example
with open(args.data_paraphrased_dict, "w") as f :
json.dump(paraphrased_examples, f)
else:
# to sample from the paraphrased examples.
with open(args.data_paraphrased_dict, "r") as f :
data_paraphrased_dict = json.load(f)
seen_ids = set()
for episode in tqdm(data_stream, desc="Sampling from paraphrases"):
for item in episode:
if item["id"] not in examples_to_paraphrase:
# unique examples can pass
item["is_paraphrased"] = False
continue
if item["id"] not in seen_ids:
# the first time seeing it.
seen_ids.add(item["id"])
item["is_paraphrased"] = False
else:
# 2nd, 3rd time seeing it
paraphrased_input_text = sample_from_paras(data_paraphrased_dict[item["id"]])
item["input"] = paraphrased_input_text
item["is_paraphrased"] = True
with open(args.data_stream_path_with_paraphrases, "w") as f:
json.dump(data_stream, f)
"""
thread=6
gpu=0
CUDA_VISIBLE_DEVICES=${gpu} python cmr/benchmark_gen/para_stream.py \
--data_paraphrased_dict "exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_${thread}.json" \
--num_shards $n_threads --shard_id ${thread} &
"""
"""
n_threads=8
n_gpus=8
start_gpuid=0
for (( thread=0; thread<${n_threads}; thread++ ))
do
gpu=$(($start_gpuid + $thread % n_gpus))
echo $thread, $gpu
CUDA_VISIBLE_DEVICES=${gpu} python cmr/benchmark_gen/para_stream.py \
--data_paraphrased_dict "exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_${thread}_of_${n_threads}.json" \
--num_shards $n_threads --shard_id ${thread} &
done
# merge the files
n_threads=8
python cmr/benchmark_gen/merge_json_file.py \
--input_file_pattern exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_#_of_${n_threads}.json \
--range "range(${n_threads})" \
--output_file exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json
# post sampling
python cmr/benchmark_gen/para_stream.py \
--mode sampling \
--data_paraphrased_dict "exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json"
"""
|
CMR-main
|
cmr/benchmark_gen/para_stream.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import argparse
from os import path
import random
import json
from cmr.models.utils import set_seeds
from cmr.task_manager.eval_metrics import evaluate_func
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def formatting_initial_status(data_name, predictions, truth_data, results_all, task="qa"):
assert len(predictions) == len(truth_data) == len(
results_all["EM"]) == len(results_all["QA-F1"])
formatted_data = []
for p, t, em, f1 in zip(predictions, truth_data, results_all["EM"], results_all["QA-F1"]):
item = dict()
item["input"] = t[0]
item["truth"] = t[1]
item["id"] = t[2]
item["mistake"] = p.strip()
if task == "qa":
if 0.5 < f1 < 1 and em == False:
# remove the false negative ones..
continue
item["score"] = {"EM": int(em == True), "QA-F1": float(f1)}
item["data_name"] = data_name
if em == False:
item["init_status"] = "error"
else:
item["init_status"] = "pass"
formatted_data.append(item)
return formatted_data
def load_datasets(args):
if args.task_name == "QA":
truth_paths = {
# "squad-train": "data/mrqa_squad/mrqa_squad_train.jsonl",
"squad": "data/mrqa_squad/mrqa_squad_dev.jsonl",
"nq": "data/mrqa_naturalquestions/mrqa_naturalquestions_dev.jsonl", #
"trivia": "data/mrqa_triviaqa/mrqa_triviaqa_dev.jsonl",
"hotpot": "data/mrqa_hotpotqa/mrqa_hotpotqa_dev.jsonl",
"news": "data/mrqa_newsqa/mrqa_newsqa_dev.jsonl",
"search": "data/mrqa_searchqa/mrqa_searchqa_dev.jsonl",
}
prediction_paths = {
# "squad-train": "upstream_resources/qa_upstream_preds/mrqa_squad_train.predictions.json",
"squad": "upstream_resources/qa_upstream_preds/mrqa_squad_dev.predictions.json",
"nq": "upstream_resources/qa_upstream_preds/mrqa_naturalquestions_dev.predictions.json",
"trivia": "upstream_resources/qa_upstream_preds/mrqa_triviaqa_dev.predictions.json",
"hotpot": "upstream_resources/qa_upstream_preds/mrqa_hotpotqa_dev.predictions.json",
"news": "upstream_resources/qa_upstream_preds/mrqa_newsqa_dev.predictions.json",
"search": "upstream_resources/qa_upstream_preds/mrqa_searchqa_dev.predictions.json",
}
upstream_data_name = "squad"
elif args.task_name == "NLI":
truth_paths = {
# "squad-train": "data/mrqa_squad/mrqa_squad_train.jsonl",
"snli": "data/snli/snli_validation.jsonl",
"multi_nli_matched": "data/multi_nli/multi_nli_validation_matched.jsonl", #
"multi_nli_mismatched": "data/multi_nli/multi_nli_validation_mismatched.jsonl", #
"scitail": "data/scitail/scitail_dev.jsonl",
"anli": "data/anli/anli_dev.jsonl",
}
prediction_paths = {
"snli": "upstream_resources/nli_upstream_preds/snli-snli_validation.predictions.json",
"multi_nli_matched": "upstream_resources/nli_upstream_preds/multi_nli-multi_nli_validation_matched.predictions.json", #
"multi_nli_mismatched": "upstream_resources/nli_upstream_preds/multi_nli-multi_nli_validation_mismatched.predictions.json", #
"scitail": "upstream_resources/nli_upstream_preds/scitail-scitail_dev.predictions.json",
"anli": "upstream_resources/nli_upstream_preds/anli-anli_dev.predictions.json",
}
upstream_data_name = "snli"
all_truth_data = {}
submission_data = {}
heldout_submission_data = {}
upstream_sampled_data = []
for data_name, data_file in truth_paths.items():
truth_data = []
with open(data_file) as fin:
lines = fin.readlines()
# train_examples = []
for line in lines:
d = json.loads(line)
truth_data.append((d["input"], d["output"], d["id"]))
all_truth_data[data_name] = truth_data
for data_name, prediction_file in prediction_paths.items():
with open(prediction_file, "r") as f:
predictions = json.load(f)
# get evaluation results.
results, results_all = evaluate_func(
predictions, all_truth_data[data_name], args.metric, return_all=True)
print(f"{data_name} --- Evaluation results: {results}")
formatted_data = formatting_initial_status(data_name, predictions, all_truth_data[data_name], results_all)
random.shuffle(formatted_data)
if data_name == upstream_data_name:
# random.sample(formatted_data, k=args.upstream_eval_size)
upstream_sampled_data = formatted_data[:args.upstream_eval_size]
submission_data[upstream_data_name] = formatted_data[args.upstream_eval_size:]
# print(f"len(upstream_sampled_data])={len(upstream_sampled_data)}")
else:
heldout_submission_data[data_name] = formatted_data[:args.heldout_submission_size] # held-out
submission_data[data_name] = formatted_data[args.heldout_submission_size:]
# print(f"len(heldout_submission_data['{data_name}'])={len(heldout_submission_data[data_name])}")
print(f"len(submission_data['{data_name}'])={len(submission_data[data_name])}")
for data_name, data in submission_data.items():
num_examples = len(data)
error_nums = [1 for item in data if item["init_status"] == "error"]
print(f"{data_name} -- # examples = {num_examples}; Error rate: {sum(error_nums)/num_examples}")
# QA_submission_data, QA_heldout_submission_data, QA_upstream_sampled_data
return submission_data, heldout_submission_data, upstream_sampled_data
def generate_submission_stream(submission_data, args, cfg):
submission_stream = []
upstream = cfg["upstream"]; T = cfg["T"]; b = cfg["b"]
alpha = cfg["alpha"]; beta = cfg["beta"]; gamma = cfg["gamma"]
assert upstream in submission_data
OODs = [data_name for data_name in submission_data if data_name != upstream]
N = len(OODs) # except for the upstream data
    if beta == 1 and args.task_name.lower() == "qa":
        current_major_ood = "nq"  # fix the major OOD cluster for the QA beta=1 setting
    else:
        current_major_ood = random.choice(OODs)  # the initial major OOD cluster
for t in range(1, T+1):
S_t = []
        if alpha == 0:
            b_upstream = 0  # special-case alpha=0: otherwise alpha**(t-1) == 0**0 == 1 at t=1 would fill the first episode with upstream data
        else:
            b_upstream = round(b * (alpha**(t-1)))
b_ood = b - b_upstream
b_ood_major = round(b_ood * gamma)
b_ood_diverse = b_ood - b_ood_major
S_t += random.sample(submission_data[upstream], k=b_upstream)
S_t += random.sample(submission_data[current_major_ood], k=b_ood_major)
other_oods = [o for o in OODs if o != current_major_ood]
# diverse_pools = []
for o in other_oods:
# diverse_pools += submission_data[o]
S_t += random.sample(submission_data[o], k=int(b_ood_diverse/len(other_oods)))
if len(S_t) < b:
o = random.choice(other_oods)
S_t += random.sample(submission_data[o], k=b-len(S_t))
assert len(S_t) == b
# deal with the buffer
# Switch major ood
if random.random() < 1 - beta:
current_major_ood = random.choice(other_oods)
submission_stream.append(S_t)
# visualize_stream(submission_stream, [upstream] + OODs, cfg, args)
return submission_stream
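# Worked example of the episode composition above (numbers for illustration):
# with b=64, alpha=0.9, gamma=0.8 at t=3,
#   b_upstream    = round(64 * 0.9**2) = 52
#   b_ood         = 64 - 52 = 12
#   b_ood_major   = round(12 * 0.8) = 10
#   b_ood_diverse = 12 - 10 = 2, split evenly across the remaining OOD datasets.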
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--upstream_eval_size", type=int, default=512)
parser.add_argument("--heldout_submission_size", type=int, default=256)
parser.add_argument("--episode_size", type=int, default=64)
parser.add_argument("--num_episodes", type=int, default=100)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--metric", default="EM|QA-F1")
parser.add_argument("--num_val", type=int, default=3)
parser.add_argument("--num_test", type=int, default=5)
parser.add_argument("--submission_stream_file", default="experiments/eval_data/qa/submission_stream.#args.json")
parser.add_argument("--sampled_upstream_dataset", default="experiments/eval_data/qa/upstream_eval.jsonl")
parser.add_argument("--heldout_submission_eval_file", default="experiments/eval_data/qa/heldout_eval.jsonl")
parser.add_argument("--task_name", default="QA")
args = parser.parse_args()
print(args)
set_seeds(args.seed)
if args.task_name == "NLI":
args.submission_stream_file = args.submission_stream_file.replace("qa", "nli")
args.sampled_upstream_dataset = args.sampled_upstream_dataset.replace("qa", "nli")
args.heldout_submission_eval_file = args.heldout_submission_eval_file.replace("qa", "nli")
# QA:
configs = {}
# configs["QA"] = dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.98, beta=1, gamma=1)
configs["QA"] = []
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.9, gamma=0.8))
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.8))
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.1, gamma=0.8))
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.5))
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.2))
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.1, beta=0.5, gamma=0.8))
configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.95, beta=0.5, gamma=0.8))
configs["NLI"] = []
configs["NLI"].append(dict(upstream="snli", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.9, gamma=0.8))
configs["NLI"].append(dict(upstream="snli", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.8))
configs["NLI"].append(dict(upstream="snli", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.1, gamma=0.8))
# if args.task_name == "QA":
submission_data, heldout_submission_data, upstream_sampled_data = load_datasets(args)
with open(args.heldout_submission_eval_file, "w") as f:
flat_heldout_submission_data = []
for v in list(heldout_submission_data.values()):
flat_heldout_submission_data += v
for item in flat_heldout_submission_data:
f.write(json.dumps(item) + "\n")
with open(args.sampled_upstream_dataset, "w") as f:
for item in upstream_sampled_data:
f.write(json.dumps(item) + "\n")
cfgs = configs[args.task_name]
for cfg in cfgs:
        # Generate validation/test streams
validation_streams = []
test_streams = []
for _ in range(args.num_val):
submission_stream = generate_submission_stream(submission_data, args, cfg)
validation_streams.append(submission_stream)
for _ in range(args.num_test):
submission_stream = generate_submission_stream(submission_data, args, cfg)
test_streams.append(submission_stream)
prefix_title_str = f"T={cfg['T']},b={cfg['b']},alpha={cfg['alpha']},beta={cfg['beta']},gamma={cfg['gamma']}"
title_str = prefix_title_str + "-val"
with open(args.submission_stream_file.replace("#args", title_str), "w") as f:
print(f"To save {f.name}")
json.dump(validation_streams, f)
title_str = prefix_title_str + "-test"
with open(args.submission_stream_file.replace("#args", title_str), "w") as f:
print(f"To save {f.name}")
json.dump(test_streams, f)
if __name__ == '__main__':
main()
"""
python cmr/benchmark_gen/sample_submission_streams.py --task_name QA
python cmr/benchmark_gen/sample_submission_streams.py --task_name NLI --episode_size 256
"""
|
CMR-main
|
cmr/benchmark_gen/sample_submission_streams.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/benchmark_gen/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import argparse
import random
parser = argparse.ArgumentParser()
parser.add_argument(
"--upstream_file",
default="data/mrqa_squad/mrqa_squad_train.jsonl", type=str)
parser.add_argument(
"--submission_file",
default="experiments/eval_data/qa/submission_stream.T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8.json", type=str)
parser.add_argument(
"--mixed_offline_file",
default="experiments/eval_data/qa/offline_retrain.T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8.jsonl", type=str)
parser.add_argument(
"--heldout_eval_file",
default="experiments/eval_data/qa/heldout_eval.jsonl", type=str)
parser.add_argument("--ratio", default=-1, type=float)
args = parser.parse_args()
with open(args.submission_file, "r") as f :
data_stream = json.load(f)
all_init_errors = []
for data_batch in data_stream:
for item in data_batch:
if item["init_status"] == "error":
data = dict(id=item["id"], input=item["input"], output=item["truth"])
all_init_errors.append(json.dumps(data))
eval_examples = []
with open(args.heldout_eval_file) as f:
eval_lines = f.read().splitlines()
for line in eval_lines:
item = json.loads(line)
data = dict(id=item["id"], input=item["input"], output=item["truth"])
eval_examples.append(json.dumps(data))
# heldout_eval_file
with open(args.upstream_file) as f:
upstream_lines = f.read().splitlines()
if args.ratio != 1:
    upstream_lines = random.sample(upstream_lines, len(all_init_errors))  # sample the same number of upstream examples as initial errors
mixed_lines = upstream_lines + all_init_errors
with open(args.mixed_offline_file, "w") as f:
for line in mixed_lines:
f.write(line+"\n")
with open(args.mixed_offline_file.replace(".jsonl", ".dev.jsonl"), "w") as f:
for line in eval_examples:
f.write(line+"\n")
print(f"len(upstream_lines)={len(upstream_lines)}")
print(f"len(all_init_errors)={len(all_init_errors)}")
print(f"len(mixed_lines)={len(mixed_lines)}")
print(f"len(eval_examples)={len(eval_examples)}")
|
CMR-main
|
cmr/benchmark_gen/generate_offline_retrainfile.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
from cmr.models.utils import set_seeds
import sys
import numpy as np
import torch
from cmr.benchmark_gen import bart_api
from cmr.task_manager.eval_metrics import (evaluate_func,
normalize_answer)
def main():
parser = argparse.ArgumentParser()
# Mode
parser.add_argument("--post_process", action='store_true')
# Data distributed
parser.add_argument("--data_dist", action='store_true')
parser.add_argument('--local_id', type=int, default=-1, help="")
parser.add_argument('--num_shards', type=int, default=-1, help="")
# Basic parameters
parser.add_argument(
"--data_file", default="data/mrqa_naturalquestions/mrqa_naturalquestions_dev.100.jsonl", required=False)
parser.add_argument(
"--prediction_file", default="bug_data/mrqa_naturalquestions_dev.bugs.jsonl", required=False)
# parser.add_argument(
# "--bug_file", default="bug_data/mrqa_naturalquestions_dev.bugs.jsonl", required=False)
    parser.add_argument(
        "--config_file", default="scripts/infer_mrqa_bart_base.config", required=False)
parser.add_argument("--prefix", default="", required=False)
# API for Evaluation
parser.add_argument("--metric", default="EM|QA-F1", required=False)
# Sampling
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
args = parser.parse_args()
set_seeds(args.seed)
log_filename = "logs/build_bugpool_log_{}.txt".format(args.prefix)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
handlers=[logging.FileHandler(os.path.join(log_filename)),
logging.StreamHandler()])
logger = logging.getLogger(__name__)
# get the truth data
truth_data = []
with open(args.data_file) as fin:
lines = fin.readlines()
# train_examples = []
for line in lines:
# d = line.strip().split("\t")
# truth_data.append((d[0], d[1:]))
d = json.loads(line)
truth_data.append((d["input"], d["output"], d["id"]))
# get the predictions of a model via its API and config file.
if not args.post_process:
predictions = bart_api.inference_api(
            config_file=args.config_file,
test_file=args.data_file,
logger=logger, data_dist=args.data_dist, num_shards=args.num_shards, local_id=args.local_id)
with open(args.prediction_file, "w") as f:
json.dump(predictions, f)
else:
# base_path = "bug_data/mrqa_naturalquestions_train.predictions.shard_id.jsonl"
# num_shards = 7
predictions = []
for shard_id in range(0, args.num_shards):
current_file = args.prediction_file.replace("shard_id", str(shard_id))
print("loading", current_file)
with open(current_file, "r") as f:
for line in f.read().splitlines():
predictions += json.loads(line)
print(len(predictions), len(truth_data))
# get evaluation results.
results, results_all = evaluate_func(
predictions, truth_data, args.metric, return_all=True)
logging.info(f"Evaluation results: {results}")
with open(args.prediction_file.replace(".shard_id.", "."), "w") as f:
json.dump(predictions, f)
# bug_lines, pass_lines = generate_bugs(predictions, truth_data, results_all)
# logging.info("{} example are passed. Found {} bugs ".format(
# len(pass_lines), len(bug_lines)))
# # save the bugs
# with open(args.bug_file, "w") as f:
# f.write("\n".join(bug_lines))
# # save the passes
# with open(args.bug_file.replace("bugs", "pass"), "w") as f:
# f.write("\n".join(pass_lines))
if __name__ == '__main__':
main()
"""
python cmr/benchmark_gen/run_bart_infer.py \
--
"""
|
CMR-main
|
cmr/benchmark_gen/run_bart_infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import argparse
import json
import random
from cmr.task_manager.eval_metrics import evaluate_func
import numpy as np
def generate_bugs(predictions, truth_data, results_all, f1_upper_bound=0.5):
assert len(predictions) == len(truth_data) == len(
results_all["EM"]) == len(results_all["QA-F1"])
bug_lines = []
pass_lines = []
for p, t, em, f1 in zip(predictions, truth_data, results_all["EM"], results_all["QA-F1"]):
item = dict()
item["input"] = t[0]
item["truth"] = t[1]
item["id"] = t[2]
item["mistake"] = p.strip()
item["score"] = {"EM": int(em == True), "QA-F1": float(f1)}
if em == False and f1 <= f1_upper_bound: # decide later about the threshold of f1 score
bug_lines.append(item)
item["init_status"] = "error"
if em == True:
pass_lines.append(item)
item["init_status"] = "pass"
return bug_lines, pass_lines
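# Note on the thresholds above: an example becomes a bug only when EM fails and
# QA-F1 <= f1_upper_bound; EM successes become passes; near-misses (EM False but
# QA-F1 > f1_upper_bound) are excluded from both pools.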
def get_data_stream(data_pool, batch_size, num_batches, use_score=False):
assert batch_size * num_batches <= len(data_pool)
if use_score:
# from easier to harder
sorted_bugs = sorted(data_pool, key=lambda x: x["score"]["QA-F1"], reverse=True)
else:
        # no sorting, randomly shuffled
random.shuffle(data_pool)
sorted_bugs = data_pool
data_stream = []
for start in range(0, len(data_pool), batch_size):
end = min(start + batch_size, len(data_pool))
data_batch = sorted_bugs[start:end]
data_stream.append(data_batch)
if len(data_stream) == num_batches:
break
return data_stream
def get_data_stream_with_replacement(data_pool, batch_size, num_batches):
assert batch_size * num_batches <= len(data_pool)
    # no sorting, randomly shuffled
random.shuffle(data_pool)
data_stream = []
seen_ids = set()
duplicate_ids = set()
num_repetition = 0
num_revisited_times = 0
for _ in range(0, num_batches):
data_batch = random.sample(data_pool, batch_size)
data_stream.append(data_batch)
num_repetition += len([_ for item in data_batch if item["id"] in seen_ids])
revisited_ids = [item["id"] for item in data_batch if item["id"] in seen_ids]
num_revisited_times += len(revisited_ids)
duplicate_ids.update(revisited_ids)
seen_ids.update([item["id"] for item in data_batch])
print(f"num_repetition: {num_repetition}; num_total_examples: {len(seen_ids)}; length: {batch_size * num_batches}; ratio: {num_repetition/(batch_size * num_batches)}; num_duplicate_ids: {len(duplicate_ids)}; num_revisited_times: {num_revisited_times}")
return data_stream
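# Usage sketch (comments only; pool items just need an "id" field):
#   pool = [{"id": f"ex-{i}"} for i in range(10)]
#   stream = get_data_stream_with_replacement(pool, batch_size=2, num_batches=4)
#   -> 4 episodes of 2 examples each, with repetition statistics printed.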
def get_replay_stream(data_stream, replay_eval_size, window_size=10):
    past_error_pool = {}  # errors in terms of the initial model
replay_stream = []
for timecode, data_batch in enumerate(data_stream):
# add the errors to the pool
past_error_pool[timecode] = []
for item in data_batch:
if True or item["init_status"] == "error":
past_error_pool[timecode].append(item)
# build the pool
start_ind = max(0, timecode-window_size)
end_ind = min(timecode, len(past_error_pool))
candidate_replay_instances = []
if end_ind == 0:
continue # do not add for the first episode because there is no history for it
for ind in range(start_ind, end_ind): # not including itself
candidate_replay_instances += past_error_pool[ind]
for _db in data_stream[-5:]:
if len(candidate_replay_instances) >= replay_eval_size:
break
for item in _db:
if len(candidate_replay_instances) >= replay_eval_size:
break
if item["init_status"] == "pass":
candidate_replay_instances.append(item)
# print(start_ind, end_ind, len(candidate_replay_instances))
assert len(candidate_replay_instances) >= replay_eval_size
sampled_replay = random.sample(candidate_replay_instances, replay_eval_size)
replay_stream.append(sampled_replay)
assert len(replay_stream) == len(data_stream) - 1
return replay_stream
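# Note on get_replay_stream: for each timecode t > 0, the replay batch is sampled
# from the previous `window_size` data batches; when that history holds fewer than
# `replay_eval_size` items, it is padded with "pass" items drawn from the tail of
# the stream before sampling.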
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_file", default="data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl", required=False)
parser.add_argument(
"--prediction_file", default="bug_data/mrqa_naturalquestions_train.predictions.jsonl", required=False) # Input
parser.add_argument(
"--data_stream_file", default="bug_data/mrqa_naturalquestions_dev.data_stream.train.json", required=False) # Output
parser.add_argument(
"--replay_stream_file", default="bug_data/mrqa_naturalquestions_dev.replay_stream.train.json", required=False) # Output
parser.add_argument(
"--hidden_example_file", default="bug_data/mrqa_naturalquestions.hidden.jsonl", required=False) # Output
parser.add_argument("--batch_size", type=int, default=32, required=False)
parser.add_argument("--replay_eval_size", type=int, default=-1, required=False)
parser.add_argument("--bug_sample_size", type=int, default=1000, required=False)
parser.add_argument("--max_bug_each_data", type=int, default=-1, required=False)
parser.add_argument("--pass_sample_size", type=int, default=2200, required=False)
parser.add_argument("--hidden_sample_size", type=int, default=-1, required=False)
parser.add_argument("--num_batches", type=int, default=100, required=False)
parser.add_argument("--seed", type=int, default=42, required=False)
parser.add_argument("--metric", default="EM|QA-F1", required=False)
parser.add_argument("--sample_method", default="no_replace", required=False)
# batch_size * num_batches <= # lines of bug_pool_file
args = parser.parse_args()
print(args)
random.seed(args.seed)
all_truth_data = []
for data_file in args.data_file.split("#"):
truth_data = []
with open(data_file) as fin:
lines = fin.readlines()
# train_examples = []
for line in lines:
# d = line.strip().split("\t")
# truth_data.append((d[0], d[1:]))
d = json.loads(line)
truth_data.append((d["input"], d["output"], d["id"]))
all_truth_data.append(truth_data)
all_pred_data = []
    merged_results_all = None
for prediction_file in args.prediction_file.split("#"):
with open(prediction_file, "r") as f:
predictions = json.load(f)
# get evaluation results.
print(f"len(predictions): {len(predictions)}")
print(f"len(all_truth_data[len(all_pred_data)]): {len(all_truth_data[len(all_pred_data)])}")
results, results_all = evaluate_func(
predictions, all_truth_data[len(all_pred_data)], args.metric, return_all=True)
print(f"{prediction_file}; Evaluation results: {results}")
all_pred_data.append(predictions)
        if merged_results_all is None:
            merged_results_all = results_all
        else:
            for key in merged_results_all:
                merged_results_all[key].extend(results_all[key])
merged_truth_data = []
for item in all_truth_data:
merged_truth_data.extend(item)
merged_predictions = []
for item in all_pred_data:
merged_predictions.extend(item)
    bug_pool, pass_pool = generate_bugs(merged_predictions, merged_truth_data, merged_results_all)
# make each dataset has the same number of examples
if len(all_truth_data) >= 2:
filtered_bug_pool = []
counts = {}
random.shuffle(bug_pool)
for item in bug_pool:
dataset_name = item["id"].split("-")[0]
if dataset_name not in counts:
counts[dataset_name] = 0
if counts[dataset_name] >= args.max_bug_each_data and args.max_bug_each_data > 0:
continue
filtered_bug_pool.append(item)
counts[dataset_name] += 1
bug_pool = filtered_bug_pool
# exit()
print(f"len(bug_pool)={len(bug_pool)}; len(pass_pool)={len(pass_pool)} <--- len(predictions)={len(predictions)}")
# bug_pool = []
# with open(args.bug_pool_file) as f:
# for line in f.read().splitlines():
# bug_pool.append(json.loads(line))
# with open(args.pass_pool_file) as f:
# pass_pool = [json.loads(line) for line in f.read().splitlines()]
# pass_pool = [item for item in pass_pool if item["score"]
# ["EM"] == 1] # only the EM=1 examples
random.shuffle(pass_pool)
random.shuffle(bug_pool)
if args.bug_sample_size >= 0 and args.pass_sample_size >= 0:
sampled_bug_pool = bug_pool[:args.bug_sample_size]
sampled_pass_pool = pass_pool[:args.pass_sample_size]
if args.hidden_sample_size > 0 and args.hidden_sample_size + args.pass_sample_size <= len(pass_pool):
if len(all_truth_data) >= 2:
# make equal test examples.
hidden_examples = []
counts = {}
random.shuffle(pass_pool)
for item in pass_pool:
dataset_name = item["id"].split("-")[0]
if dataset_name not in counts:
counts[dataset_name] = 0
if counts[dataset_name] >= (args.hidden_sample_size/len(all_truth_data)):
continue
hidden_examples.append(item)
counts[dataset_name] += 1
else:
hidden_examples = pass_pool[-args.hidden_sample_size:]
with open(args.hidden_example_file, "w") as f:
for item in hidden_examples:
f.write(json.dumps(item) + "\n")
print(len(sampled_bug_pool), len(sampled_pass_pool))
sampled_data_pool = sampled_bug_pool + sampled_pass_pool
else:
sampled_data_pool = pass_pool + bug_pool
sampled_data_pool = sampled_data_pool[:args.batch_size * args.num_batches]
if args.sample_method == "no_replace":
data_stream = get_data_stream(
sampled_data_pool, args.batch_size, args.num_batches, use_score=False) # randomly sorted bugs
    elif args.sample_method == "with_replace":
        data_stream = get_data_stream_with_replacement(
            sampled_data_pool, args.batch_size, args.num_batches)  # randomly sorted bugs
    else:
        raise ValueError(f"Unknown --sample_method: {args.sample_method}")
if args.replay_eval_size > 0:
replay_stream = get_replay_stream(data_stream, args.replay_eval_size)
# replay_stream.insert(0, random.sample(sampled_bug_pool, args.replay_eval_size))
replay_stream.insert(0, random.sample(sampled_data_pool, args.replay_eval_size))
with open(args.replay_stream_file, "w") as f:
json.dump(replay_stream, f)
with open(args.data_stream_file, "w") as f:
json.dump(data_stream, f)
if __name__ == '__main__':
main()
"""
python semanticdebugger/benchmark_gen/sample_stream_data.py \
--sample_method no_replace \
--data_file \
data/mrqa_naturalquestions/mrqa_naturalquestions_dev.jsonl#\
data/mrqa_squad/mrqa_squad_dev.jsonl#\
data/mrqa_triviaqa/mrqa_triviaqa_dev.jsonl#\
data/mrqa_hotpotqa/mrqa_hotpotqa_dev.jsonl \
--prediction_file \
bug_data/mrqa_naturalquestions_dev.predictions.jsonl#\
bug_data/mrqa_squad_dev.predictions.jsonl#\
bug_data/mrqa_triviaqa_dev.predictions.jsonl#\
bug_data/mrqa_hotpotqa_dev.predictions.jsonl \
--data_stream_file exp_results/data_streams/mrqa.mixed.data_stream.test.json \
--hidden_sample_size 1000 \
--hidden_example_file exp_results/data_streams/mrqa.mixed.hidden_passes.jsonl \
--batch_size 32 --num_batches 100 \
--seed 42 \
--max_bug_each_data 800 \
--bug_sample_size 3200 --pass_sample_size 0
# python semanticdebugger/benchmark_gen/sample_stream_data.py \
# --sample_method no_replace \
# --data_file data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl \
# --prediction_file bug_data/mrqa_naturalquestions_train.predictions.jsonl \
# --data_stream_file exp_results/data_streams/mrqa_naturalquestions_dev.data_stream.train.wr.json \
# --hidden_example_file exp_results/data_streams/mrqa_naturalquestions_dev.hidden_passes.jsonl \
# --batch_size 32 --num_batches 500 \
# --bug_sample_size 4688 --pass_sample_size 0 \
# --hidden_sample_size -1
"""
|
CMR-main
|
cmr/benchmark_gen/sample_stream_data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file_pattern",
default="exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_#.json", type=str)
parser.add_argument(
"--output_file",
default="exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json", type=str)
parser.add_argument(
"--range",
default="range(16)", type=str)
parser.add_argument(
"--mode",
default="json", type=str)
args = parser.parse_args()
all_data = None
for shard_id in eval(args.range):  # NOTE: eval() parses the range expression; use with trusted input only
filename = args.input_file_pattern.replace("#", str(shard_id))
if args.mode == "json":
with open(filename) as f:
print(f.name)
data = json.load(f)
elif args.mode == "jsonl":
with open(filename) as f:
print(f.name)
data = [json.loads(line) for line in f.read().splitlines() if line]
if all_data is None:
all_data = data
else:
        if isinstance(all_data, dict):
all_data.update(data)
else:
all_data += data
with open(args.output_file, "w") as f:
if args.mode == "json":
json.dump(all_data, f)
elif args.mode == "jsonl":
for item in all_data:
f.write(json.dumps(item) + "\n")
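# Example invocation (illustrative; the paths are this script's argparse defaults):
# python cmr/benchmark_gen/merge_json_file.py \
#   --input_file_pattern exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_#.json \
#   --output_file exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json \
#   --range "range(16)" --mode json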
|
CMR-main
|
cmr/benchmark_gen/merge_json_file.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import os
import sys
from argparse import Namespace
import torch
from cmr.models.mybart import MyBart
from cmr.models.run_bart import inference
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from tqdm import tqdm
from transformers import BartConfig, BartTokenizer
def inference_api(config_file, test_file, logger, data_dist, num_shards, local_id):
with open(config_file) as f:
        config_args = eval(f.read())  # a Namespace object written in Python syntax
args = config_args
logger.info(f"Config args: {config_args}")
# load config from json
test_data = GeneralDataset(
logger, args, test_file, data_type="dev", is_training=False, task_name=args.dataset, data_dist=data_dist, num_shards=num_shards, local_id=local_id)
tokenizer = BartTokenizer.from_pretrained("bart-large")
test_data.load_dataset(tokenizer, skip_cache=data_dist)
test_data.load_dataloader()
checkpoint = os.path.join(args.predict_checkpoint)
logger.info("Loading checkpoint from {} ....".format(checkpoint))
model = MyBart.from_pretrained(args.model,
state_dict=convert_model_to_single_gpu(torch.load(checkpoint)))
logger.info("Loading checkpoint from {} .... Done!".format(checkpoint))
if torch.cuda.is_available():
model.to(torch.device("cuda"))
model.eval()
predictions = inference(
model, test_data, save_predictions=False, verbose=True, args=args, logger=logger, return_all=False, predictions_only=True)
return predictions
# logger.info("%s on %s data: %.s" % (test_data.metric, test_data.data_type, str(result)))
|
CMR-main
|
cmr/benchmark_gen/bart_api.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import numpy as np
import string
import re
from collections import Counter
from sklearn.metrics import matthews_corrcoef, f1_score
from scipy.stats import pearsonr, spearmanr
# from rouge import Rouge
METRICS = {
'mrqa_naturalquestions': 'EM|QA-F1',
'mrqa': 'EM|QA-F1',
'nli': 'EM',
'csr': 'EM|QA-F1',
}
def accuracy(prediction, ground_truth):
return prediction.lower() == ground_truth.lower()
def evaluate_func(predictions, data, metric, return_all=False):
def cast_to_float(predictions):
new_predictions = []
for prediction in predictions:
try:
new_predictions.append(float(prediction.strip()))
            except Exception:  # the prediction is not a parseable number
                new_predictions.append(float('NaN'))
assert len(new_predictions) == len(predictions)
return new_predictions
assert len(predictions) == len(data)
all_metrics = [m.strip() for m in metric.split("|")]
results = {}
results_all = {}
for m in all_metrics:
if m == "EM":
ems = []
for (prediction, dp) in zip(predictions, data):
ems.append(get_exact_match_over_list(prediction, dp[1]))
results[m] = np.mean(ems)
results_all[m] = [bool(_i) for _i in ems]
elif m == "ACC":
accs = []
for (prediction, dp) in zip(predictions, data):
accs.append(get_accruacy_over_list(prediction, dp[1]))
results[m] = np.mean(accs)
results_all[m] = accs
elif m == "QA-F1":
f1s = []
for (prediction, dp) in zip(predictions, data):
f1s.append(get_f1_over_list(prediction, dp[1]))
results[m] = np.mean(f1s)
# results_all[m] = f1s
results_all[m] = [float(_i) for _i in f1s]
elif m == "Classification-F1":
results[m] = f1_score([dp[1][0]
for dp in data], predictions, average="macro")
elif m == "Matthew-Correlation":
results[m] = get_matthews_corr(data, predictions)
elif m == "Pearson-Correlation":
predictions = cast_to_float(predictions)
results[m] = pearsonr([float(dp[1][0])
for dp in data], predictions)[0]
# elif m == "Rouge-L":
# rouges = []
# for (prediction, dp) in zip(predictions, data):
# rouges.append(get_rouge_over_list(prediction, dp[1]))
# results[m] = np.mean(rouges)
if return_all:
return results, results_all
return results
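# Illustrative usage (hypothetical values; data items are (input, answers, id) tuples):
#   predictions = ["Paris", "blue"]
#   data = [("q0 ...", ["Paris"], "nq-0"), ("q1 ...", ["red"], "nq-1")]
#   results, results_all = evaluate_func(predictions, data, "EM|QA-F1", return_all=True)
#   -> results == {"EM": 0.5, "QA-F1": 0.5}; results_all["EM"] == [True, False]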
def get_matthews_corr(data, predictions):
# only cola is using this...?
new_predictions = []
for prediction in predictions:
if prediction.strip() == "acceptable":
new_predictions.append(1.0)
else:
new_predictions.append(0.0)
new_gold = []
for dp in data:
if dp[1][0] == "acceptable":
new_gold.append(1.0)
else:
new_gold.append(0.0)
return matthews_corrcoef(new_gold, new_predictions)
def qa_f1_score(prediction, ground_truth):
prediction_tokens = prediction.split()
ground_truth_tokens = ground_truth.split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
# def get_rouge_over_list(prediction, groundtruth):
# def remove_punc(text):
# exclude = set(string.punctuation)
# return ''.join(ch for ch in text if ch not in exclude)
# if len(remove_punc(prediction)) == 0:
# return 0.0 # during early stages, it might generate nothin?
# # print(prediction)
# rouge = Rouge()
# if type(groundtruth)==list:
# if len(groundtruth)==0:
# return 0
# return np.max([rouge.get_scores(prediction, gt, avg=True)["rouge-l"]["f"] for gt in groundtruth])
# return rouge.get_scores(prediction, groundtruth, avg=True)["rouge-l"]["f"]
def get_accruacy_over_list(prediction, groundtruth):
if type(groundtruth) == list:
if len(groundtruth) == 0:
return 0
return np.max([accuracy(prediction, gt) for gt in groundtruth])
return accuracy(prediction, groundtruth)
def get_f1_over_list(prediction, groundtruth):
# if type(groundtruth)==list:
if len(groundtruth) == 0:
return 0
prediction_norm = normalize_answer(prediction)
return np.max([qa_f1_score(prediction_norm, normalize_answer(gt)) for gt in groundtruth])
# return qa_f1_score(prediction, groundtruth)
def get_exact_match_over_list(prediction, groundtruth):
# if type(groundtruth)==list:
if len(groundtruth) == 0:
return 0
prediction_norm = normalize_answer(prediction)
return np.max([(prediction_norm == normalize_answer(gt)) for gt in groundtruth])
# return (normalize_answer(prediction) == groundtruth)
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
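# Worked examples (illustrative):
#   normalize_answer("The Cat!") == "cat"   # lowercase, strip punctuation and articles
#   qa_f1_score("cat sat", "cat") == 2 * (0.5 * 1.0) / (0.5 + 1.0)  # precision=1/2, recall=1 -> ~0.667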
|
CMR-main
|
cmr/task_manager/eval_metrics.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/task_manager/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import os
import json
from .base_datamanager import MyQADataset, MyDataLoader
from .eval_metrics import METRICS, evaluate_func
import torch
import numpy as np
class GeneralDataset(object):
def __init__(self, logger, args, data_path, data_type, is_training, task_name, given_data=None, data_dist=False, num_shards=-1, local_id=-1):
# should give the tasks used in this split in the var "tasks"
self.data_path = data_path
self.data_type = data_type
self.data = []
self.task_name = task_name
if given_data is not None:
self.data = given_data
else:
with open(data_path) as fin:
lines = fin.readlines()
# train_examples = []
for line in lines:
# d = line.strip().split("\t")
# self.data.append((d[0], d[1:]))
d = json.loads(line)
self.data.append((d["input"], d["output"], d["id"]))
self.is_training = is_training
        self.load = (not args.debug) if hasattr(args, "debug") else True  # use the preprocessed cache unless args.debug is set
self.logger = logger
self.args = args
self.metric = METRICS[self.task_name]
# self.max_input_length = self.args.max_input_length
self.tokenizer = None
self.dataset = None
self.dataloader = None
self.cache = None
self.gen_early_stop = False
if data_dist and local_id >= 0 and num_shards > 0:
# num_shards = torch.distributed.get_world_size() # the number of gpus
# local_shard_id = torch.distributed.get_rank() # the current process id
self.logger.info(f'dataset_size={len(self.data)}, num_shards={num_shards}, local_shard_id={local_id}')
self.data = np.array_split(self.data, num_shards)[local_id]
# # make it evenly divisible
# indices = indices[:shard_size * num_shards]
# assert len(indices) == shard_size * num_shards
# # subsample
# indices = indices[local_shard_id:len(indices):num_shards]
# assert len(indices) == shard_size
# indices = set(indices)
def __len__(self):
return len(self.data)
def decode(self, tokens):
return self.tokenizer.decode(tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True)
def decode_batch(self, tokens):
return [self.decode(_tokens) for _tokens in tokens]
def flatten(self, answers):
new_answers, metadata = [], []
for answer in answers:
metadata.append((len(new_answers), len(new_answers)+len(answer)))
new_answers += answer
return new_answers, metadata
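    # Worked example (illustrative):
    #   flatten([["a", "b"], ["c"]]) -> (["a", "b", "c"], [(0, 2), (2, 3)])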
def load_dataset(self, tokenizer, do_return=False, skip_cache=False, quiet=False):
self.tokenizer = tokenizer
postfix = "prepro" + tokenizer.__class__.__name__.replace("zer", "zed")
inputs = []
outputs = []
uuids = []
for dp in self.data:
# Add the task name to the input
# inputs.append(" [{}] {}".format(self.task_name, dp[0]))
inputs.append(dp[0])
outputs.append(dp[1]) # is a list
uuids.append(dp[2])
if not skip_cache:
preprocessed_path = os.path.join(
"/".join(self.data_path.split("/")[:-1]),
self.data_path.split("/")[-1].replace(".jsonl", "-{}.json".format(postfix)))
self.logger.info(f"preprocessed_path={preprocessed_path}")
if not skip_cache and self.load and os.path.exists(preprocessed_path):
# load preprocessed input
self.logger.info(
"Loading pre-tokenized data from {}".format(preprocessed_path))
with open(preprocessed_path, "r") as f:
input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, \
metadata = json.load(f)
else:
if not quiet:
self.logger.info(
"Start tokenizing ... {} instances".format(len(self.data)))
if not quiet:
self.logger.info("Printing 3 examples")
for i in range(3):
self.logger.info(inputs[i])
self.logger.info(outputs[i])
            outputs, metadata = self.flatten(outputs)  # metadata: (start, end) span of each example's answers in the flattened list
# self.logger.info("Printing 3 examples's outputs and metadata after flattening")
# for i in range(3):
# self.logger.info(outputs[i])
# self.logger.info(metadata[i])
if self.args.do_lowercase:
inputs = [input0.lower() for input0 in inputs]
outputs = [output0.lower() for output0 in outputs]
if self.args.append_another_bos:
inputs = ["<s> "+input0 for input0 in inputs]
outputs = ["<s> " + output0 for output0 in outputs]
if not quiet:
self.logger.info("Tokenizing Input ...")
tokenized_input = tokenizer.batch_encode_plus(inputs,
pad_to_max_length=True,
max_length=self.args.max_input_length)
if not quiet:
self.logger.info("Tokenizing Input ... Done!")
self.logger.info("Tokenizing Output ...")
tokenized_output = tokenizer.batch_encode_plus(outputs,
pad_to_max_length=True,
max_length=self.args.max_output_length)
if not quiet:
self.logger.info("Tokenizing Output ... Done!")
input_ids, attention_mask = tokenized_input["input_ids"], tokenized_input["attention_mask"]
decoder_input_ids, decoder_attention_mask = tokenized_output[
"input_ids"], tokenized_output["attention_mask"]
if self.load and not skip_cache:
preprocessed_data = [input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata]
self.logger.info("Save preprocessed data ...")
with open(preprocessed_path, "w") as f:
json.dump([input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata], f)
self.logger.info("Save preprocessed data ... Done!")
# self.logger.info("len(input_ids): {}".format(len(input_ids)))
# self.logger.info("len(decoder_input_ids): {}".format(len(decoder_input_ids)))
# self.logger.info("len(attention_mask): {}".format(len(attention_mask)))
# self.logger.info("len(decoder_attention_mask): {}".format(len(decoder_attention_mask)))
assert len(uuids) == len(input_ids) # make sure
self.dataset = MyQADataset(input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
in_metadata=None, out_metadata=metadata,
is_training=self.is_training, uuids=uuids)
if not quiet:
self.logger.info("Loaded {} examples from {} data".format(
len(self.dataset), self.data_type))
if do_return:
return self.dataset
def load_dataloader(self, do_return=False, is_training="self"):
if is_training == "self":
is_training = self.is_training
self.dataloader = MyDataLoader(
self.args, self.dataset, is_training)
if do_return:
return self.dataloader
def evaluate(self, predictions, verbose=False):
assert len(predictions) == len(self), (len(predictions), len(self))
predictions = [prediction.strip() for prediction in predictions]
return evaluate_func(predictions, self.data, self.metric)
# ems = []
# for (prediction, dp) in zip(predictions, self.data):
# ems.append(get_exact_match(prediction.strip(), [dp[1]]))
# return np.mean(ems)
def save_predictions(self, predictions, path_to_save=None):
assert len(predictions) == len(self), (len(predictions), len(self))
predictions = ['n/a' if len(prediction.strip()) ==
0 else prediction for prediction in predictions]
prediction_text = [
prediction.strip()+'\n' for prediction in predictions]
if path_to_save:
save_path = path_to_save
else:
save_path = os.path.join(
self.args.output_dir, "{}_predictions.txt".format(self.args.prefix))
with open(save_path, "w") as f:
f.writelines(prediction_text)
self.logger.info("Saved prediction in {}".format(save_path))
|
CMR-main
|
cmr/task_manager/dataloader.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
class MyQADataset(Dataset):
def __init__(self,
input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
in_metadata=None, out_metadata=None,
is_training=False, uuids=None, seed=42):
self.uuids = uuids
self.input_ids = torch.LongTensor(input_ids)
self.attention_mask = torch.LongTensor(attention_mask)
self.decoder_input_ids = torch.LongTensor(decoder_input_ids)
self.decoder_attention_mask = torch.LongTensor(decoder_attention_mask)
self.in_metadata = list(zip(range(len(input_ids)), range(1, 1+len(input_ids)))) \
if in_metadata is None else in_metadata
self.out_metadata = list(zip(range(len(decoder_input_ids)), range(1, 1+len(decoder_input_ids)))) \
if out_metadata is None else out_metadata
self.is_training = is_training
assert len(self.input_ids) == len(
self.attention_mask) == self.in_metadata[-1][-1]
assert len(self.decoder_input_ids) == len(
self.decoder_attention_mask) == self.out_metadata[-1][-1]
np.random.seed(seed) # for selecting the same answer if there are multiple
def __len__(self):
return len(self.in_metadata)
def __getitem__(self, idx):
if not self.is_training:
idx = self.in_metadata[idx][0]
return self.input_ids[idx], self.attention_mask[idx]
in_idx = np.random.choice(range(*self.in_metadata[idx]))
out_idx = np.random.choice(range(*self.out_metadata[idx])) # if there are multiple answers
# TODO: can we pass the self.uuids[in_idx] ?
return self.input_ids[in_idx], self.attention_mask[in_idx], \
self.decoder_input_ids[out_idx], self.decoder_attention_mask[out_idx]
class MyDataLoader(DataLoader):
def __init__(self, args, dataset, is_training):
if is_training:
sampler = RandomSampler(dataset)
batch_size = args.train_batch_size
else:
sampler = SequentialSampler(dataset)
batch_size = args.predict_batch_size
super(MyDataLoader, self).__init__(
dataset, sampler=sampler, batch_size=batch_size)
|
CMR-main
|
cmr/task_manager/base_datamanager.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
import torch.nn as nn
from transformers import BartModel, RobertaModel
from transformers.activations import ACT2FN
from typing import List
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight, gain=0.0000001)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
# def RegularLinear(in_features, out_features, bias=True):
# m = nn.Linear(in_features, out_features, bias)
# nn.init.xavier_uniform_(m.weight, gain=1)
# if bias:
# nn.init.constant_(m.bias, 0.0)
# return m
# def HNetLinear(config, in_features, out_features, input_dim, output_dim, bias=True):
# var_e = 2 / (config.task_emb_dim + config.long_term_task_emb_num)
# weight_var_fanin = 1 / (2 * in_features * input_dim * var_e)
# weight_var_fanout = 1 / (in_features * output_dim * var_e)
# bias_var_fanin = 1 / (2 * config.task_emb_dim * var_e)
# bias_var_fanout = max((1 - (input_dim / output_dim)) / (config.task_emb_dim * var_e), 1e-10)
# weight_var = 2 / (1 / weight_var_fanin + 1 / weight_var_fanout)
# bias_var = 2 / (1 / bias_var_fanin + 1 / bias_var_fanout)
# m = nn.Linear(in_features, out_features, bias)
# nn.init.normal_(m.weight, 0, weight_var ** 0.5)
# if bias:
# nn.init.normal_(m.bias, 0, bias_var ** 0.5)
# return m
class MLP_Task2Adapter(nn.Module):
# takes in a encoded task description and generates parameters of an adapter
def __init__(self, config):
super().__init__()
self.input_dim = config.task_emb_dim # 768?
self.hidden_dim = config.generator_hdim
# TODO: set this output_dim = # params of adapters automatically.
self.output_dim = config.d_model * config.adapter_dim * 2 + config.d_model + config.adapter_dim
if config.adapt_layer_norm:
self.output_dim += 2 * config.d_model
self.linear1 = Linear(self.input_dim, self.hidden_dim)
self.activation_fn = ACT2FN[config.activation_function]
self.linear2 = Linear(self.hidden_dim, self.output_dim)
def forward(self, x):
x = self.linear1(x)
x = self.activation_fn(x)
x = self.linear2(x)
return x.view(-1)
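# Sketch of the parameter count this MLP emits (illustrative; assuming the repo's
# defaults d_model=1024 and adapter_dim=64 from BartWithAdapterConfig):
#   down projection: 1024 * 64 weights + 64 biases
#   up projection:   64 * 1024 weights + 1024 biases
#   -> output_dim = 2 * 1024 * 64 + 1024 + 64 = 132160 (+ 2 * 1024 if adapt_layer_norm)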
class ParameterGenerator(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
modules = []
num_adapters = config.encoder_layers + config.decoder_layers # int
for _ in range(num_adapters):
modules.append(MLP_Task2Adapter(config))
self.decoders = nn.ModuleList(modules)
def decode(self, task_emb):
return [d(task_emb) for d in self.decoders]
def forward(self, task_embedding, concat=False):
adapter_params = self.decode(task_embedding)
if concat:
adapter_params = torch.cat(adapter_params)
return adapter_params
# class GrowingBart(nn.Module):
# def __init__(self, model, meta_model, config):
# super().__init__()
# self.config = config
# self.model = model
# self.meta_model = meta_model
# def set_relation(self, rel_ids, rel_masks):
# # generate adapter parameters using task descriptions
# generated_params = self.meta_model(rel_ids, attention_mask=rel_masks)
# # apply the parameters to the adapters
# self.apply_params_to_adapters(generated_params)
# def forward(self, rel_ids, rel_masks, input_ids, input_masks, output_ids, output_masks, is_training=False):
# # generate adapter parameters using task descriptions
# generated_params = self.meta_model(rel_ids, attention_mask=rel_masks)
# # apply the parameters to the adapters
# self.apply_params_to_adapters(generated_params)
# # use the adapted model to make zero-shot inference
# ret = self.model(input_ids, attention_mask=input_masks,
# decoder_input_ids=output_ids,
# decoder_attention_mask=output_masks,
# is_training=is_training
# )
# return ret
# def apply_params_to_adapters(self, generated_params):
# encoder_params, decoder_params = generated_params[:self.config.encoder_layers], generated_params[self.config.encoder_layers:]
# d_model = self.config.d_model
# d_adapter = self.config.adapter_dim
# for p, encoder_layer in zip(encoder_params, self.model.encoders()):
# # dw, db: down weight, down bias
# # uw, ub: up weight, up bias
# dw, uw, db, ub = p[0:d_model*d_adapter], \
# p[d_model*d_adapter:d_model*d_adapter*2], \
# p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter], \
# p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
# encoder_layer.adapter_down_weight = dw.view(d_model, d_adapter)
# encoder_layer.adapter_down_bias = db.view(d_adapter)
# encoder_layer.adapter_up_weight = uw.view(d_adapter, d_model)
# encoder_layer.adapter_up_bias = ub.view(d_model)
# if self.config.adapt_layer_norm:
# encoder_layer.self_attn_layer_norm.weight.data = encoder_layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
# encoder_layer.self_attn_layer_norm.bias.data = encoder_layer.self_attn_layer_norm.bias.data + p[-1*d_model:]
# for p, decoder_layer in zip(decoder_params, self.model.decoders()):
# dw, uw, db, ub = p[0:d_model*d_adapter], \
# p[d_model*d_adapter:d_model*d_adapter*2], \
# p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter], \
# p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
# decoder_layer.adapter_down_weight = dw.view(d_model, d_adapter)
# decoder_layer.adapter_down_bias = db.view(d_adapter)
# decoder_layer.adapter_up_weight = uw.view(d_adapter, d_model)
# decoder_layer.adapter_up_bias = ub.view(d_model)
# if self.config.adapt_layer_norm:
# decoder_layer.self_attn_layer_norm.weight.data = decoder_layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
# decoder_layer.self_attn_layer_norm.bias.data = decoder_layer.self_attn_layer_norm.bias.data + p[-1*d_model:]
# # a = self.model.decoders()[-4]
# # print(a.adapter_down_weight)
# # print(a.adapter_down_bias)
# # print(a.adapter_up_weight)
# # print(a.adapter_up_bias)
|
CMR-main
|
cmr/models/hypernet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/models/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from transformers import T5ForConditionalGeneration, BartForConditionalGeneration
from transformers.modeling_bart import shift_tokens_right
from .utils import label_smoothed_nll_loss
class MyBart(BartForConditionalGeneration):
def forward(self, input_ids, attention_mask=None, encoder_outputs=None,
decoder_input_ids=None, decoder_attention_mask=None, decoder_cached_states=None,
use_cache=False, is_training=False, return_all_loss=False):
if is_training:
_decoder_input_ids = shift_tokens_right(
decoder_input_ids, self.config.pad_token_id)
else:
_decoder_input_ids = decoder_input_ids
outputs = self.model(
input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=_decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
lm_logits = F.linear(
outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
if is_training:
lprobs = F.log_softmax(lm_logits, dim=-1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs, decoder_input_ids, epsilon=0.1, ignore_index=self.config.pad_token_id, return_all_loss=return_all_loss)
return loss
return (lm_logits, ) + outputs[1:]
|
CMR-main
|
cmr/models/mybart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
import os
import numpy as np
import torch
from transformers import BartTokenizer, BartConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from cmr.task_manager.dataloader import GeneralDataset
from .mybart import MyBart
from .utils import freeze_embeds, trim_batch, convert_model_to_single_gpu
import json
from tqdm import tqdm
import copy
def run(args, logger):
tokenizer = BartTokenizer.from_pretrained("bart-large")
train_data = GeneralDataset(logger, args, args.train_file,
data_type="train", is_training=True, task_name=args.dataset)
dev_data = GeneralDataset(logger, args, args.dev_file,
data_type="dev", is_training=False, task_name=args.dataset)
train_data.load_dataset(tokenizer)
train_data.load_dataloader()
dev_data.load_dataset(tokenizer)
dev_data.load_dataloader()
best_dev_performance = None
test_performance = None
best_model_state_dict = None
if args.do_train:
if args.checkpoint is not None and args.checkpoint != "None":
logger.info(f"Loading checkpoint: {args.checkpoint}")
model = MyBart.from_pretrained(args.model,
state_dict=convert_model_to_single_gpu(torch.load(args.checkpoint)))
else:
model = MyBart.from_pretrained(args.model)
if args.freeze_embeds:
logger.info("Freezing embeddings")
freeze_embeds(model)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if torch.cuda.is_available():
model.to(torch.device("cuda"))
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
args.total_steps = args.num_train_epochs * len(train_data.dataloader)
logger.info(f"args.total_steps = {args.total_steps}")
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.total_steps)
best_dev_performance, best_model_state_dict = train(
args, logger, model, train_data, dev_data, optimizer, scheduler)
if args.do_predict:
if args.do_train and best_model_state_dict is not None:
model = MyBart.from_pretrained(args.model,
state_dict=best_model_state_dict)
logger.info("Loading checkpoint from CPU")
else:
checkpoint = os.path.join(args.predict_checkpoint)
model = MyBart.from_pretrained(args.model,
state_dict=convert_model_to_single_gpu(torch.load(checkpoint)))
logger.info("Loading checkpoint from {}".format(checkpoint))
if torch.cuda.is_available():
model.to(torch.device("cuda"))
model.eval()
data_type = "test" if "test" in args.test_file else "dev"
test_data = GeneralDataset(
logger, args, args.test_file, data_type=data_type, is_training=False, task_name=args.dataset)
test_data.load_dataset(tokenizer)
test_data.load_dataloader()
test_performance = inference(
model, test_data, save_predictions=True, verbose=True, args=args, logger=logger)
logger.info("%s on %s data: %.s" % (test_data.metric,
test_data.data_type, str(test_performance)))
return best_dev_performance, test_performance
def train(args, logger, model, train_data, dev_data, optimizer, scheduler):
model.train()
global_step = 0
train_losses = []
best_performance = None
stop_training = False
logger.info("Starting training!")
for epoch in range(int(args.num_train_epochs)):
for batch in tqdm(train_data.dataloader, desc="Epoch {}".format(epoch), disable=args.quiet):
global_step += 1
if torch.cuda.is_available():
# logger.info(f"torch.cuda.is_available()={torch.cuda.is_available()}")
batch = [b.to(torch.device("cuda")) for b in batch]
pad_token_id = train_data.tokenizer.pad_token_id
batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(batch[2], pad_token_id, batch[3])
loss = model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if torch.isnan(loss).data:
logger.info("Stop training because loss=%s" % (loss.data))
stop_training = True
break
train_losses.append(loss.detach().cpu())
loss.backward()
if global_step % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm)
optimizer.step() # We have accumulated enough gradients
scheduler.step()
model.zero_grad()
if global_step % args.eval_period == 0:
model.eval()
curr_performance = inference(
model if args.n_gpu == 1 else model.module, dev_data, args=args, save_predictions=True, logger=logger)
# TODO: save predictions when eval during training
logger.info("Step %d Train loss %.2f %s %s on epoch=%d" % (
global_step,
np.mean(train_losses),
dev_data.metric,
curr_performance,
epoch))
train_losses = []
def is_improved(best, curr):
if best is None:
return True
return any([best[m] < curr[m] for m in best])
if is_improved(best_performance, curr_performance):
best_model_state_dict = {k: v.cpu() for (
k, v) in model.state_dict().items()}
# save results
logger.info("New best perfromance %s: %s -> %s on epoch=%d, global_step=%d" %
(dev_data.metric, best_performance, curr_performance, epoch, global_step))
best_model_path = os.path.join(
args.output_dir, "best-model.pt")
with open(best_model_path.replace(".pt", "_results.json"), "w") as f:
json.dump(curr_performance, f)
logger.info(
"Saving the new best model to {}".format(best_model_path))
torch.save(best_model_state_dict, best_model_path)
best_performance = curr_performance
wait_step = 0
stop_training = False
else:
wait_step += 1
if wait_step >= args.wait_step:
stop_training = True
break
model.train()
if global_step >= args.total_steps:
stop_training = True
break
if stop_training:
break
# model_state_dict = {k:v.cpu() for (k, v) in model.state_dict().items()}
# torch.save(model_state_dict, os.path.join(args.output_dir, "last-model.pt"))
return best_performance, best_model_state_dict
def inference(model, dev_data, save_predictions=False, verbose=False, args=None, logger=None, return_all=False, predictions_only=False, compute_loss=False, loss_only=False):
model.eval()
predictions = []
bos_token_id = dev_data.tokenizer.bos_token_id
losses = [] # if needed
if args and hasattr(args, "quiet"):
quiet = args.quiet
else:
quiet = not verbose
if not quiet:
logger.info("Starting inference ...")
    for batch in tqdm(dev_data.dataloader, desc="Inference", disable=quiet):
if torch.cuda.is_available():
batch = [b.to(torch.device("cuda")) for b in batch]
pad_token_id = dev_data.tokenizer.pad_token_id
batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])
if compute_loss:
# to compute loss
batch[2], batch[3] = trim_batch(batch[2], pad_token_id, batch[3])
loss = model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True, return_all_loss=True)
# TODO: double check this part. are the results correct?
# TODO: do we need to use mean?
# logger.info(loss.shape)
loss = loss.squeeze(-1)
# logger.info(loss.shape)
loss = loss.detach().cpu()
# logger.info(f"torch.sum(loss.squeeze(-1), 1) = {torch.sum(loss.squeeze(-1), 1)}")
for each_loss in loss:
                num_nonzeros = (each_loss != 0).sum(0)
                norm_loss = each_loss.sum() / num_nonzeros
# add the normalized loss for each sentence.
losses.append(norm_loss)
if return_all:
pass
if not loss_only:
outputs = model.generate(input_ids=batch[0],
attention_mask=batch[1],
num_beams=dev_data.args.num_beams,
max_length=dev_data.args.max_output_length,
decoder_start_token_id=model.config.bos_token_id,
early_stopping=dev_data.gen_early_stop,)
for input_, output in zip(batch[0], outputs):
pred = dev_data.decode(output)
predictions.append(pred)
if not quiet:
logger.info("Starting inference ... Done")
if loss_only:
return losses
if predictions_only:
return predictions
if save_predictions:
dev_data.save_predictions(predictions, )
# logger.info("Starting evaluation metric ...")
result = dev_data.evaluate(predictions, verbose=verbose)
# logger.info("Starting evaluation metric ... Done!")
if return_all:
return predictions, result, losses
return result
|
CMR-main
|
cmr/models/run_bart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
import copy
import torch.nn as nn
import random
import numpy as np
import torch
def set_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def convert_model_to_single_gpu(state_dict):
def _convert(key):
if key.startswith('module.'):
return key[7:]
return key
return {_convert(key): value for key, value in state_dict.items()}
def label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=-100, return_all_loss=False):
"""From fairseq"""
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if not return_all_loss:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / lprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
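# Worked example (illustrative): with vocabulary size V = 4 and epsilon = 0.1,
# eps_i = 0.1 / 4 = 0.025, so loss = 0.9 * nll_loss + 0.025 * smooth_loss,
# where smooth_loss is the summed negative log-probability over the whole vocabulary.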
def freeze_params(model: nn.Module):
"""Set requires_grad=False for each of model.parameters()"""
for par in model.parameters():
par.requires_grad = False
def freeze_embeds(model):
"""Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
model_type = model.config.model_type
if model_type == "t5":
freeze_params(model.shared)
for d in [model.encoder, model.decoder]:
freeze_params(d.embed_tokens)
elif model_type == "fsmt":
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
else:
freeze_params(model.model.shared)
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def trim_batch(
input_ids,
pad_token_id,
attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
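# Illustrative effect (hypothetical tensors), with pad_token_id = 1:
#   input_ids = [[5, 6, 1],
#                [7, 1, 1]]  -> keep_column_mask = [True, True, False]
#   result    = [[5, 6],
#                [7, 1]]     (the all-pad third column is dropped)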
|
CMR-main
|
cmr/models/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from transformers.modeling_bart import EncoderLayer, DecoderLayer, BartEncoder, BartDecoder, BartModel, BartForConditionalGeneration
from transformers.modeling_bart import shift_tokens_right
from transformers.configuration_bart import BartConfig
from transformers.configuration_utils import PretrainedConfig
from .utils import label_smoothed_nll_loss
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
class BartWithAdapterConfig(BartConfig):
def __init__(
self,
activation_dropout=0.0,
activation_function="gelu",
vocab_size=50265,
d_model=1024,
encoder_ffn_dim=4096,
encoder_layers=12,
encoder_attention_heads=16,
decoder_ffn_dim=4096,
decoder_layers=12,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
attention_dropout=0.0,
dropout=0.1,
max_position_embeddings=1024,
init_std=0.02,
classifier_dropout=0.0,
num_labels=3,
is_encoder_decoder=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
normalize_before=False,
add_final_layer_norm=False,
scale_embedding=False,
normalize_embedding=True,
static_position_embeddings=False,
add_bias_logits=False,
adapter_dim=64,
adapt_layer_norm=False,
unfreeze_hyper_encoder=False,
**common_kwargs
):
if "hidden_size" in common_kwargs:
raise ValueError("hidden size is called d_model")
super().__init__(
num_labels=num_labels,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**common_kwargs,
)
self.vocab_size = vocab_size
self.d_model = d_model # encoder_embed_dim and decoder_embed_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = self.num_hidden_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.max_position_embeddings = max_position_embeddings
self.init_std = init_std # Normal(0, this parameter)
self.activation_function = activation_function
# Params introduced for Mbart
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.normalize_embedding = normalize_embedding # True for mbart, False otherwise
self.normalize_before = normalize_before # combo of fairseq's encoder_ and decoder_normalize_before
self.add_final_layer_norm = add_final_layer_norm
# Params introduced for Marian
self.add_bias_logits = add_bias_logits
self.static_position_embeddings = static_position_embeddings
# 3 Types of Dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.dropout = dropout
# Classifier stuff
self.classif_dropout = classifier_dropout
# Adapter
self.adapter_dim = adapter_dim
# Hypernet
self.generator_hdim = int(self.d_model * 0.25) # TODO: make it a tunable hp.
self.adapt_layer_norm = adapt_layer_norm
self.unfreeze_hyper_encoder = unfreeze_hyper_encoder # TODO: should be
def Linear(in_features, out_features, bias=True, std=0.0000001):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight, gain=std)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class EncoderLayerWithAdapter(EncoderLayer):
def __init__(self, config: BartConfig):
super(EncoderLayerWithAdapter, self).__init__(config)
self.adapter_dim = config.adapter_dim
# self.adapter_down_weight = torch.zeros(self.embed_dim, self.adapter_dim)
# self.adapter_down_bias = torch.zeros(self.adapter_dim)
# self.adapter_up_weight = torch.zeros(self.adapter_dim, self.embed_dim)
# self.adapter_up_bias = torch.zeros(self.embed_dim)
        # NOTE: init_std must be passed by keyword; the third positional argument of Linear is `bias`
        self.adapter_down_layer = Linear(self.embed_dim, self.adapter_dim, std=config.init_std)
        self.adapter_up_layer = Linear(self.adapter_dim, self.embed_dim, std=config.init_std)
def adapter_down(self, x):
# print(x.size())
# print(self.adapter_down_weight.size())
# z = x * self.adapter_down_weight
# print(z.size())
# return F.linear(x, self.adapter_down_weight.t(), self.adapter_down_bias)
# return x * self.adapter_down_weight + self.adapter_down_bias
return self.adapter_down_layer(x)
def adapter_up(self, x):
# return F.linear(x, self.adapter_up_weight.t(), self.adapter_up_bias)
# return x * self.adapter_up_weight + self.adapter_up_bias
return self.adapter_up_layer(x)
def forward(self, x, encoder_padding_mask):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn_weights = self.self_attn(
query=x, key=x, key_padding_mask=encoder_padding_mask, need_weights=self.output_attentions
)
x = F.dropout(x, p=self.dropout, training=self.training)
residual_adapter = x
x = self.adapter_down(x)
x = self.activation_fn(x)
x = self.adapter_up(x)
x = residual_adapter + x
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn_weights
class DecoderLayerWithAdapter(DecoderLayer):
def __init__(self, config: BartConfig):
super(DecoderLayerWithAdapter, self).__init__(config)
self.adapter_dim = config.adapter_dim
# self.adapter_down_weight = torch.zeros(self.embed_dim, self.adapter_dim)
# self.adapter_down_bias = torch.zeros(self.adapter_dim)
# self.adapter_up_weight = torch.zeros(self.adapter_dim, self.embed_dim)
# self.adapter_up_bias = torch.zeros(self.embed_dim)
        # NOTE: init_std must be passed by keyword; the third positional argument of Linear is `bias`
        self.adapter_down_layer = Linear(self.embed_dim, self.adapter_dim, std=config.init_std)
        self.adapter_up_layer = Linear(self.adapter_dim, self.embed_dim, std=config.init_std)
def adapter_down(self, x):
# return F.linear(x, self.adapter_down_weight.t(), self.adapter_down_bias)
return self.adapter_down_layer(x)
def adapter_up(self, x):
# return F.linear(x, self.adapter_up_weight.t(), self.adapter_up_bias)
return self.adapter_up_layer(x)
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
decoder_padding_mask=None,
):
residual = x
if layer_state is None:
layer_state = {}
if self.normalize_before:
x = self.self_attn_layer_norm(x)
# Self Attention
x, self_attn_weights = self.self_attn(
query=x,
key=x,
layer_state=layer_state, # adds keys to layer state
key_padding_mask=decoder_padding_mask,
attn_mask=causal_mask,
need_weights=self.output_attentions,
)
x = F.dropout(x, p=self.dropout, training=self.training)
residual_adapter = x
x = self.adapter_down(x)
x = self.activation_fn(x)
x = self.adapter_up(x)
x = residual_adapter + x
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
# Cross attention
residual = x
assert self.encoder_attn.cache_key != self.self_attn.cache_key
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, _ = self.encoder_attn(
query=x,
key=encoder_hidden_states,
key_padding_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
# Fully Connected
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return (
x,
self_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
class BartEncodeWithAdapter(BartEncoder):
def __init__(self, config: BartConfig, embed_tokens):
super(BartEncodeWithAdapter, self).__init__(config, embed_tokens)
self.layers = nn.ModuleList(
[EncoderLayerWithAdapter(config) for _ in range(config.encoder_layers)]
)
class BartDecoderWithAdapter(BartDecoder):
def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):
super(BartDecoderWithAdapter, self).__init__(config, embed_tokens)
self.layers = nn.ModuleList(
[DecoderLayerWithAdapter(config) for _ in range(config.decoder_layers)]
)
class BartModelWithAdapter(BartModel):
def __init__(self, config: BartConfig):
super(BartModelWithAdapter, self).__init__(config)
self.encoder = BartEncodeWithAdapter(config, self.shared)
self.decoder = BartDecoderWithAdapter(config, self.shared)
class BartForConditionalGenerationWithAdapter(BartForConditionalGeneration):
def __init__(self, config: BartConfig):
super().__init__(config)
base_model = BartModelWithAdapter(config)
self.model = base_model
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
class MyBartWithAdapter(BartForConditionalGenerationWithAdapter):
def forward(self, input_ids, attention_mask=None, encoder_outputs=None,
decoder_input_ids=None, decoder_attention_mask=None, decoder_cached_states=None,
use_cache=False, is_training=False):
if is_training:
_decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)
else:
_decoder_input_ids = decoder_input_ids
outputs = self.model(
input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=_decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
if is_training:
# loss_fct = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.config.pad_token_id)
# loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),
# decoder_input_ids.view(-1))
lprobs = F.log_softmax(lm_logits, dim=-1)
loss, _ = label_smoothed_nll_loss(lprobs, decoder_input_ids, epsilon=0.1, ignore_index=self.config.pad_token_id)
return loss
return (lm_logits, ) + outputs[1:]
def encoders(self):
return self.model.encoder.layers
def decoders(self):
return self.model.decoder.layers
def backup_layer_norm_parameters(self):
for encoder in self.encoders():
encoder.self_attn_layer_norm_bc = copy.deepcopy(encoder.self_attn_layer_norm)
for decoder in self.decoders():
decoder.self_attn_layer_norm_bc = copy.deepcopy(decoder.self_attn_layer_norm)
def restore_layer_norm_parameters(self):
for encoder in self.encoders():
encoder.self_attn_layer_norm = copy.deepcopy(encoder.self_attn_layer_norm_bc)
for decoder in self.decoders():
decoder.self_attn_layer_norm = copy.deepcopy(decoder.self_attn_layer_norm_bc)
|
CMR-main
|
cmr/models/bart_with_adapater.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import json
import random
class OfflineDebugger(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "offline_debug"
def _check_debugger_args(self):
super()._check_debugger_args()
required_atts = [
# additional hyper parameters
"offline_retrain_upstream",]
assert all([hasattr(self.debugger_args, att) for att in required_atts])
def _get_all_init_errors(self):
data_args = self.data_args
all_init_errors = []
for data_batch in tqdm(self.data_stream, desc="Creating the data loaders."):
if data_args.max_timecode > 0 and len(self.data_eval_loaders) >= data_args.max_timecode:
break
all_init_errors += [item for item in data_batch if item["init_status"] == "error"]
all_init_errors = self.data_formatter(all_init_errors)
return all_init_errors
def offline_debug(self):
""""This function is to generate the bound when fixing the errors offline."""
self.logger.info("Start Offline Debugging")
self.timecode = -1
# TODO: get the all_bug_examples
init_errors = self._get_all_init_errors()
# get the upstream examples
with open(self.data_args.upstream_data_path) as f:
            upstream_memory_examples = [json.loads(line) for line in set(f.read().splitlines())]
upstream_memory_examples = self.upstream_data_formatter(upstream_memory_examples)
if self.debugger_args.offline_retrain_upstream:
merged_examples = init_errors + upstream_memory_examples
else:
merged_examples = init_errors
# dl, _ = self.get_dataloader(self.data_args, merged_examples, mode="train")
# self.fix_bugs(dl, quiet=False)
# self._save_base_model(ckpt_name="offline")
|
CMR-main
|
cmr/debug_algs/offline_debug_bounds.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from logging import disable
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from cmr.debug_algs.commons import OnlineDebuggingMethod
from tqdm import tqdm
import copy
class ContinualFinetuning(OnlineDebuggingMethod):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "simple_cl"
def _check_debugger_args(self):
required_atts = ["weight_decay",
"learning_rate",
"adam_epsilon",
"warmup_steps",
"total_steps",
"num_epochs",
"gradient_accumulation_steps",
"max_grad_norm",
"diff_loss_weight"]
assert all([hasattr(self.debugger_args, att) for att in required_atts])
return
def load_base_model(self, base_model_args, mode="online_debug"):
self.base_model_args = base_model_args
model_type, base_model_path = base_model_args.model_type, base_model_args.base_model_path
self.logger.info(
f"Loading checkpoint from {base_model_path} for {model_type} .....")
self.base_model = MyBart.from_pretrained(model_type,
state_dict=convert_model_to_single_gpu(torch.load(base_model_path)))
self.logger.info(
f"Loading checkpoint from {base_model_path} for {model_type} ..... Done!")
if self.use_cuda:
self.base_model.to(torch.device("cuda"))
self.logger.info("Moving to the GPUs.")
if self.n_gpu > 1:
self.base_model = torch.nn.DataParallel(self.base_model)
def base_model_infer(self, eval_dataloader, verbose=False):
self.base_model.eval()
model = self.base_model if self.n_gpu == 1 else self.base_model.module
predictions = run_bart.inference(model, eval_dataloader, save_predictions=False, verbose=verbose,
logger=self.logger, return_all=False, predictions_only=True, args=Namespace(quiet=True))
return predictions
def data_formatter(self, bug_batch):
# The continual fine-tuning method only uses the correct answers for fixing bugs.
formatted_bug_batch = []
for bug in bug_batch:
# if "id" not in bug:
# _id = len(formatted_bug_batch)
_id = bug["id"]
_input = bug["input"]
# _mistake = bug["mistake"]
# TODO: only for now debugging.
if "truth" in bug:
_truth = bug["truth"] # a list of answers
else:
_truth = bug["output"] # a list of answers
formatted_bug_batch.append((_input, _truth, _id))
return formatted_bug_batch
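    # Editor's note (illustrative, not from the original source): downstream code
    # treats each formatted example as a tuple (input_text, answer_list, example_id),
    # e.g. ("Context: ... Question: who wrote Hamlet?", ["William Shakespeare"], "nq-dev-123").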
def get_dataloader(self, bug_data_args, formatted_bug_batch, mode="both", is_training="self"):
# mini bug-batch size.
assert hasattr(bug_data_args, "train_batch_size")
assert hasattr(bug_data_args, "predict_batch_size")
train_bug_dataloader, eval_bug_dataloader = None, None
if mode == "both" or mode == "train":
# for error-fixing
train_bug_dataloader = GeneralDataset(self.logger, bug_data_args, None,
data_type="train", is_training=True,
task_name=bug_data_args.task_name,
given_data=formatted_bug_batch)
train_bug_dataloader.load_dataset(
self.tokenizer, skip_cache=True, quiet=True)
train_bug_dataloader.load_dataloader(is_training=is_training)
if mode == "both" or mode == "eval":
# for evaluation
eval_bug_dataloader = GeneralDataset(self.logger, bug_data_args, None,
data_type="dev", is_training=False,
task_name=bug_data_args.task_name,
given_data=formatted_bug_batch)
eval_bug_dataloader.load_dataset(
self.tokenizer, skip_cache=True, quiet=True)
eval_bug_dataloader.load_dataloader()
return train_bug_dataloader, eval_bug_dataloader
def reset_optimizer(self):
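        # Editor's note: biases and LayerNorm weights are excluded from weight decay
        # below, the standard practice when fine-tuning transformers with AdamW.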
no_decay = ['bias', 'LayerNorm.weight']
self.optimizer_grouped_parameters = [
{'params': [p for n, p in self.base_model.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': self.debugger_args.weight_decay},
{'params': [p for n, p in self.base_model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
self.optimizer = AdamW(self.optimizer_grouped_parameters,
lr=self.debugger_args.learning_rate, eps=self.debugger_args.adam_epsilon)
# TODO: double check the decision about warm up for fine-tuning
self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps=self.debugger_args.warmup_steps,
num_training_steps=self.debugger_args.total_steps)
self.logger.info(f"optimizer & scheduler Setup ...... Done!")
def debugger_setup(self, debugger_args):
self.debugger_args = debugger_args
self._check_debugger_args()
self.logger.info(f"Debugger Setup ......")
self.logger.info(f"debugger_args: {debugger_args} ......")
self.reset_optimizer()
self.logger.info(f"Debugger Setup ...... Done!")
return
def fix_bugs(self, bug_loader, quiet=True):
# bug_dataloader is from self.bug_loaders
self.base_model.train()
train_losses = []
global_step = 0
if self.debugger_args.diff_loss_weight > 0:
last_weights = copy.deepcopy(list(self.base_model.parameters()))
for epoch_id in range(int(self.debugger_args.num_epochs)):
for batch in tqdm(bug_loader.dataloader, desc=f"Bug-fixing Epoch {epoch_id}", disable=quiet):
global_step += 1
# here the batch is a mini batch of the current bug batch
if self.use_cuda:
# print(type(batch[0]), batch[0])
batch = [b.to(torch.device("cuda")) for b in batch]
pad_token_id = self.tokenizer.pad_token_id
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
loss = self.base_model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
# For L2 norm
if self.debugger_args.diff_loss_weight > 0:
diff_loss = torch.Tensor([0]).to("cuda" if torch.cuda.is_available() else "cpu")
# Iterate over base_weights and curr_weights and accumulate the euclidean norm
# of their differences
curr_weights = list(self.base_model.parameters())
for base_param, curr_param in zip(last_weights, curr_weights):
diff_loss += (curr_param - base_param).pow(2).sum()
# self.logger.info(f"loss={loss}; diff_loss={diff_loss}; l2w={self.debugger_args.diff_loss_weight}")
loss = loss + self.debugger_args.diff_loss_weight * diff_loss
train_losses.append(loss.detach().cpu())
loss.backward()
self.model_update_steps += 1
if global_step % self.debugger_args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
self.base_model.parameters(), self.debugger_args.max_grad_norm)
self.optimizer.step() # We have accumulated enough gradients
self.scheduler.step()
self.base_model.zero_grad()
# last_weights = copy.deepcopy(list(self.base_model.parameters())) # update the last weights
if self.debugger_args.diff_loss_weight > 0:
del last_weights
return
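# Editor's sketch (not part of the original repo): a minimal, self-contained
# illustration of the L2 drift penalty used in `fix_bugs` above; the task loss is
# augmented with the squared Euclidean distance between the current weights and a
# snapshot of the weights taken before fixing. All names below are hypothetical.
def _l2_drift_penalty_sketch():
    import copy
    import torch
    model = torch.nn.Linear(4, 2)
    last_weights = copy.deepcopy(list(model.parameters()))
    # ... imagine one or more gradient updates mutating `model` here ...
    diff_loss = sum((curr - base).pow(2).sum()
                    for base, curr in zip(last_weights, model.parameters()))
    diff_loss_weight = 1e-3  # hypothetical regularization strength
    return diff_loss_weight * diff_loss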
|
CMR-main
|
cmr/debug_algs/cl_simple_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import numpy as np
import os
def get_prefix(filepath):
return filepath.split("/")[2].replace("_offline_eval","").replace("nq_dev_", "")[5:]
def eval_forgetting(online_debug_result, timecodes):
    # Eval the forgetting issue.
em_on_passes = []
f1_on_passes = []
for timecode in timecodes:
item = online_debug_result[str(timecode)]
r = item["eval_results_overall_forget"]["metric_results"]
em_on_passes.append(r["EM"])
f1_on_passes.append(r["QA-F1"])
worse = np.min(em_on_passes)
mean = np.mean(em_on_passes)
# median = np.median(em_on_passes)
final = em_on_passes[-1]
# print(f"Forgetting measure (EM): worse={worse}; mean={mean}; final={final}")
return worse, mean, final
def eval_error_fixing(online_debug_result, timecodes):
final_state_bug_fixing_rate = online_debug_result[str(timecodes[-1])]["eval_results_overall_bug"]["metric_results"]["EM"]
bug_fixing_rates = [online_debug_result[str(t)]["eval_results_overall_bug"]["metric_results"]["EM"] for t in timecodes]
inter_prefix_efr = []
inter_respon_efr = []
bsz = 20
odr = online_debug_result
# TODO: add these back later.
# for timecode, ((before, after), em_fixed, f1_fixed, em_prefixed, f1_prefixed) in \
# enumerate(zip(odr["res_on_bugs"], odr["em_fixed_bugs"], odr["f1_fixed_bugs"], odr["em_prefixed_bugs"], odr["f1_prefixed_bugs"])):
# inter_prefix_efr.append(len(em_prefixed)/bsz)
# inter_respon_efr.append(len(em_fixed)/(bsz-len(em_prefixed)))
# mean_ip_efr = np.mean(inter_prefix_efr)
# mean_ir_efr = np.mean(inter_respon_efr)
# print(f"Bug-Fixing measure (EM): final_state_bug_fixing_rate={final_state_bug_fixing_rate};")
# print(f"Bug-Fixing measure (EM): mean_ip_efr={mean_ip_efr}; mean_ir_efr={mean_ir_efr};")
mean_ip_efr, mean_ir_efr = 0, 0
best_efr = np.max(bug_fixing_rates)
mean_efr = np.mean(bug_fixing_rates)
return final_state_bug_fixing_rate, best_efr, mean_efr
def print_eval(path="bug_data/output/nq_dev_0625_1e-5_e3_result.json"):
# Load the json data
lr = path.split("_")[-5]
num_epoch = path.split("_")[-4][1:]
prefix = get_prefix(path)
assert os.path.exists(path)
all_results = json.load(open(path))
# print(output_info.keys())
# online_debug_results = output_info["online_debug_results"]
timecodes = [int(t) for t in list(all_results.keys())]
timecodes = sorted(timecodes, reverse=False)
worse_kr, mean_kr, final_kr = eval_forgetting(all_results, timecodes)
final_efr, best_efr, mean_efr = eval_error_fixing(all_results, timecodes)
final_f1 = 2*(final_kr*final_efr)/(final_kr+final_efr)
mean_f1 = 2*(mean_kr*mean_efr)/(mean_kr+mean_efr)
print(f"{prefix}, {worse_kr}, {mean_kr}, {final_kr}, {best_efr}, {mean_efr}, {final_efr}, {mean_f1} , {final_f1}")
def aggregate_offline_results(path="bug_data/output/nq_dev_0701_v2_offline_eval/"):
import glob
alltime_results = {}
for thread_res_path in sorted(glob.glob(f"{path}/thread_*.json")):
with open(thread_res_path) as f:
thread_res = json.load(f)
# for key, values in single_res.items():
# if key not in alltime_results:
# alltime_results[key] = []
# alltime_results[key] += values
alltime_results.update(thread_res)
with open(f"{path}/alltime_result.json", "w") as f:
json.dump(alltime_results, f)
if __name__ == '__main__':
# aggregate_offline_results("bug_data/output/nq_dev_0706_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0706_3e-5_e3_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0706_1e-5_e3_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0706_1e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l0.5_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l5_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l50_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l500_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l5000_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l50000_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_withup_l500_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_withup_l5000_g1_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0709_simplereplay_rsz30_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0709_simplereplay_rsz10_3e-5_e5_offline_eval")
# aggregate_offline_results("bug_data/output/nq_dev_0709_simplereplay_rsz100_3e-5_e5_offline_eval")
aggregate_offline_results("bug_data/output/nq_dev_0716_mbpapp_rsz32_rf30_3e-5_e5_offline_eval")
aggregate_offline_results("bug_data/output/nq_dev_0716v1_mbpapp_rsz32_rf30_3e-5_e5_woadapt_offline_eval")
aggregate_offline_results("bug_data/output/nq_dev_0716_mbpa_3e-5_e5_offline_eval")
print("{prefix}, {worse_kr}, {mean_kr}, {final_kr}, {best_efr}, {mean_efr}, {final_efr}, {mean_f1}, {final_f1}")
print_eval("bug_data/output/nq_dev_0706_1e-5_e3_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0706_3e-5_e3_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0706_1e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0706_3e-5_e5_offline_eval/alltime_result.json")
print("-"*50)
print_eval("bug_data/output/nq_dev_0708_ewc_l0.5_g1_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0708_ewc_l5_g1_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0708_ewc_l50_g1_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0708_ewc_l500_g1_3e-5_e5_offline_eval/alltime_result.json") # the best
print_eval("bug_data/output/nq_dev_0708_ewc_l5000_g1_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0708_ewc_l50000_g1_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0708_ewc_withup_l500_g1_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0708_ewc_withup_l5000_g1_3e-5_e5_offline_eval/alltime_result.json")
print("-"*50)
print_eval("bug_data/output/nq_dev_0709_simplereplay_rsz10_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0709_simplereplay_rsz30_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0709_simplereplay_rsz100_3e-5_e5_offline_eval/alltime_result.json")
print("-"*50)
print_eval("bug_data/output/nq_dev_0716_mbpapp_rsz32_rf30_3e-5_e5_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0716v1_mbpapp_rsz32_rf30_3e-5_e5_woadapt_offline_eval/alltime_result.json")
print_eval("bug_data/output/nq_dev_0716_mbpa_3e-5_e5_offline_eval/alltime_result.json")
|
CMR-main
|
cmr/debug_algs/evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
import argparse
from cmr.models.utils import set_seeds
from cmr.debug_algs.cl_none import NoneCL, OfflineCL
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from cmr.debug_algs.cl_online_ewc_alg import OnlineEWC
from cmr.debug_algs.offline_debug_bounds import OfflineDebugger
from cmr.debug_algs.cl_mbcl_alg import MemoryBasedCL
from cmr.debug_algs.index_based.cl_indexed_alg import IndexBasedCL
from cmr.debug_algs.cl_hypernet_alg import HyperCL
from cmr.debug_algs.distant_supervision import data_collection
import logging
import os
import json
from tqdm import tqdm
import numpy as np
import wandb
class TqdmHandler(logging.Handler):
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg) # , file=sys.stderr)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
        except Exception:
self.handleError(record)
def setup_args(args):
set_seeds(args.seed)
prefix = args.prefix
log_filename = f"logs/{prefix}_online_debug.log"
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
handlers=[logging.FileHandler(log_filename),
logging.StreamHandler(), TqdmHandler()])
logger = logging.getLogger(__name__)
logger.info(args)
if args.cl_method_name == "none_cl":
debugging_alg = NoneCL(logger=logger)
elif args.cl_method_name == "offline_cl":
debugging_alg = OfflineCL(logger=logger)
elif args.cl_method_name == "simple_cl":
debugging_alg = ContinualFinetuning(logger=logger)
elif args.cl_method_name == "online_ewc":
debugging_alg = OnlineEWC(logger=logger)
elif args.cl_method_name == "offline_debug":
debugging_alg = OfflineDebugger(logger=logger)
elif args.cl_method_name in ["er", "mir"]: # replay only
assert args.replay_frequency > 0
assert args.replay_size > 0
if args.cl_method_name == "mir":
args.use_mir = True
assert args.replay_candidate_size >= args.replay_size
assert args.num_adapt_epochs >= 1 # this is for the virtual update
else:
assert args.num_adapt_epochs <= 0
debugging_alg = MemoryBasedCL(logger=logger)
debugging_alg.name = args.cl_method_name
elif args.cl_method_name == "mbpa":
assert args.num_adapt_epochs > 0
assert args.replay_frequency <= 0
assert args.replay_size <= 0
debugging_alg = MemoryBasedCL(logger=logger)
debugging_alg.name = args.cl_method_name
elif args.cl_method_name == "mbpa++":
assert args.num_adapt_epochs > 0
assert args.replay_frequency > 0
assert args.replay_size > 0
debugging_alg = MemoryBasedCL(logger=logger)
debugging_alg.name = args.cl_method_name
elif args.cl_method_name == "index_cl":
assert args.replay_frequency > 0
assert args.replay_size > 0
assert args.num_adapt_epochs <= 0
debugging_alg = IndexBasedCL(logger=logger)
debugging_alg.name = args.cl_method_name
elif args.cl_method_name == "hyper_cl":
debugging_alg = HyperCL(logger=logger)
elif args.cl_method_name == "simple_ds_mine":
debugging_alg = data_collection.MiningSupervision(logger=logger)
data_args = Namespace(
submission_stream_data=args.submission_stream_data,
stream_id=args.stream_id,
upstream_eval_data=args.upstream_eval_data,
heldout_submission_data=args.heldout_submission_data,
upstream_data_path=args.upstream_data_path,
# sampled_upstream_json_path=args.sampled_upstream_json_path,
# pass_sample_size=args.pass_sample_size,
do_lowercase=args.do_lowercase,
append_another_bos=args.append_another_bos,
max_input_length=args.max_input_length,
max_output_length=args.max_output_length,
task_name=args.task_name,
result_file=args.result_file,
train_batch_size=args.train_batch_size,
predict_batch_size=args.predict_batch_size,
num_beams=args.num_beams,
max_timecode=args.max_timecode,
accumulate_eval_freq=-1,
# use_sampled_upstream=args.use_sampled_upstream,
)
base_model_args = Namespace(
model_type=args.base_model_type,
base_model_path=args.base_model_path
)
if args.cl_method_name in ["none_cl", "offline_cl", "simple_cl", "online_ewc", "er", "mir", "mbpa", "mbpa++", "index_cl", "hyper_cl", "simple_ds_mine"]:
debugger_args = Namespace(
weight_decay=args.weight_decay,
learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon,
warmup_steps=0,
total_steps=10000,
num_epochs=args.num_train_epochs,
gradient_accumulation_steps=args.gradient_accumulation_steps,
max_grad_norm=args.max_grad_norm,
diff_loss_weight=args.diff_loss_weight,
save_ckpt_freq=args.save_ckpt_freq,
ckpt_dir=args.ckpt_dir,
skip_instant_eval=args.skip_instant_eval,
kr_eval_freq=args.kr_eval_freq,
kr_eval_mode=args.kr_eval_mode,
okr_sample_size=args.okr_sample_size,
okr_sample_seed=args.okr_sample_seed,
kg_eval_freq=args.kg_eval_freq,
kg_eval_mode=args.kg_eval_mode,
)
if args.cl_method_name == "online_ewc":
setattr(debugger_args, "ewc_lambda", args.ewc_lambda)
setattr(debugger_args, "ewc_gamma", args.ewc_gamma)
elif args.cl_method_name in ["er", "mbpa", "mbpa++", "mir", "index_cl"]:
setattr(debugger_args, "use_replay_mix", args.use_replay_mix)
setattr(debugger_args, "replay_size", args.replay_size)
setattr(debugger_args, "replay_candidate_size", args.replay_candidate_size)
setattr(debugger_args, "replay_frequency", args.replay_frequency)
setattr(debugger_args, "memory_path", args.memory_path)
setattr(debugger_args, "init_memory_cache_path", args.init_memory_cache_path)
setattr(debugger_args, "memory_key_encoder", args.memory_key_encoder)
setattr(debugger_args, "memory_store_rate", args.memory_store_rate)
setattr(debugger_args, "upstream_sample_ratio", args.upstream_sample_ratio)
setattr(debugger_args, "num_adapt_epochs", args.num_adapt_epochs)
setattr(debugger_args, "inference_query_size", args.inference_query_size)
setattr(debugger_args, "local_adapt_lr", args.local_adapt_lr)
if args.cl_method_name == "mir" or args.use_mir:
setattr(debugger_args, "mir_abalation_args", args.mir_abalation_args)
if args.cl_method_name == "index_cl":
setattr(debugger_args, "use_mir", args.use_mir)
setattr(debugger_args, "index_rank_method", args.index_rank_method)
setattr(debugger_args, "indexing_method", args.indexing_method)
setattr(debugger_args, "indexing_args_path", args.indexing_args_path)
elif args.cl_method_name in ["hyper_cl"]:
setattr(debugger_args, "adapter_dim", args.adapter_dim)
setattr(debugger_args, "example_encoder_name", args.example_encoder_name)
setattr(debugger_args, "task_emb_dim", args.task_emb_dim)
return debugging_alg, data_args, base_model_args, debugger_args, logger
def run(args):
debugging_alg, data_args, base_model_args, debugger_args, logger = setup_args(args)
# The Online Debugging Mode + Computing offline debugging bounds.
# setattr(data_args, "data_stream_json_path", args.data_stream_json_path)
# setattr(data_args, "replay_stream_json_path", args.replay_stream_json_path)
debugging_alg.load_data(data_args)
debugging_alg.load_base_model(base_model_args)
debugging_alg.debugger_setup(debugger_args)
debugging_alg.online_debug()
# logger.info(f'output_info["final_eval_results"]={output_info["final_eval_results"]}')
debugging_alg.save_result_file()
logger.info(f"Finished. Results saved to {args.result_file}")
return
def get_cli_parser():
parser = argparse.ArgumentParser()
# base_model_args
parser.add_argument("--base_model_type",
default="facebook/bart-base", required=False)
parser.add_argument(
"--base_model_path",
default="out/mrqa_squad_bart-base_1029_upstream_model/best-model.pt", type=str)
# data_args
parser.add_argument("--submission_stream_data",
default="/path/to/submission_stream")
# this will be used for evaluating forgetting
parser.add_argument("--upstream_eval_data",
default="experiments/eval_data/qa/upstream_eval.v1.jsonl")
parser.add_argument("--heldout_submission_data",
default="experiments/eval_data/qa/heldout_eval.v1.json")
parser.add_argument("--upstream_data_path",
default="data/mrqa_squad/mrqa_squad_train.jsonl")
# default="bug_data/mrqa_naturalquestions.sampled_upstream.jsonl")
parser.add_argument("--task_name", default="mrqa")
# base model args.
parser.add_argument('--train_batch_size', type=int, default=8)
parser.add_argument('--predict_batch_size', type=int, default=16)
parser.add_argument('--num_beams', type=int, default=3)
parser.add_argument("--do_lowercase", action='store_true', default=False)
parser.add_argument("--freeze_embeds", action='store_true', default=False)
parser.add_argument('--max_input_length', type=int, default=888)
parser.add_argument('--max_output_length', type=int, default=50)
parser.add_argument("--append_another_bos", type=int,
default=1) # should be true (1)
    # evaluation related
parser.add_argument('--skip_instant_eval', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--use_wandb', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--kr_eval_freq', type=int, default=5)
parser.add_argument('--kr_eval_mode', default="loss") # loss or metric
parser.add_argument('--okr_sample_size', type=int, default=512)
parser.add_argument('--okr_sample_seed', type=int, default=1337)
parser.add_argument('--kg_eval_freq', type=int, default=5)
parser.add_argument('--kg_eval_mode', default="loss") # loss or metric
# feiw-benchmark
# debugger_args
parser.add_argument('--cl_method_name', type=str, default="none_cl",
help="the method name of the continual learning method")
### The HPs for Simple Continual Fine-tuning Method. ###
parser.add_argument("--learning_rate", default=1e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=0.1, type=float,
help="Max gradient norm.")
parser.add_argument("--diff_loss_weight", default=0, type=float,
help="For L2 reg")
parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
### The HPs for Online EWC Method. ###
parser.add_argument("--ewc_lambda", default=0.5, type=float,
help="Max gradient norm.")
parser.add_argument("--ewc_gamma", default=1, type=float,
help="Max gradient norm.")
# parser.add_argument("--use_sampled_upstream", action='store_true', default=False)
### The HPs for replay-based methods and memory-based.
parser.add_argument('--replay_size', type=int, default=8)
parser.add_argument('--replay_candidate_size', type=int, default=8)
    parser.add_argument('--replay_frequency', type=int, default=1)  # 1 means replay at every step; 10 means replay after every 10 model updates.
parser.add_argument('--memory_key_encoder', type=str, default="facebook/bart-base")
parser.add_argument('--memory_path', type=str, default="")
parser.add_argument('--init_memory_cache_path', type=str, default="bug_data/memory_key_cache.pkl")
    parser.add_argument('--upstream_sample_ratio', type=float, default=-1)
    parser.add_argument('--memory_store_rate', type=float, default=1.0)  # 1.0 = always store all examples to the memory.
    parser.add_argument('--num_adapt_epochs', type=int, default=1)
    parser.add_argument('--inference_query_size', type=int, default=1)
    parser.add_argument("--use_replay_mix", action='store_true', default=False)  # mix the replayed examples with the current error examples.
    parser.add_argument('--local_adapt_lr', type=float, default=1e-5)
# MIR ablation options
parser.add_argument('--mir_abalation_args', type=str, default="none")
# Indexbased CL abalation options
parser.add_argument('--use_mir', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--index_rank_method', type=str, default="most_similar")
parser.add_argument('--indexing_method', type=str, default="bart_index") # bart_index, biencoder
parser.add_argument('--indexing_args_path', type=str, default="exp_results/supervision_data/1012_dm_simple.train_args.json") # bart_index, biencoder
### The HPs for HyperCL
    parser.add_argument('--adapter_dim', type=int, default=32)  # hidden dimension of the adapters for HyperCL.
parser.add_argument('--example_encoder_name', type=str, default="roberta-base")
parser.add_argument('--task_emb_dim', type=int, default=768)
### The HPs for offline
parser.add_argument('--offline_retrain_upstream', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
# To save all ckpts.
# I/O parameters
parser.add_argument('--prefix', type=str, default="",
help="Prefix for saving predictions")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--stream_id', type=int, default=0,
help="multiple_streams")
parser.add_argument(
"--result_file", default="bug_data/results.json", type=str)
parser.add_argument("--ckpt_dir", type=str, default="experiments/ckpt_dirs/qa/nonecl",
help="path to all ckpts for saving")
parser.add_argument("--save_ckpt_freq", type=int, default=5, # 0 means no save for the intermidiate . but we always save the final model ckpt.
help="set to 1 if we want all ckpts and eval offline")
# Offline Evaluation Mode in Parallel
parser.add_argument("--num_threads_eval", type=int, default=0,
help="0 means nothing; >0 means the number of gpu threads")
parser.add_argument("--current_thread_id", type=int,
help="0 to num_threads_eval-1")
parser.add_argument("--max_timecode", default=-1, type=int,
help="the maximum timecode to eval")
parser.add_argument("--path_to_thread_result", type=str,
help="the path to save the thread results")
return parser
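# Editor's note — an illustrative invocation (paths and values are hypothetical;
# the flags are the ones defined in the parser above):
#   python cmr/debug_algs/run_lifelong_finetune.py \
#       --cl_method_name simple_cl --learning_rate 3e-5 --num_train_epochs 5 \
#       --submission_stream_data experiments/submission_stream.json \
#       --result_file bug_data/results.json --prefix nq_dev_simple_cl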
if __name__ == '__main__':
args = get_cli_parser().parse_args()
if args.use_wandb:
wandb_mode = "online"
else:
wandb_mode = "disabled"
wandb_run = wandb.init(reinit=True, project="error-nlp", mode=wandb_mode, settings=wandb.Settings(start_method="fork"), name=args.prefix)
run_name = wandb.run.name
wandb.config.update(args)
run(args)
|
CMR-main
|
cmr/debug_algs/run_lifelong_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.debug_algs.cl_utils import get_top_interfered_examples, local_adaptation, KeyValueMemoryModule
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import random
import numpy as np
import torch
import transformers
from cmr.debug_algs.index_based.index_manager import RandomMemoryManger
from cmr.task_manager.eval_metrics import evaluate_func
import copy
import pickle
import os
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from argparse import Namespace
import more_itertools
import json
class MemoryBasedCL(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "tbd" # can be er/mbpa/mbpa++
self.upstream_memory_examples = []
def load_data(self, data_args, given_data_stream=None):
super().load_data(data_args, given_data_stream=given_data_stream)
with open(data_args.upstream_data_path) as f:
            upstream_memory_examples = [json.loads(line) for line in set(f.read().splitlines())]
self.upstream_memory_examples = self.upstream_data_formatter(upstream_memory_examples)
def _check_debugger_args(self):
super()._check_debugger_args()
required_atts = [
"replay_size",
"replay_candidate_size",
"replay_frequency",
"memory_key_encoder", # 'bert-base-uncased' by default
"memory_store_rate", # 0, 0.1, 1 etc.
"upstream_sample_ratio",
"memory_path", # to save/load the memory module from disk
"init_memory_cache_path",
"num_adapt_epochs",
"inference_query_size",
"local_adapt_lr",
"use_replay_mix",
]
assert all([hasattr(self.debugger_args, att) for att in required_atts])
def debugger_setup(self, debugger_args):
super().debugger_setup(debugger_args)
# Initializing the Key-Value memory module for MBPA++
if self.name in ["er", "mir"]:
self.upstream_memroy_module = RandomMemoryManger(self.logger)
self.memroy_module = RandomMemoryManger(self.logger)
self.logger.info("Prepare the sampled upstream data as the initial memory for the ER and MIR;")
# upstream possible
self.upstream_memroy_module.set_up_initial_memory(formatted_examples=self.upstream_memory_examples)
if self.debugger_args.upstream_sample_ratio < 0:
# mix
self.memroy_module = self.upstream_memroy_module
self.logger.info(f"Initial memroy_module size: {self.memroy_module.get_memory_size()}")
self.logger.info(f"Initial upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}")
elif self.name in ["mbpa", "mbpa++"]:
# TODO: prepare the Memory module for it
pass
return
def online_debug(self):
self.logger.info("Start Online Debugging with Dynamic Error Mode")
self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
self.logger.info(f"Data Batch Size: {self.data_batch_size};")
self.timecode = 0
if self.debugger_args.save_ckpt_freq:
# save the initial model as the 0-th model.
self._save_base_model()
last_steps = 0
for data_eval_loader in tqdm(self.data_eval_loaders, desc="Online Debugging (with Memory Replay)"):
result_dict = {"timecode": self.timecode} # start with 0
self.eval_knowledge_retention(result_dict)
self.eval_knowledge_generalization(result_dict)
############### CORE ###############
# self._replay_based_eval(result_dict)
formatted_bug_examples = self._get_dynamic_errors(
data_eval_loader, result_dict, return_raw_bug_examples=True)
            # Build the train loader here as well: the MIR branch below uses it as the query loader.
            bug_train_loader, bug_eval_loader = self.get_dataloader(
                self.data_args, formatted_bug_batch=formatted_bug_examples, mode="both")
examples_to_train = formatted_bug_examples[:]
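            # Editor's note: this condition implements sparse experience replay — every
            # `replay_frequency` timecodes (skipping t=0), `replay_size` examples are
            # sampled from memory and either mixed into the current error batch
            # (use_replay_mix) or trained on in a separate replay step.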
if self.timecode % self.debugger_args.replay_frequency == 0 \
and self.debugger_args.replay_frequency > 0 and self.debugger_args.replay_size > 0 \
and self.timecode > 0:
# sparse experience replay
self.logger.info("Triggering Sampling from Memory and starting to replay.")
self.logger.info(f"Current memroy_module size: {self.memroy_module.get_memory_size()}.")
self.logger.info(f"Current upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}.")
if self.name == "mir":
def mir_retrieve(mm, sample_size):
assert self.debugger_args.replay_candidate_size >= self.debugger_args.replay_size
retrieved_examples_candidates = mm.retrieve_from_memory(
sample_size=min(self.debugger_args.replay_candidate_size, mm.get_memory_size()))
if "mir_buffer_ids" not in result_dict:
result_dict["mir_buffer_ids"] = []
result_dict["mir_buffer_ids"] += [_id for (_input, _truth, _id) in retrieved_examples_candidates]
retrieved_examples = get_top_interfered_examples(self,
K=sample_size, candidate_examples=retrieved_examples_candidates, query_data_loader=bug_train_loader)
return retrieved_examples
# self.logger.info(f"retrieved_examples (mir)={retrieved_examples}")
if self.debugger_args.upstream_sample_ratio > 0:
upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
retrieved_examples = []
if upstream_sample_budget > 0:
retrieved_examples += mir_retrieve(mm=self.upstream_memroy_module,
sample_size=upstream_sample_budget)
retrieved_examples += mir_retrieve(mm=self.memroy_module,
sample_size=self.debugger_args.replay_size-upstream_sample_budget)
else:
retrieved_examples = mir_retrieve(mm=self.memroy_module,
sample_size=self.debugger_args.replay_size)
else:
if self.debugger_args.upstream_sample_ratio > 0:
upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
retrieved_examples = []
if upstream_sample_budget > 0:
retrieved_examples += self.upstream_memroy_module.retrieve_from_memory(
sample_size=upstream_sample_budget)
retrieved_examples += self.memroy_module.retrieve_from_memory(
sample_size=self.debugger_args.replay_size-upstream_sample_budget)
else:
retrieved_examples = self.memroy_module.retrieve_from_memory(
sample_size=self.debugger_args.replay_size)
self.base_model.train()
result_dict["retrieved_ids"] = [_id for (_input, _truth, _id) in retrieved_examples]
if self.debugger_args.use_replay_mix:
examples_to_train += retrieved_examples
self.logger.info(f"Mixed the retrieved examples (len={len(retrieved_examples)}) to the current batch for training.")
else:
self.logger.info(f"Replay-Training Start! Using the retrieved examples (len={len(retrieved_examples)}) ")
replay_data_loader, _ = self.get_dataloader(
self.data_args, retrieved_examples, mode="train")
self.fix_bugs(replay_data_loader, quiet=False) # sparse replay
self.logger.info("Replay-Training done.")
last_steps = self.model_update_steps
# Fix the bugs by mini-batch based "training"
self.logger.info(f"Start error-fixing (len(examples_to_train)={len(examples_to_train)}) .... Timecode: {self.timecode}")
bug_train_loader, _ = self.get_dataloader(
self.data_args, examples_to_train, mode="train")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start error-fixing .... Done!")
# Store to memory
            # The original stochastic gating by `memory_store_rate` is commented out;
            # examples are currently always stored.
            # _max = 1000000
            # flag_store_examples = bool(random.randrange(0, _max)/_max >=
            #     1 - self.debugger_args.memory_store_rate)
            flag_store_examples = True
if flag_store_examples:
self.logger.info(f"Saving the current error examples (len={len(formatted_bug_examples)}) to the memory.")
self.logger.info(f"Current memory size: {self.memroy_module.get_memory_size()}.")
self.memroy_module.store_examples(formatted_bug_examples)
self.logger.info(".................. Done.")
############### CORE ###############
self.evaluate_error_fixing(result_dict, bug_eval_loader)
self._update_result_dict(result_dict)
if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
self._save_base_model()
self.save_result_file()
self.logger.info("-"*50)
self.timecode += 1
#### Final evaluation ####
self.final_evaluation()
#### Save the final model ####
self._save_base_model()
# Save to path
self.memroy_module.save_memory_to_path(self.debugger_args.memory_path)
def evaluate(self, eval_dataloader=None, verbose=False):
"""Evaluates the performance"""
if self.name not in ["mbpa", "mbpa++"]:
            # ER (no local adaptation).
            # This is the equivalent version of replay as a baseline (MbPA++ w/o local adaptation at inference time, i.e., simple replay).
return super().evaluate(eval_dataloader, verbose)
if not eval_dataloader:
eval_dataloader = self.submission_eval_loaders[self.timecode]
# TODO: reset the bsz for the local adaptation.
# prepare adapt_dataloaders
adapt_dataloaders = self.get_adapt_dataloaders(eval_dataloader, verbose=True)
predictions = self.base_model_infer_with_adaptation(
eval_dataloader, adapt_dataloaders, verbose)
assert len(predictions) == len(eval_dataloader)
predictions = [p.strip() for p in predictions]
results, return_all = evaluate_func(
predictions, eval_dataloader.data, self.metric, return_all=True)
return predictions, results, return_all
    ### The Adaptation Related Functions ###
def get_adapt_dataloaders(self, eval_dataloader=None, verbose=False):
"""Get the adapt_dataloader."""
adapt_dataloaders = []
num_batches = len(eval_dataloader.dataloader)
example_batches = np.array_split(eval_dataloader.data, num_batches)
# Only allow retrieving from the past memory. (due to offline evaluation)
past_memory_keys = []
for key, values in self.memroy_module.memory.items():
if values[3]-1 <= self.timecode:
past_memory_keys.append(key)
if not past_memory_keys:
adapt_dataloaders = [None for _ in range(len(example_batches))]
return adapt_dataloaders
past_memory_keys = np.frombuffer(np.asarray(
past_memory_keys), dtype=np.float32).reshape(len(past_memory_keys), -1)
for example_batch in tqdm(example_batches, desc="Retrieving Data from Memory", disable=not verbose):
# self.logger.info("Memory Retrieving ...")
# local adaptation for self.base_model of retrieved examples from memory.
# self.logger.info("Encoding the examples to evaluate...")
keys = self.memroy_module.encode_examples(example_batch)
# self.logger.info("Reading memory to get the KNN examples for local adaptation...")
retrieved_examples = self.memroy_module.query_examples(
keys, past_memory_keys, k=self.debugger_args.inference_query_size)
replay_data_loader, _ = self.get_dataloader(
self.data_args, retrieved_examples, mode="train")
adapt_dataloaders.append(replay_data_loader)
# self.logger.info("Memory Retrieving Done ...")
return adapt_dataloaders
def base_model_infer_with_adaptation(self, eval_dataloader, adapt_dataloaders, verbose=False):
self.base_model.eval()
model = self.base_model if self.n_gpu == 1 else self.base_model.module
predictions = self.inference_with_adaptation(model, eval_dataloader, adapt_dataloaders, save_predictions=False,
verbose=verbose, logger=self.logger, return_all=False, predictions_only=True, args=Namespace(quiet=True))
return predictions
def inference_with_adaptation(self, model, dev_data, adapt_dataloaders, save_predictions=False, verbose=False, args=None, logger=None, return_all=False, predictions_only=False):
# model.eval()
predictions = []
bos_token_id = dev_data.tokenizer.bos_token_id
loss = [] # if needed
if args:
quiet = args.quiet
else:
quiet = False
if not quiet:
logger.info("Starting inference ...")
current_index = 0
for batch in tqdm(dev_data.dataloader, desc="Inference", disable=not verbose):
### Local Adaptation: Start ###
_model = copy.deepcopy(model)
adapt_dataloader = adapt_dataloaders[current_index]
            if adapt_dataloader:
                # TODO: debug. deactivate this step? then it should be the same as ER.
                _model = local_adaptation(self, _model, adapt_dataloader)
### Local Adaptation: End ###
_model.eval()
### Inference: Start ###
if torch.cuda.is_available():
batch = [b.to(torch.device("cuda")) for b in batch]
pad_token_id = dev_data.tokenizer.pad_token_id
batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])
outputs = _model.generate(input_ids=batch[0],
attention_mask=batch[1],
num_beams=dev_data.args.num_beams,
max_length=dev_data.args.max_output_length,
decoder_start_token_id=_model.config.bos_token_id,
early_stopping=dev_data.gen_early_stop,)
for input_, output in zip(batch[0], outputs):
pred = dev_data.decode(output)
predictions.append(pred)
### Inference: End ###
current_index += 1
del _model
if not quiet:
logger.info("Starting inference ... Done")
if predictions_only:
return predictions
if save_predictions:
dev_data.save_predictions(predictions, )
# logger.info("Starting evaluation metric ...")
result = dev_data.evaluate(predictions, verbose=verbose)
# logger.info("Starting evaluation metric ... Done!")
if return_all:
return predictions, result, loss
return result
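# Editor's note: in `inference_with_adaptation` above, every eval batch deep-copies
# the base model, locally adapts the copy on its retrieved neighbours, predicts, and
# then discards the copy; memory overhead is one extra model replica, and wall-clock
# time scales with `num_adapt_epochs` times the number of eval batches.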
|
CMR-main
|
cmr/debug_algs/cl_mbcl_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import os
import random
import copy
from cmr.models.mybart import MyBart
from cmr.models import run_bart
import torch
import transformers
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
import more_itertools
import pickle
import numpy as np
def get_virtual_updated_model(cl_trainer, query_data_loader):
before_model = copy.deepcopy(cl_trainer.base_model)
virtual_adapt_args = copy.deepcopy(cl_trainer.data_args)
virtual_adapt_args.train_batch_size = 4
# change the batch size for the training.
query_data_loader, _ = cl_trainer.get_dataloader(virtual_adapt_args, query_data_loader.data, mode="train") # fix of the order
after_model = local_adaptation(cl_trainer, before_model, query_data_loader, diff_loss_weight=0)
del before_model
return after_model
def get_top_interfered_examples(cl_trainer, K, candidate_examples, query_data_loader):
"""
This is for the MIR method.
    1) use the query examples to train current_model, obtaining a virtual model.
    2) test the current_model and the virtual model separately on the candidate examples.
    3) compare the loss change of each example and rank them by the delta.
    4) return the top K examples with the largest positive loss changes.
"""
# assert cl_trainer.name == "mir"
cl_trainer.logger.info(
f"get_top_interfered_examples: len(candidate_examples)={len(candidate_examples)};")
if cl_trainer.debugger_args.mir_abalation_args == "random":
cl_trainer.logger.info(f"ablation mode: randomly sample {K} examples from the candidate_examples")
random.shuffle(candidate_examples)
return candidate_examples[:K]
##################### Prepare the candidate examples as Memory Buffer #####################
mlr_data_args = copy.deepcopy(cl_trainer.data_args)
mlr_data_args.predict_batch_size = 8 # to get the loss for each example # TODO: debug_MIR
# TODO: give the same random seed for selecting the same answer (if there are multiple answers)
    # only keep one of the possible correct answers for computing the loss consistently.
candidate_examples_single_ans = _keep_first_answer(candidate_examples)
memory_buffer_loader, _ = cl_trainer.get_dataloader(
mlr_data_args, candidate_examples_single_ans, mode="train", is_training=False) # fix of the order
##################### End #####################
before_model = copy.deepcopy(cl_trainer.base_model)
before_losses = run_bart.inference(
before_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=cl_trainer.logger)
if cl_trainer.debugger_args.mir_abalation_args == "largest_beforeloss":
after_losses = before_losses
else:
        # virtual update
virtual_adapt_args = copy.deepcopy(cl_trainer.data_args)
virtual_adapt_args.train_batch_size = 4
# change the batch size for the training.
query_data_loader, _ = cl_trainer.get_dataloader(virtual_adapt_args, query_data_loader.data, mode="train") # fix of the order
after_model = local_adaptation(cl_trainer, before_model, query_data_loader, diff_loss_weight=0)
after_losses = run_bart.inference(
after_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=cl_trainer.logger)
# cl_trainer.logger.info(
# f"len(before_losses)={len(before_losses)}; len(after_losses)={len(after_losses)};")
assert len(before_losses) == len(after_losses) == len(candidate_examples)
# cl_trainer.logger.info(f"candidate_examples IDs: {[x[2] for x in candidate_examples]}")
# it's a virtual update and we need to recover it.
# del cl_trainer.base_model
# del after_model
# cl_trainer.base_model = before_model
interference_scores = []
for example, before_loss, after_loss in zip(candidate_examples, before_losses, after_losses):
if cl_trainer.debugger_args.mir_abalation_args == "largest_afterloss":
loss_delta = after_loss # only for debugging MIR; biggest losers afterwards
elif cl_trainer.debugger_args.mir_abalation_args == "largest_beforeloss":
loss_delta = before_loss
else:
# standard MIR
loss_delta = after_loss - before_loss
interference_scores.append((example, loss_delta))
# cl_trainer.logger.info(f"before_losses={before_losses}")
# cl_trainer.logger.info(f"after_losses={after_losses}")
# cl_trainer.logger.info(f"interference_scores={[x[1] for x in interference_scores]}")
interference_scores.sort(key=lambda x: x[1], reverse=True)
if cl_trainer.debugger_args.mir_abalation_args == "reverse":
interference_scores.reverse() # only for debugging MIR. it's actually reverse=Yes
top_K_examples = [x[0] for x in interference_scores][:K]
# cl_trainer.logger.info(f"retrieved candidates ids = {[x[2] for x in top_K_examples]}")
del before_model
del before_losses
del after_model
del after_losses
del memory_buffer_loader
return top_K_examples
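# Editor's note — a toy walkthrough of the MIR ranking above (illustrative numbers):
#   before_losses = [0.9, 0.2, 0.5]
#   after_losses  = [1.4, 0.1, 0.6]   # measured after the virtual update on the query batch
#   deltas        = [0.5, -0.1, 0.1]  # after - before
# With K=2, the 1st and 3rd candidates (largest positive interference) are replayed.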
def local_adaptation(cl_trainer, model, adapt_dataloader, diff_loss_weight=1e-3):
pad_token_id = cl_trainer.tokenizer.pad_token_id
base_weights = list(cl_trainer.base_model.parameters())
curr_weights = list(model.parameters())
global_step = 0
# super().debugger_setup(cl_trainer.debugger_args) # reset the optimizier and schduler
model.train()
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': cl_trainer.debugger_args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=cl_trainer.debugger_args.local_adapt_lr, eps=cl_trainer.debugger_args.adam_epsilon)
# TODO: double check the decision about warm up for fine-tuning
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=cl_trainer.debugger_args.warmup_steps,
num_training_steps=cl_trainer.debugger_args.total_steps)
for epoch_id in range(int(cl_trainer.debugger_args.num_adapt_epochs)):
for batch in tqdm(adapt_dataloader.dataloader, desc=f"Local Adaptation Epoch {epoch_id}", disable=False):
global_step += 1
if cl_trainer.use_cuda:
# print(type(batch[0]), batch[0])
batch = [b.to(torch.device("cuda")) for b in batch]
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
# this is the task loss w/o any regularization
loss = model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
if cl_trainer.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if diff_loss_weight != 0:
diff_loss = torch.Tensor([0]).to("cuda" if torch.cuda.is_available() else "cpu")
# Iterate over base_weights and curr_weights and accumulate the euclidean norm
# of their differences
for base_param, curr_param in zip(base_weights, curr_weights):
diff_loss += (curr_param - base_param).pow(2).sum()
loss = loss + diff_loss_weight * diff_loss
loss.backward()
if global_step % cl_trainer.debugger_args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), cl_trainer.debugger_args.max_grad_norm)
optimizer.step() # We have accumulated enough gradients
scheduler.step()
model.zero_grad()
return model
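# Editor's note: `local_adaptation` builds a fresh AdamW optimizer and schedule on
# every call, and its diff penalty anchors the adapted copy to the weights of
# `cl_trainer.base_model` (the pre-adaptation model), which bounds how far the copy
# can drift during the few adaptation epochs.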
def _keep_first_answer(examples_with_multiple_ans):
examples_with_single_ans = []
for item in examples_with_multiple_ans:
examples_with_single_ans.append((item[0], item[1][0:1], item[2]))
return examples_with_single_ans
class KeyValueMemoryModule(object):
def __init__(self, logger):
self.logger = logger
self.memory = {}
self.keys_over_time = {}
self.memory_key_cache = {}
self.memory_key_encoder = ""
def load_key_encoder(self, memory_key_encoder='facebook/bart-base'):
# https://huggingface.co/transformers/model_doc/bart.html#bartmodel
# TODO: consider the SentenceBERT-like sentence encoders.
self.memory_key_encoder = memory_key_encoder
self.logger.info(
f"Starting to load the key encoder ({memory_key_encoder}) for the memory module.")
if "bart" in memory_key_encoder.lower():
self.tokenizer = transformers.BartTokenizer.from_pretrained(memory_key_encoder)
self.key_encoder = transformers.BartModel.from_pretrained(memory_key_encoder)
elif "distilbert" in memory_key_encoder.lower():
self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(memory_key_encoder)
self.key_encoder = transformers.DistilBertModel.from_pretrained(memory_key_encoder)
elif "roberta" in memory_key_encoder.lower():
self.key_encoder = transformers.RobertaModel.from_pretrained(memory_key_encoder)
self.tokenizer = transformers.RobertaTokenizer.from_pretrained(memory_key_encoder)
elif "bert" in memory_key_encoder.lower():
self.key_encoder = transformers.BertModel.from_pretrained(memory_key_encoder)
self.tokenizer = transformers.BertTokenizer.from_pretrained(memory_key_encoder)
self.key_encoder.cuda()
self.logger.info(f"Finished.")
return self.key_encoder, self.tokenizer
def get_key_content(self, inputs):
key_texts = []
trigger_str = "Question: "
for _input in inputs:
start_ind = _input.index(trigger_str) + len(trigger_str)
key_texts.append(_input[start_ind:])
return key_texts
def load_memory_key_cache(self, init_memory_cache_path):
if os.path.exists(init_memory_cache_path):
self.logger.info(f"Loading init_memory_cache_path from {init_memory_cache_path}")
with open(init_memory_cache_path, "rb") as f:
self.memory_key_cache = pickle.load(f)[self.memory_key_encoder]
else:
self.logger.info(f"Initializing an empty memory key cache.")
self.memory_key_cache = None
def encode_examples_for_caching(self, all_examples, batch_size=1, return_tensors=False):
"""
Return key representation of the documents
"""
# Freeze the weights of the key network to prevent key
# representations from drifting as data distribution changes
# with torch.no_grad():
# last_hidden_states, _
# = self.key_encoder(contents, attention_mask=attn_masks)
# Obtain key representation of every text content by selecting the its [CLS] hidden representation
# keys = last_hidden_states[:, 0, :]
all_vectors = {}
all_tensors = []
batches = list(more_itertools.chunked(all_examples, batch_size))
for examples in tqdm(batches, desc="Caching the examples"):
inputs = [d[0] for d in examples]
with torch.no_grad():
# only use the questions as the key text for encoding.
key_texts = self.get_key_content(inputs)
inputs = self.tokenizer.batch_encode_plus(
key_texts, return_tensors="pt", pad_to_max_length=True)
input_ids = inputs["input_ids"].to(torch.device("cuda"))
attention_mask = inputs["attention_mask"].to(torch.device("cuda"))
# last_hidden_states, _ = self.key_encoder(**inputs)
results = self.key_encoder(input_ids, attention_mask)
last_hidden_states = results[0]
key_vectors = last_hidden_states[:, 0, :]
key_vectors_npy = key_vectors.cpu().numpy()
all_tensors += list(key_vectors)
for key_text, key_vector in zip(key_texts, key_vectors_npy):
all_vectors[key_text] = key_vector
if return_tensors:
return all_tensors
return all_vectors
def encode_examples(self, examples, use_random_keys=False):
"""
Return key representation of the documents
"""
inputs = [d[0] for d in examples]
# only use the questions as the key text for encoding.
key_texts = self.get_key_content(inputs)
key_vectors = None
if use_random_keys:
self.logger.info("Using randomly generated memory keys for ER and MIR.")
key_vectors = np.random.rand(len(examples), 128)
return key_vectors
if self.memory_key_cache:
# self.logger.info("Using the cache.")
key_vectors = []
for key_text in key_texts:
assert key_text in self.memory_key_cache, key_text
key_vectors.append(self.memory_key_cache[key_text])
else:
# on the fly
with torch.no_grad():
inputs = self.tokenizer.batch_encode_plus(
key_texts, return_tensors="pt", pad_to_max_length=True)
input_ids = inputs["input_ids"].to(torch.device("cuda"))
attention_mask = inputs["attention_mask"].to(torch.device("cuda"))
# last_hidden_states, _ = self.key_encoder(**inputs)
results = self.key_encoder(input_ids, attention_mask)
last_hidden_states = results[0]
key_vectors = last_hidden_states[:, 0, :]
key_vectors = key_vectors.cpu().numpy()
return key_vectors
def store_examples(self, keys, examples, timecode=0):
"""
Add the examples as key-value pairs to the memory dictionary with content,attention_mask,label tuple as value
and key determined by key network
"""
assert len(keys) == len(examples)
# update the memory dictionary
for i, key in enumerate(keys):
# numpy array cannot be used as key since it is non-hashable, hence convert it to bytes to use as key.
values = list(examples[i])
values.append(timecode)
self.memory.update({key.tobytes(): tuple(values)})
def query_examples(self, keys, past_memory_keys, k=32):
"""
Returns samples from buffer using K-nearest neighbour approach
"""
retrieved_examples = []
        # Iterate over all the input keys
        # to find neighbours for each of them
        k = min(k, len(past_memory_keys))
        for key in keys:
            # compute similarity scores via a dot product between the query key and the stored keys
            similarity_scores = np.dot(past_memory_keys, key.T)
K_neighbour_keys = past_memory_keys[np.argpartition(similarity_scores, -k)[-k:]]
neighbours = [self.memory[nkey.tobytes()] for nkey in K_neighbour_keys]
# converts experiences into batch
# retrieved_examples.append(neighbours)
retrieved_examples += neighbours
# self.logger.info(f"Retrieved {len(retrieved_examples)} examples from memory; {len(retrieved_examples)/len(keys)} examples per key.")
return retrieved_examples
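    # Editor's note (illustrative): in `query_examples` above, with 4 stored keys and
    # k=2, np.argpartition(similarity_scores, -2) places the indices of the two largest
    # scores in the last two positions, so past_memory_keys[np.argpartition(...)[-2:]]
    # selects the two nearest keys without fully sorting all scores.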
def random_sample(self, sample_size):
sample_size = min(len(self.memory), sample_size)
keys = random.sample(list(self.memory), sample_size)
_inputs = [self.memory[k][0] for k in keys]
_outputs = [self.memory[k][1] for k in keys]
_ids = [self.memory[k][2] for k in keys]
# _timecodes = [self.memory[k][3] for k in keys]
examples = list(zip(_inputs, _outputs, _ids))
return examples
def save_memory_to_path(self, memory_path):
if self.memory is not None:
with open(memory_path, "wb") as f:
self.logger.info(f"Saving the memory to {memory_path}")
pickle.dump(self.memory, f)
def load_memory_from_path(self, memory_path):
if os.path.exists(memory_path):
with open(memory_path, "rb") as f:
self.logger.info(f"Loading the memory from {memory_path}")
self.memory = pickle.load(f)
total_keys = len(self.memory.keys())
# convert the keys from np.bytes to np.float32
self.all_keys = np.frombuffer(
np.asarray(list(self.memory.keys())), dtype=np.float32).reshape(total_keys, -1)
else:
self.logger.info(f"Warning: {memory_path} doesn't exist.")
def KVMemory_init():
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
import argparse
import json
import logging
parser = argparse.ArgumentParser()
parser.add_argument('--memory_key_encoder', type=str, default="facebook/bart-base")
parser.add_argument('--init_memory_cache_path', type=str,
default="bug_data/memory_key_cache.pkl")
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument("--bug_stream_json_path",
default="bug_data/mrqa_naturalquestions_dev.static_bug_stream.json")
parser.add_argument("--upstream_eval_data",
default="bug_data/mrqa_naturalquestions_dev.sampled_pass.jsonl")
parser.add_argument("--sampled_upstream_json_path",
default="bug_data/mrqa_naturalquestions.sampled_upstream.jsonl")
args = parser.parse_args()
log_filename = f'logs/memory_cache_building_{args.memory_key_encoder.replace("/", "_")}.log'
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
handlers=[logging.FileHandler(log_filename),
logging.StreamHandler()])
logger = logging.getLogger(__name__)
logger.info(args)
cl_trainer = ContinualFinetuning(logger)
# Load bugs
with open(args.bug_stream_json_path) as f:
bug_stream = json.load(f)
all_examples = []
for bug_batch in tqdm(bug_stream, desc="Creating the bug data loaders."):
formatted_bug_batch = cl_trainer.data_formatter(bug_batch)
all_examples += formatted_bug_batch
# Load pass cases
with open(args.upstream_eval_data) as f:
pass_examples = [json.loads(line) for line in set(f.read().splitlines())]
all_examples += cl_trainer.data_formatter(pass_examples)
memory_module = KeyValueMemoryModule(logger)
logger.info(f"All examples: {len(all_examples)}")
memory_module.load_key_encoder(memory_key_encoder=args.memory_key_encoder)
all_key_vectors = memory_module.encode_examples_for_caching(
all_examples, batch_size=args.batch_size)
logger.info(
f"all_key_vectors.shape: {len(all_key_vectors)} x {len(all_key_vectors[list(all_key_vectors.keys())[0]])}")
if os.path.exists(args.init_memory_cache_path):
with open(args.init_memory_cache_path, "rb") as f:
memory_key_cache = pickle.load(f)
else:
memory_key_cache = {}
memory_key_cache[args.memory_key_encoder] = all_key_vectors
with open(args.init_memory_cache_path, "wb") as f:
pickle.dump(memory_key_cache, f)
logger.info(f"Saved the cache to {f.name}")
|
CMR-main
|
cmr/debug_algs/cl_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import copy
import logging
import random
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.models import run_bart
from cmr.task_manager.eval_metrics import evaluate_func
import torch
from transformers import BartTokenizer, BartConfig
import json
from tqdm import tqdm
import os
import numpy as np
import wandb
def _pack_as_dict(predictions, results, results_all):
return {"predictions": predictions, "metric_results": results, "metric_results_detailed": results_all}
class OnlineDebuggingMethod():
def __init__(self, logger=None):
self.name = "base_class"
# logger
self.logger = logger
# args
self.debugger_args = None
self.base_model_args = None
self.data_args = None
# modules
self.base_model = None
self.debugger = None
# data
self.num_bug_batches = None
self.bug_batch_size = None
self.submission_eval_loaders = [] # for online dynamic streams
self.upstream_eval_loader = None # for UKR
self.heldout_submission_eval_loader = None # for KG eval
# utils
self.use_cuda = torch.cuda.is_available()
self.tokenizer = BartTokenizer.from_pretrained("bart-large")
self.timecode = None
self.metric = "EM|QA-F1"
# for dynamic stream mode
self.data_eval_loaders = []
self.online_eval_results = []
        self.last_OKR = None
        self.last_UKR = None
        self.last_KG = None
if self.use_cuda:
self.n_gpu = torch.cuda.device_count()
else:
self.n_gpu = 0
self.model_update_steps = 0 # number of updates over the base model.
self.past_errors = []
self.past_submissions = []
return
def save_result_file(self):
output_info = {}
output_info["method_class"] = self.name
output_info["base_model_args"] = str(self.base_model_args)
output_info["debugger_args"] = str(self.debugger_args)
output_info["data_args"] = str(self.data_args)
output_info["model_update_steps"] = self.model_update_steps
output_info["online_eval_results"] = self.online_eval_results
# if args.cl_method_name in ["offline_debug"]:
# output_info["offline_bound_results"] = offline_bound_results
# logger.info(f"eval_results_overall_bug: {offline_bound_results['eval_results_overall_bug']['metric_results']}")
# logger.info(f"eval_results_overall_forget: {offline_bound_results['eval_results_overall_forget']['metric_results']}")
with open(self.data_args.result_file, "w") as f:
json.dump(output_info, f)
self.logger.info(f"Updated result file: {self.data_args.result_file} at Timecode: {self.timecode}.")
def _check_data_args(self, additional_args=[]):
required_atts = ["submission_stream_data",
"stream_id",
"upstream_eval_data",
"heldout_submission_data",
"do_lowercase",
"append_another_bos",
"max_input_length",
"max_output_length",
"task_name",
"num_beams",
"max_timecode",
"result_file"] + additional_args
assert all([hasattr(self.data_args, att) for att in required_atts])
return
def load_data(self, data_args, given_data_stream=None):
""""For loading the data stream for dynamic building the errors."""
self.data_args = data_args
self._check_data_args() # additional_args=["data_stream_json_path", "accumulate_eval_freq"]
# Load bug stream
if given_data_stream:
data_stream = given_data_stream
else:
with open(data_args.submission_stream_data) as f:
data_stream = json.load(f)[data_args.stream_id]
self.logger.info(f"Loading the stream from {f.name} and use the ${data_args.stream_id} part.")
self.data_stream = data_stream
self.num_data_batches = len(data_stream)
self.data_batch_size = len(data_stream[0])
# Create data loaders for each error batch.
all_formatted_data = []
self.data_eval_loaders = []
self.online_eval_results = []
for data_batch in tqdm(self.data_stream, desc="Creating the data loaders."):
if data_args.max_timecode > 0 and len(self.data_eval_loaders) >= data_args.max_timecode:
break
formatted_data_batch = self.data_formatter(data_batch)
all_formatted_data += formatted_data_batch
_, eval_data_dataloader = self.get_dataloader(
data_args, formatted_data_batch, mode="eval")
self.data_eval_loaders.append(eval_data_dataloader)
self.all_formatted_data = all_formatted_data
# Create loaders for the sampled pass examples for evaluation.
with open(data_args.upstream_eval_data) as f:
upstream_eval_examples = [json.loads(line) for line in f.read().splitlines()]
upstream_eval_examples = self.data_formatter(upstream_eval_examples)
self.logger.info(f"load_data: len(upstream_eval_examples)={len(upstream_eval_examples)}")
_, self.upstream_eval_loader = self.get_dataloader(
data_args, upstream_eval_examples, mode="eval")
# Create loaders for the sampled pass examples for evaluation.
with open(data_args.heldout_submission_data) as f:
heldout_eval_examples = [json.loads(line) for line in f.read().splitlines()]
heldout_eval_examples = self.data_formatter(heldout_eval_examples)
self.logger.info(f"load_data: len(heldout_eval_examples)={len(heldout_eval_examples)}")
_, self.heldout_submission_eval_loader = self.get_dataloader(
data_args, heldout_eval_examples, mode="eval")
def _get_dynamic_errors(self, data_eval_loader, result_dict, return_raw_bug_examples=False):
############### Get the errors dynamically. ###############
self.logger.info(
f"Evaluating to get errors .... Timecode: {self.timecode}")
self.past_submissions += data_eval_loader.data
predictions, results, results_all = self.evaluate(data_eval_loader)
self.logger.info(f"Before Error Fixing: {results}")
# self.logger.info(
# f"Doing-Nothing Instant EM: {self.instant_doing_nothing_EM[self.timecode]}")
### Pack the error examples for training. ###
errors = []
error_ids = []
for (_input, _truth, _id), prediction, em, f1 in zip(data_eval_loader.data,
predictions,
results_all["EM"],
results_all["QA-F1"]):
# self.logger.info(f"{example}")
# self.logger.info(f"{prediction}")
# self.logger.info(f"{em}")
if em == 0: # TODO: this is the condition to judge if it is a bug.
bug = {}
bug["id"] = _id
bug["input"] = _input
bug["truth"] = _truth
bug["mistake"] = prediction
errors.append(bug)
error_ids.append(_id)
self.past_errors.append(bug)
formatted_bug_batch = self.data_formatter(errors)
self.logger.info(f"Found {len(formatted_bug_batch)} errors.")
SR = 1 - len(error_ids)/len(predictions)
CSR = 1 - len(self.past_errors) / len(self.past_submissions)
wandb.log({"num_errors": len(formatted_bug_batch)}, step=self.timecode)
wandb.log({"CSR": CSR}, step=self.timecode)
wandb.log({"SR": SR}, step=self.timecode)
result_dict["before_eval_results"] = _pack_as_dict(predictions, results, results_all)
result_dict["before_error_ids"] = error_ids
result_dict["SR"] = SR
result_dict["CSR"] = CSR
if return_raw_bug_examples:
return formatted_bug_batch
else:
bug_train_loader, bug_eval_loader = self.get_dataloader(
self.data_args, formatted_bug_batch, mode="both")
return bug_train_loader, bug_eval_loader
def _update_result_dict(self, result_dict):
# if self.last_OKR is None or self.last_KG is None or self.last_UKR is None:
# pass
# else:
scores = [result_dict.get("CSR", 0.0), result_dict.get("EFR", 0.0)]
        if None not in (self.last_OKR, self.last_UKR, self.last_KG):
scores.append(self.last_OKR)
scores.append(self.last_UKR)
scores.append(self.last_KG)
result_dict["Overall"] = float(np.mean(scores))
wandb.log({"Overall": result_dict["Overall"]}, step=self.timecode)
self.logger.info(f'Overall: {result_dict["Overall"]} from scores={scores}')
self.online_eval_results.append(result_dict)
def online_debug(self):
self.logger.info("Start Online Debugging with Dynamic Error Mode")
self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
self.logger.info(f"Data Batch Size: {self.data_batch_size};")
self.timecode = 0
if self.debugger_args.save_ckpt_freq:
# save the initial model as the 0-th model.
self._save_base_model()
for data_eval_loader in tqdm(self.data_eval_loaders, desc="Online Debugging"):
result_dict = {"timecode": self.timecode} # start with 0
self.eval_knowledge_retention(result_dict)
self.eval_knowledge_generalization(result_dict)
# self._replay_based_eval(result_dict)
bug_train_loader, bug_eval_loader = self._get_dynamic_errors(data_eval_loader, result_dict)
############### CORE ###############
# Fix the bugs by mini-batch based "training"
self.logger.info(f"Start error-fixing .... Timecode: {self.timecode}")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start error-fixing .... Done!")
############### CORE ###############
self.evaluate_error_fixing(result_dict, bug_eval_loader)
self._update_result_dict(result_dict)
if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
self._save_base_model()
self.save_result_file()
self.logger.info("-"*50)
self.timecode += 1
#### Final evaluation ####
self.final_evaluation()
#### Save the final model ####
self._save_base_model()
def final_evaluation(self):
self.logger.info("Start the final evaluation.")
# TODO:
self.logger.info("Nothing here.")
def eval_knowledge_retention(self, result_dict):
        # Evaluate only at the scheduled frequency or at the final timecode.
        is_final = self.timecode == self.data_args.max_timecode - 1
        if not is_final and self.timecode % self.debugger_args.kr_eval_freq != 0:
            return
######################## UKR ########################
self.logger.info(f"Start eval_knowledge_retention for UKR @ Timecode={self.timecode}")
if self.debugger_args.kr_eval_mode == "loss":
UKR_loss = self.evaluate(self.upstream_eval_loader, mode="loss")
elif self.debugger_args.kr_eval_mode == "metric":
predictions, results, results_all = self.evaluate(self.upstream_eval_loader)
scores = results_all["EM"]
UKR = len([1 for s in scores if s == 1]) / len(scores)
result_dict["UKR"] = UKR
wandb.log({"UKR": UKR}, step=self.timecode)
self.last_UKR = UKR
# UKR_loss = self.evaluate(self.upstream_eval_loader, mode="loss")
# wandb.log({"UKR_loss": UKR_loss}, step=self.timecode)
self.logger.info(f"Upstream Knowledge Retation (UKR@{self.timecode}): {UKR:.4f}")
######################## OKR ########################
if not self.past_submissions:
return
        rng = random.Random(self.debugger_args.okr_sample_seed)  # fixed seed (e.g., 1337) so all methods sample the same past submissions
if len(self.past_submissions) < self.debugger_args.okr_sample_size:
self.logger.info(f"len(self.past_submissions) = {len(self.past_submissions)} \
< self.debugger_args.okr_sample_size = {self.debugger_args.okr_sample_size}")
return
sampled_past_submissions = rng.sample(self.past_submissions, k=self.debugger_args.okr_sample_size)
result_dict["OKR_sampled_ids"] = [_id for _input, _truth, _id in sampled_past_submissions]
result_dict["OKR_sampled_ids"].sort()
_, past_submission_eval_loader = self.get_dataloader(self.data_args, sampled_past_submissions, mode="eval")
self.logger.info(f"Start eval_knowledge_retention for OKR @ Timecode={self.timecode}")
if self.debugger_args.kr_eval_mode == "loss":
OKR = self.evaluate(past_submission_eval_loader, mode="loss")
elif self.debugger_args.kr_eval_mode == "metric":
predictions, results, results_all = self.evaluate(past_submission_eval_loader)
scores = results_all["EM"]
OKR = len([1 for s in scores if s == 1]) / len(scores)
self.logger.info(f"Online Knowledge Retation (OKR@{self.timecode}): {OKR:.4f}")
result_dict["OKR"] = OKR
self.last_OKR = OKR
wandb.log({"OKR": OKR}, step=self.timecode)
def eval_knowledge_generalization(self, result_dict):
        # Evaluate only at the scheduled frequency or at the final timecode.
        is_final = self.timecode == self.data_args.max_timecode - 1
        if not is_final and self.timecode % self.debugger_args.kg_eval_freq != 0:
            return
######################## KG ########################
self.logger.info(f"Start eval_knowledge_generalization for KG @ Timecode={self.timecode}")
if self.debugger_args.kg_eval_mode == "loss":
KG_loss = self.evaluate(self.heldout_submission_eval_loader, mode="loss")
elif self.debugger_args.kg_eval_mode == "metric":
# TODO: get a decomposed version?
predictions, results, results_all = self.evaluate(self.heldout_submission_eval_loader)
scores = results_all["EM"]
KG = len([1 for s in scores if s == 1]) / len(scores)
result_dict["KG"] = KG
wandb.log({"KG": KG}, step=self.timecode)
self.last_KG = KG
self.logger.info(f"Future Knowledge Generalization (KG@{self.timecode}): {KG:.4f}")
def evaluate_error_fixing(self, result_dict, bug_eval_loader):
after_predictions, after_results, after_results_all = self.evaluate(bug_eval_loader)
fixed_ids = []
unfixed_ids = []
for (_input, _truth, _id), score_after in zip(bug_eval_loader.data, after_results_all["EM"]):
if score_after == 1:
fixed_ids.append(_id)
else:
unfixed_ids.append(_id)
EFR = len(fixed_ids) / len(fixed_ids+unfixed_ids)
result_dict["EFR"] = EFR
wandb.log({"EFR": EFR}, step=self.timecode)
self.logger.info(f"EFR={EFR}")
return EFR
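    # Worked example with illustrative numbers: if an episode surfaces 8 errors
    # and 6 of them reach EM == 1 after the update, then EFR = 6 / 8 = 0.75.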
# So the 0-th checkpoint should be the original base model.
def _save_base_model(self, ckpt_name=None):
output_dir = self.debugger_args.ckpt_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
model_state_dict = {k: v.cpu() for (
k, v) in self.base_model.state_dict().items()}
if ckpt_name:
model_path = os.path.join(output_dir, f"model_ckpt_{ckpt_name}.pt")
else:
model_path = os.path.join(
output_dir, f"model_ckpt_{self.timecode:03d}.pt")
torch.save(model_state_dict, model_path)
self.logger.info(f"Model saved to {model_path}.")
def evaluate(self, eval_dataloader=None, verbose=False, mode="metric"):
"""Evaluates the performance"""
if not eval_dataloader:
self.logger.info("evaluate with submission eval loaders")
eval_dataloader = self.submission_eval_loaders[self.timecode]
if mode == "metric":
predictions = self.base_model_infer(eval_dataloader, verbose)
assert len(predictions) == len(eval_dataloader)
predictions = [p.strip() for p in predictions]
results, results_all = evaluate_func(
predictions, eval_dataloader.data, self.metric, return_all=True)
return predictions, results, results_all
elif mode == "loss":
examples = eval_dataloader.data
_examples = _keep_first_answer(examples)
tmp_data_args = copy.deepcopy(self.data_args)
tmp_data_args.predict_batch_size = 8 # TODO: set an arg.
            eval_loader, _ = self.get_dataloader(tmp_data_args, _examples, mode="train", is_training=False)  # is_training=False keeps the example order fixed
losses = run_bart.inference(
self.base_model, eval_loader, compute_loss=True, loss_only=True, logger=self.logger)
mean_loss = sum(losses) / len(examples)
return mean_loss
def base_model_infer(self, eval_dataloader, verbose):
raise NotImplementedError(
"Please Implement the `base_model_infer` method in your class.")
def check_debugger_args(self):
raise NotImplementedError(
"Please Implement the `check_debugger_args` method in your class.")
def data_formatter(self, bug_batch):
raise NotImplementedError(
"Please Implement the `data_formatter` method in your class.")
def get_dataloader(self, data_args, formatted_bug_batch):
raise NotImplementedError(
"Please Implement the `get_dataloader` method in your class.")
def load_base_model(self, base_model_args, mode="online_debug"):
raise NotImplementedError(
"Please Implement the `load_base_model` method in your class.")
def debugger_setup(self):
raise NotImplementedError(
"Please Implement the `debugger_setup` method in your class.")
def fix_bugs(self, bug_loader, quiet=True):
raise NotImplementedError(
"Please Implement the `fix_bugs` method in your class.")
def upstream_data_formatter(self, examples):
# The continual fine-tuning method only uses the correct answers for fixing bugs.
formatted_examples = []
for example in examples:
_id = example["id"]
_input = example["input"]
_truth = example["output"] # a list of answers
formatted_examples.append((_input, _truth, _id))
return formatted_examples
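# A minimal sketch of a concrete subclass (hypothetical, for illustration
# only); it shows the hooks OnlineDebuggingMethod expects to be implemented:
#
#   class MyDebugger(OnlineDebuggingMethod):
#       def data_formatter(self, bug_batch):
#           # (input, truth, id) tuples, mirroring upstream_data_formatter
#           return [(ex["input"], ex["output"], ex["id"]) for ex in bug_batch]
#
#       def fix_bugs(self, bug_loader, quiet=True):
#           ...  # one or more gradient updates on the current error batch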
|
CMR-main
|
cmr/debug_algs/commons.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# TODO: remove this as we have the offline evaluation function now.
def _eval_before_fixing(self):
# Before Bug-Fixing
assert self.online_debug_results is not None
bug_eval_loader = self.bug_eval_loaders[self.timecode]
bug_before_predictions, bug_before_results, bug_before_results_all = self.evaluate(
bug_eval_loader)
self.logger.info("-"*10+f"Timecode: {self.timecode}"+"-"*10)
self.logger.info(
f"Before Bug-fixing the results on bug-batch-{self.timecode} = {bug_before_results}")
if len(self.online_debug_results["res_on_passes"]) == 0:
pass_before_predictions, pass_before_results, pass_before_results_all = self.evaluate(
self.forget_eval_loader)
self.online_debug_results["res_on_passes"].append(
(pass_before_results, pass_before_results_all))
else:
pass_before_predictions = None # TODO:
pass_before_results, pass_before_results_all = self.online_debug_results[
"res_on_passes"][-1]
self.logger.info(
f"Before Bug-fixing the results on the sampled pass cases = {pass_before_results}")
return bug_before_results, bug_before_results_all, pass_before_results, pass_before_results_all
# TODO: remove this as we have the offline evaluation function now.
def _eval_after_fixing(self, bug_before_results, bug_before_results_all, pass_before_results, pass_before_results_all):
# After Bug-Fixing
assert self.online_debug_results is not None
bug_eval_loader = self.bug_eval_loaders[self.timecode]
bug_after_predictions, bug_after_results, bug_after_results_all = self.evaluate(
bug_eval_loader)
self.logger.info(
f"After Bug-fixing the results on bug-batch-{self.timecode} = {bug_after_results}")
pass_after_predictions, pass_after_results, pass_after_results_all = self.evaluate(
self.forget_eval_loader)
self.logger.info(
f"After Bug-fixing the results on the sampled pass cases = {pass_after_results}")
# Log the overall results
self.online_debug_results["res_on_bugs"].append(
(bug_before_results, bug_after_results))
self.online_debug_results["res_on_passes"].append(
(pass_after_results, pass_after_results_all))
self._check_fixing(
bug_eval_loader, bug_before_results_all, bug_after_results_all)
self._check_forgetting(pass_before_results_all, pass_after_results_all)
if self.debugger_args.overtime_overall_bug_eval:
all_bug_after_predictions, all_bug_after_results, all_bug_after_results_all = self.evaluate(
self.bug_all_eval_loader)
self.logger.info(
f"Current Overall Bug-fixing Results = {all_bug_after_results}")
self.online_debug_results["overtime_all_bug_eval"].append(
all_bug_after_results)
# TODO: remove this as we have the offline evaluation function now.
def _eval_overall_bugs(self):
all_bug_after_predictions, all_bug_after_results, all_bug_after_results_all = self.evaluate(
self.bug_all_eval_loader)
self.online_debug_results["final_all_bug_eval"] = all_bug_after_results
self.logger.info(
f"Final Overall Bug-fixing Results = {all_bug_after_results}")
# TODO: move to evaluation analysis part.
def _check_fixing(self, bug_eval_loader, bug_before_results_all, bug_after_results_all):
# Log the specific fixed bugs and forget examples
em_prefixed_bugs = []
f1_prefixed_bugs = []
em_fixed_bugs = []
f1_fixed_bugs = []
assert len(bug_eval_loader.data) == len(
bug_before_results_all["EM"]) == len(bug_after_results_all["EM"])
for ind in range(len(bug_eval_loader.data)):
em_before = bug_before_results_all["EM"][ind]
em_after = bug_after_results_all["EM"][ind]
f1_before = bug_before_results_all["QA-F1"][ind]
f1_after = bug_after_results_all["QA-F1"][ind]
uuid = bug_eval_loader.data[ind][2] # (input, output, uuid)
if em_before == 1:
em_prefixed_bugs.append(uuid)
if f1_after > 0.5:
f1_prefixed_bugs.append(uuid)
if em_before == 0 and em_after == 1:
em_fixed_bugs.append(uuid)
if f1_before < 0.5 and f1_after > 0.5 and f1_after-f1_before >= 0.25:
f1_fixed_bugs.append(uuid)
self.online_debug_results["em_fixed_bugs"].append(em_fixed_bugs)
self.online_debug_results["f1_fixed_bugs"].append(f1_fixed_bugs)
self.online_debug_results["em_prefixed_bugs"].append(em_prefixed_bugs)
self.online_debug_results["f1_prefixed_bugs"].append(f1_prefixed_bugs)
self.logger.info(
f"Number of em_prefixed_bugs = {len(em_prefixed_bugs)}; Number of f1_prefixed_bugs = {len(f1_prefixed_bugs)}")
self.logger.info(
f"Number of em_fixed_bugs = {len(em_fixed_bugs)}; Number of f1_fixed_bugs = {len(f1_fixed_bugs)}")
# TODO: move to evaluation analysis part.
def _check_forgetting(self, pass_before_results_all, pass_after_results_all):
# log the forgotten bugs
em_forgotten_passes = []
for ind in range(len(self.forget_eval_loader.data)):
em_before = pass_before_results_all["EM"][ind]
em_after = pass_after_results_all["EM"][ind]
# f1_before = pass_before_results_all["QA-F1"][ind]
# f1_after = pass_after_results_all["QA-F1"][ind]
uuid = self.forget_eval_loader.data[ind][2] # (input, output, uuid)
if em_before == 1 and em_after == 0:
em_forgotten_passes.append(uuid)
self.online_debug_results["forgotten_passes"].append(
em_forgotten_passes)
self.logger.info(
f"Number of em_forgotten_passes = {len(em_forgotten_passes)}.")
# self.logger.info(f"UUIDS of fixed bugs = {em_fixed_bugs}")
def evaluate_v1(self, eval_dataloader=None, verbose=False):
"""Evaluates the performance"""
# backup the base model.
self.logger.info("Backing up the base model ...")
base_model_backup = copy.deepcopy(self.base_model)
self.logger.info("Backking up the base model ... Done!")
self.logger.info("Memory Retrieving ...")
# local adaptation for self.base_model of retrieved examples from memory.
keys = self.memroy_module.encode_examples(eval_dataloader.data)
retrieved_examples = self.memroy_module.query_examples(keys, k=self.debugger_args.replay_size)
replay_data_loader, _ = self.get_dataloader(self.data_args, retrieved_examples, mode="train")
self.logger.info("Memory Retrieving Done ...")
self.logger.info("Temp local adaptation ...")
self.fix_bugs(replay_data_loader) # local adaptation
self.logger.info("Temp local adaptation ... Done")
# get inference as usual.
    predictions, results, return_all = super().evaluate(eval_dataloader=eval_dataloader, verbose=verbose)  # evaluate the same loader after local adaptation
del self.base_model
self.base_model = base_model_backup # restore to the original base_model
return predictions, results, return_all
### Check the accumulative results. ###
if (self.data_args.accumulate_eval_freq > 0 and (self.timecode + 1) % self.data_args.accumulate_eval_freq == 0):
accumu_EM, forgotten_ids, fixed_ids, total_len = self.get_accumulative_results()
result_dict["accumulative_EM"] = accumu_EM
result_dict["accumulative_forgotten_ids"] = forgotten_ids
result_dict["accumulative_fixed_ids"] = fixed_ids
result_dict["accumulative_forgotten_rate"] = len(forgotten_ids) / total_len
result_dict["accumulative_fixed_rate"] = len(fixed_ids) / total_len
self.logger.info(" ")
self.logger.info(
f"Doing-Nothing Accumulative EM: {self.accumulate_doing_nothing_EM[self.timecode]}")
self.logger.info(f"My Accumulative EM: {accumu_EM}")
self.logger.info(
f"accumulative_forgotten_rate: {result_dict['accumulative_forgotten_rate']}")
self.logger.info(
f"accumulative_fixed_rate: {result_dict['accumulative_fixed_rate']}")
def get_accumulative_results(self):
EMs = []
forgotten_ids = []
fixed_ids = []
total_len = 0
for data_eval_loader in tqdm(self.data_eval_loaders[:self.timecode], desc="Evaluate Accumulative Results"):
predictions, results, results_all = self.evaluate(data_eval_loader)
EMs.append(results["EM"])
for (_, _, _id), em in zip(data_eval_loader.data, results_all["EM"]):
if _id in self.all_initial_error_ids and em == 1:
fixed_ids.append(_id)
if _id in self.all_initial_pass_ids and em == 0:
forgotten_ids.append(_id)
total_len += 1
return float(np.mean(EMs)), forgotten_ids, fixed_ids, total_len
def single_timecode_eval(self, timecode):
"""Used only for offline eval of a single checkpoint of a specific timecode."""
self.timecode = timecode
result_dict = {} # initialize for the given time code
self.logger.info("Start the Overall Error-Fixing Results....")
# Overall Error-Fixing Results
eval_results_overall_bug = self.evaluate(
self.bug_all_eval_loader, verbose=True)
result_dict["eval_results_overall_bug"] = _pack_as_dict(
*eval_results_overall_bug)
self.logger.info("Start the Overall Error-Fixing Results....Done")
self.logger.info(
"Start the Overall Forgetting Results (Knowledge Retain Acc)....")
# Overall Forgetting Results (Knowledge Retain Acc)
eval_results_overall_forget = self.evaluate(
self.forget_eval_loader, verbose=True)
result_dict["eval_results_overall_forget"] = _pack_as_dict(
*eval_results_overall_forget)
self.logger.info(
"Start the Overall Forgetting Results (Knowledge Retain Acc)....Done")
if self.name == "offline_debug":
# only overall evaluation for the offline debugging.
return result_dict
# Error-Fixing performance on the current batch of errors.
if self.timecode > 0:
self.logger.info(
"Start Error-Fixing performance on the Current batch of errors.....")
bug_eval_loader = self.bug_eval_loaders[self.timecode-1]
eval_results_current_errors = self.evaluate(bug_eval_loader)
result_dict["eval_results_current_errors"] = _pack_as_dict(
*eval_results_current_errors)
self.logger.info(
"Start Error-Fixing performance on the Current batch of errors.....Done")
# Error-Fixing performance on the next batch of errors. (for the computation of real responsive efr)
if self.timecode < len(self.bug_eval_loaders):
self.logger.info(
"Start Error-Fixing performance on the Next batch of errors.....")
bug_eval_loader = self.bug_eval_loaders[self.timecode]
eval_results_next_errors = self.evaluate(bug_eval_loader)
result_dict["eval_results_next_errors"] = _pack_as_dict(
*eval_results_next_errors)
self.logger.info(
"Start Error-Fixing performance on the Next batch of errors.....Done")
return result_dict
def load_data_static(self, data_args):
self.data_args = data_args
self._check_data_args()
# Load bug stream
with open(data_args.bug_stream_json_path) as f:
bug_stream = json.load(f)
self.bug_stream = bug_stream
self.num_bug_batches = len(bug_stream)
self.bug_batch_size = len(bug_stream[0])
# Create data loaders for each error batch.
all_formatted_bugs = []
for bug_batch in tqdm(self.bug_stream, desc="Creating the bug data loaders."):
formatted_bug_batch = self.data_formatter(bug_batch)
all_formatted_bugs += formatted_bug_batch
train_bug_dataloader, eval_bug_dataloader = self.get_dataloader(
data_args, formatted_bug_batch, mode="both")
self.bug_train_loaders.append(train_bug_dataloader)
self.bug_eval_loaders.append(eval_bug_dataloader)
assert len(self.bug_train_loaders) == self.num_bug_batches
self.all_bug_examples = all_formatted_bugs
# Create the all bug loaders.
self.bug_all_train_loader, self.bug_all_eval_loader = self.get_dataloader(
data_args, all_formatted_bugs, mode="both")
# Create the pass pool evaluation loader for the final forgetting issue.
if data_args.upstream_eval_data:
# Create loaders for the sampled pass examples
with open(data_args.upstream_eval_data) as f:
pass_examples = [json.loads(line)
for line in set(f.read().splitlines())]
self.sampled_passes = pass_examples
pass_examples = self.data_formatter(pass_examples)
_, self.forget_eval_loader = self.get_dataloader(
data_args, pass_examples, mode="eval")
if data_args.sampled_upstream_json_path:
# Create loaders for the sampled pass examples
with open(data_args.sampled_upstream_json_path) as f:
sampled_upstream_examples = [json.loads(line)
for line in set(f.read().splitlines())]
self.sampled_upstream_examples = self.upstream_data_formatter(
sampled_upstream_examples)
# self.sampled_upstream_trainloader, self.sampled_upstream_evalloader = self.get_dataloader(
# data_args, sampled_upstream_examples, mode="eval")
return
def online_debug_static(self):
"""For the static error stream."""
self.logger.info("Start Online Debugging with Static Error Mode")
self.logger.info(f"Number of Batches of Bugs: {self.num_bug_batches}")
self.logger.info(f"Bug Batch Size: {self.bug_batch_size}")
self.timecode = 0
if self.debugger_args.save_ckpt_freq:
# save the initial model as the 0-th model.
self._save_base_model()
for bug_train_loader in tqdm(self.bug_train_loaders, desc="Online Debugging (Static)", total=self.num_bug_batches):
############### CORE ###############
# Fix the bugs by mini-batch based "training"
self.logger.info(f"Start bug-fixing .... Timecode: {self.timecode}")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start bug-fixing .... Done!")
############### CORE ###############
self.timecode += 1
if self.debugger_args.save_ckpt_freq:
self._save_base_model()
# Note that we save the model from the id=1.
# cmr/debug_algs/cl_mbcl_alg.py
def online_debug_static(self):
self.logger.info("Start Online Debugging")
self.logger.info(f"Number of Batches of Bugs: {self.num_bug_batches}")
self.logger.info(f"Bug Batch Size: {self.bug_batch_size}")
self.logger.info(f"Replay Size: {self.debugger_args.replay_size}")
self.logger.info(f"Replay Frequency: {self.debugger_args.replay_frequency}")
self.timecode = 0
if self.debugger_args.save_ckpt_freq:
# save the initial model as the 0-th model.
self._save_base_model()
# For the initial memory.
# TODO: sample and save to the memory.
last_steps = 0
for bug_train_loader in tqdm(self.bug_train_loaders, desc="Online Debugging", total=self.num_bug_batches):
if (self.model_update_steps - last_steps) >= self.debugger_args.replay_frequency \
and self.debugger_args.replay_frequency > 0 and self.debugger_args.replay_size > 0:
# sparse experience replay
self.logger.info("Triggering Sampling from Memory and starting to replay.")
retrieved_examples = self.memroy_module.random_sample(
sample_size=self.debugger_args.replay_size)
replay_data_loader, _ = self.get_dataloader(
self.data_args, retrieved_examples, mode="train")
self.fix_bugs(replay_data_loader) # sparse replay
self.logger.info("Replay-Training done.")
last_steps = self.model_update_steps
############### CORE START ###############
# Fix the bugs by mini-batch based "training"
self.logger.info(f"Start bug-fixing .... Timecode: {self.timecode}")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start bug-fixing .... Done!")
############### CORE END ###############
self.timecode += 1
if self.debugger_args.save_ckpt_freq:
self._save_base_model()
# Note that we save the model from the id=1.
# So the 0-th checkpoint should be the original base model.
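            # Store this batch into memory with probability memory_store_rate.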
_max = 1000000
flag_store_examples = bool(random.randrange(0, _max)/_max >=
1 - self.debugger_args.memory_store_rate)
if flag_store_examples:
self.logger.info("Saving examples to the memory.")
key_vectors = self.memroy_module.encode_examples(bug_train_loader.data, use_random_keys=bool(self.name in ["er", "mir"]))
self.memroy_module.store_examples(
key_vectors, bug_train_loader.data, timecode=self.timecode)
self.logger.info("Finished.")
self.memroy_module.save_memory_to_path(self.debugger_args.memory_path)
|
CMR-main
|
cmr/debug_algs/_legacy_functions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from logging import disable
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from cmr.debug_algs.commons import OnlineDebuggingMethod
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
import abc
import copy
class EWCRegularizer(nn.Module, metaclass=abc.ABCMeta):
    '''Module that adds (online) EWC regularization to a base seq2seq model
    for continual learning.
    '''
def __init__(self, ):
super().__init__()
        self.base_model = None  # the BART model (or another base model) to be regularized
# -EWC:
# -> hyperparam: how strong to weigh EWC-loss ("regularisation strength")
self.ewc_lambda = 0
# -> hyperparam (online EWC): decay-term for old tasks' contribution to quadratic term
self.gamma = 1.
# -> "online" (=single quadratic term) or "offline" (=quadratic term per task) EWC
self.online = True
# -> sample size for estimating FI-matrix (if "None", full pass over dataset)
self.fisher_n = None
# -> if True, use provided labels to calculate FI ("empirical FI"); else predicted labels
self.emp_FI = True # otherwise we need to do the inference decoding.
# -> keeps track of number of quadratic loss terms (for "offline EWC")
self.EWC_task_count = 0
def estimate_fisher(self, data_loader, pad_token_id):
'''After completing training on a task, estimate diagonal of Fisher Information matrix.
[data_loader]: <DataSet> to be used to estimate FI-matrix; give batches of size 1
'''
# Prepare <dict> to store estimated Fisher Information matrix
est_fisher_info = {}
for n, p in self.base_model.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
est_fisher_info[n] = p.detach().clone().zero_()
# Set model to evaluation mode
mode = self.base_model.training
self.base_model.eval()
# Create data-loader to give batches of size 1
# data_loader = utils.get_data_loader(
# dataset, batch_size=1, cuda=self._is_on_cuda(), collate_fn=collate_fn)
        # Batch size 1 is used so that each squared gradient corresponds to a
        # single example's score, giving the diagonal empirical Fisher estimate.
# Estimate the FI-matrix for [self.fisher_n] batches of size 1
for index, batch in enumerate(data_loader):
# break from for-loop if max number of samples has been reached
if self.fisher_n is not None:
if index >= self.fisher_n:
break
# run forward pass of model
# x = x.to(self.base_model._device())
batch = [b.to(torch.device("cuda")) for b in batch]
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
# output = self.base_model(x)
assert self.emp_FI
# -use provided label to calculate loglikelihood --> "empirical Fisher":
# label = torch.LongTensor([y]) if type(y) == int else y
# label = label.to(self.base_model._device())
# calculate negative log-likelihood
# negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
nll_loss = self.base_model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
# Calculate gradient of negative loglikelihood
self.base_model.zero_grad()
nll_loss.backward()
###
# Square gradients and keep running sum
for n, p in self.base_model.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
if p.grad is not None:
est_fisher_info[n] += p.grad.detach() ** 2
        # Normalize by the number of batches actually used for the estimate
        # (`enumerate` is 0-based: a full pass uses index + 1 batches, while an
        # early break at fisher_n uses exactly `index` batches).
        n_used = index if (self.fisher_n is not None and index >= self.fisher_n) else index + 1
        est_fisher_info = {n: p / n_used for n, p in est_fisher_info.items()}
# Store new values in the network
for n, p in self.base_model.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
# -mode (=MAP parameter estimate)
self.register_buffer('{}_EWC_prev_task{}'.format(n, "" if self.online else self.EWC_task_count+1), p.detach().clone())
# -precision (approximated by diagonal Fisher Information matrix)
if self.online and self.EWC_task_count == 1:
existing_values = getattr(self, '{}_EWC_estimated_fisher'.format(n))
est_fisher_info[n] += self.gamma * existing_values
self.register_buffer('{}_EWC_estimated_fisher{}'.format(n, "" if self.online else self.EWC_task_count+1), est_fisher_info[n])
# If "offline EWC", increase task-count (for "online EWC", set it to 1 to indicate EWC-loss can be calculated)
self.EWC_task_count = 1 if self.online else self.EWC_task_count + 1
# Set model back to its initial mode
self.base_model.train(mode=mode)
def ewc_loss(self):
'''Calculate EWC-loss.'''
if self.EWC_task_count > 0:
losses = []
# If "offline EWC", loop over all previous tasks (if "online EWC", [EWC_task_count]=1 so only 1 iteration)
for task in range(1, self.EWC_task_count+1):
for n, p in self.base_model.named_parameters():
if p.requires_grad:
# Retrieve stored mode (MAP estimate) and precision (Fisher Information matrix)
n = n.replace('.', '__')
mean = getattr(self, '{}_EWC_prev_task{}'.format(
n, "" if self.online else task))
                        if self.gamma > 0:
fisher = getattr(self, '{}_EWC_estimated_fisher{}'.format(
n, "" if self.online else task))
# If "online EWC", apply decay-term to the running sum of the Fisher Information matrices
fisher = self.gamma*fisher if self.online else fisher
# Calculate EWC-loss
losses.append((fisher * (p-mean)**2).sum())
else:
# This is just the L2 norm w/o computing the fisher info for weighting
losses.append(((p-mean)**2).sum())
# Sum EWC-loss from all parameters (and from all tasks, if "offline EWC")
return (1./2)*sum(losses)
else:
# EWC-loss is 0 if there are no stored mode and precision yet
# TODO: instead of 0, let's use the normal L2 norm?
return torch.tensor(0., device=torch.device("cuda"))
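# Summary of the penalty computed by EWCRegularizer.ewc_loss above, per
# parameter i and stored task t:
#     L_EWC = (1/2) * sum_i F_i * (theta_i - theta*_{t,i})^2
# where F is the (gamma-decayed, for online EWC) diagonal Fisher estimate and
# theta* the stored MAP parameters; when gamma <= 0 the penalty reduces to a
# plain L2 distance to theta*. OnlineEWC.fix_bugs below optimizes
#     loss = task_nll + ewc_lambda * L_EWC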
class OnlineEWC(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "online_ewc"
def _check_debugger_args(self):
super()._check_debugger_args()
required_atts = [
# ewc-related hyper parameters
"ewc_lambda",
"ewc_gamma",
# "use_sampled_upstream"
]
assert all([hasattr(self.debugger_args, att) for att in required_atts])
return
# ### END ###
def debugger_setup(self, debugger_args):
super().debugger_setup(debugger_args)
# Initializing the EWC Regularzier.
self.regularizer = EWCRegularizer()
self.regularizer.online = True
self.regularizer.ewc_lambda = self.debugger_args.ewc_lambda
self.regularizer.gamma = self.debugger_args.ewc_gamma
self.regularizer.emp_FI = True # TODO: check later.
self.regularizer.base_model = self.base_model
return
def fix_bugs(self, bug_loader, quiet=True):
# bug_dataloader is from self.bug_loaders
self.base_model.train()
train_losses = []
global_step = 0
pad_token_id = self.tokenizer.pad_token_id
# #### For the first update ###
# if self.data_args.use_sampled_upstream and self.timecode==0:
# self.logger.info("Start the initial fisher info matrix computation....")
# upstream_dl, _ = self.get_dataloader(self.data_args, self.sampled_upstream_examples, mode="train")
# upstream_dl.args.train_batch_size = 1
# upstream_fi_dl = upstream_dl.load_dataloader(do_return=True)
# self.regularizer.estimate_fisher(upstream_fi_dl, pad_token_id)
# self.logger.info("Start the initial fisher info matrix computation....Done!")
for epoch_id in range(int(self.debugger_args.num_epochs)):
for batch in tqdm(bug_loader.dataloader, desc=f"Bug-fixing Epoch {epoch_id}", disable=quiet):
# here the batch is a mini batch of the current bug batch
global_step += 1
if self.use_cuda:
# print(type(batch[0]), batch[0])
batch = [b.to(torch.device("cuda")) for b in batch]
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
# this is the task loss w/o any regularization
loss = self.base_model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if self.regularizer.ewc_lambda > 0: # a hp to control the penalty weight.
# add the regularzation term.
ewc_loss = self.regularizer.ewc_loss()
loss = loss + self.regularizer.ewc_lambda * ewc_loss
train_losses.append(loss.detach().cpu())
loss.backward()
self.model_update_steps += 1
if global_step % self.debugger_args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
self.base_model.parameters(), self.debugger_args.max_grad_norm)
self.optimizer.step() # We have accumulated enough gradients
self.scheduler.step()
self.base_model.zero_grad()
# TODO: build bsz=1 dataloader for update the fisher information matrix
bug_loader.logger = None
fisher_dataloader = copy.deepcopy(bug_loader)
fisher_dataloader.logger = self.logger
fisher_dataloader.args.train_batch_size = 1
fi_dl = fisher_dataloader.load_dataloader(do_return=True)
self.regularizer.estimate_fisher(fi_dl, pad_token_id)
return
|
CMR-main
|
cmr/debug_algs/cl_online_ewc_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from logging import disable
from cmr.task_manager.eval_metrics import evaluate_func
from cmr.models.bart_with_adapater import BartWithAdapterConfig, MyBartWithAdapter
from cmr.debug_algs.cl_mbcl_alg import KeyValueMemoryModule
from cmr.models.hypernet import ParameterGenerator
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from cmr.debug_algs.commons import OnlineDebuggingMethod
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
from torch import log, nn
from torch.nn import functional as F
import transformers
class HyperBart(nn.Module):
def __init__(self, logger, config):
super().__init__()
self.logger = logger
self.config = config
self.bart_model = None
self.weight_generator = None
self.example_encoder, self.example_tokenizer = None, None
# self.stored_task_embs = nn.Parameter(torch.zeros(self.config.num_tasks, self.task_emb_dim)) # for Trainable
# self.register_buffer('stored_task_embs', torch.zeros(self.config.num_tasks, self.task_emb_dim)) # fixed
def apply_adapter_weights(self, adapter_weights):
encoder_params, decoder_params = adapter_weights[:self.config.encoder_layers], adapter_weights[self.config.encoder_layers:]
d_model = self.config.d_model
d_adapter = self.config.adapter_dim
for p, encoder_layer in zip(encoder_params, self.bart_model.encoders()):
# dw, db: down weight, down bias
# uw, ub: up weight, up bias
dw, uw, db, ub = p[0:d_model*d_adapter], \
p[d_model*d_adapter:d_model*d_adapter*2], \
p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter], \
p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
encoder_layer.adapter_down_weight = dw.view(d_model, d_adapter)
encoder_layer.adapter_down_bias = db.view(d_adapter)
encoder_layer.adapter_up_weight = uw.view(d_adapter, d_model)
encoder_layer.adapter_up_bias = ub.view(d_model)
if self.config.adapt_layer_norm:
encoder_layer.self_attn_layer_norm.weight.data = encoder_layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
encoder_layer.self_attn_layer_norm.bias.data = encoder_layer.self_attn_layer_norm.bias.data + p[-1*d_model:]
for p, decoder_layer in zip(decoder_params, self.bart_model.decoders()):
dw, uw, db, ub = p[0:d_model*d_adapter], \
p[d_model*d_adapter:d_model*d_adapter*2], \
p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter], \
p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
decoder_layer.adapter_down_weight = dw.view(d_model, d_adapter)
decoder_layer.adapter_down_bias = db.view(d_adapter)
decoder_layer.adapter_up_weight = uw.view(d_adapter, d_model)
decoder_layer.adapter_up_bias = ub.view(d_model)
if self.config.adapt_layer_norm:
decoder_layer.self_attn_layer_norm.weight.data = decoder_layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
decoder_layer.self_attn_layer_norm.bias.data = decoder_layer.self_attn_layer_norm.bias.data + p[-1*d_model:]
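    # Layout of each per-layer parameter vector `p` sliced above (with
    # d_model = config.d_model and d_adapter = config.adapter_dim):
    #   p[0 : d_model*d_adapter]                    -> adapter down-projection weight
    #   p[d_model*d_adapter : 2*d_model*d_adapter]  -> adapter up-projection weight
    #   next d_adapter entries                      -> down-projection bias
    #   next d_model entries                        -> up-projection bias
    # When adapt_layer_norm is set, the final 2*d_model entries hold the
    # self-attention LayerNorm weight/bias deltas.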
def forward(self, input_ids, attention_mask=None, encoder_outputs=None,
decoder_input_ids=None, decoder_attention_mask=None, decoder_cached_states=None,
use_cache=False, is_training=False, task_emb=None):
""""overwrite the bart.forward function"""
# assert task_emb.dim() == 1
# generated_weights = None
generated_weights = self.weight_generator(task_emb.unsqueeze(0))
# self.bart_model.set_adapter_weights(generated_weights)
self.apply_adapter_weights(adapter_weights=generated_weights)
ret = self.bart_model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, is_training=is_training, use_cache=use_cache)
return ret
def load_example_encoder(self):
tmp = KeyValueMemoryModule(self.logger)
self.example_encoder, self.example_tokenizer = tmp.load_key_encoder(memory_key_encoder=self.config.example_encoder_name)
def get_task_embeddings(self, dataloader):
# TODO: get the ids of the examples
# TODO: get the vectors of these ids
        # TODO: aggregate the vectors (with mean) to get a task embedding vector.
examples = dataloader.data
tmp = KeyValueMemoryModule(self.logger)
tmp.tokenizer = self.example_tokenizer
tmp.key_encoder = self.example_encoder
all_vectors = tmp.encode_examples_for_caching(examples, return_tensors=True)
all_vectors = torch.stack(all_vectors)
# print(all_vectors)
mean_embedding = torch.mean(all_vectors, 0)
# print(mean_embedding)
return mean_embedding
# def init_weight_generator(self):
# # make sure config has such attrs
# # config.encoder_layers
# # config.decoder_layers
# # config.activation_function
# # config.activation_function
# # config.generator_hdim
# # config.task_emb_dim
# # config.d_model
# # config.adapter_dim
# # config.adapt_layer_norm
# self.weight_generator = ParameterGenerator(self.config)
class HyperCL(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "hyper_cl"
def _check_debugger_args(self):
super()._check_debugger_args()
required_atts = ["adapter_dim", "example_encoder_name", "task_emb_dim"]
assert all([hasattr(self.debugger_args, att) for att in required_atts])
def debugger_setup(self, debugger_args):
self.debugger_args = debugger_args
self._check_debugger_args()
model_type, base_model_path = self.base_model_args.model_type, self.base_model_args.base_model_path
# Set up the additional configs
config = BartWithAdapterConfig.from_pretrained(model_type)
config.adapter_dim = debugger_args.adapter_dim
config.adapt_layer_norm = False # debugger_args.adapt_layer_norm
# config.unfreeze_hyper_encoder = debugger_args.unfreeze_hyper_encoder
# config.num_tasks = len(self.all_bug_examples) # number of the overall examples in the error stream.
config.task_emb_dim = debugger_args.task_emb_dim # the dim of the CLS token embedding of the below model.
config.example_encoder_name = debugger_args.example_encoder_name
# Set up the HyperBart model
self.base_model = HyperBart(self.logger, config)
hyper_bart = self.base_model # make an alias to indicate the special arch.
hyper_bart.bart_model = MyBartWithAdapter(config)
hyper_bart.weight_generator = ParameterGenerator(config)
hyper_bart.load_example_encoder()
# Load the bart model of the HyperBart model.
self.logger.info(f"Loading checkpoint from {base_model_path} for {model_type} .....")
mybart_model = MyBart.from_pretrained(model_type, state_dict=convert_model_to_single_gpu(torch.load(base_model_path)))
hyper_bart.bart_model.model.load_state_dict(mybart_model.model.state_dict(), strict=False)
# TODO: load the cache of both bart and the weight generator
if self.use_cuda:
# Enable multi-gpu training.
hyper_bart.to(torch.device("cuda"))
self.logger.info("Moving to the GPUs.")
if self.n_gpu > 1:
hyper_bart = torch.nn.DataParallel(hyper_bart)
hyper_bart = hyper_bart.module if self.n_gpu > 1 else hyper_bart
# TODO: set up the memory for the "task embedding"
self.stored_task_embs = None
## we can assume that we have a pre-defined number of incoming examples.
## pre-computed by a frozen bert for each example.
## set up a method to extend the look-up table.
# Set up the optimizer.
no_decay = ['bias', 'LayerNorm.weight']
self.optimizer_grouped_parameters = [
# Note that we only update the hypernetwork.
{'params': [p for n, p in hyper_bart.weight_generator.decoders.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': debugger_args.weight_decay},
{'params': [p for n, p in hyper_bart.weight_generator.decoders.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
self.optimizer = AdamW(self.optimizer_grouped_parameters,
lr=debugger_args.learning_rate, eps=debugger_args.adam_epsilon)
# TODO: double check the decision about warm up for fine-tuning
self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps=debugger_args.warmup_steps,
num_training_steps=debugger_args.total_steps)
self.logger.info(f"Debugger Setup ...... Done!")
return
def load_base_model(self, base_model_args, mode="online_debug"):
self.base_model_args = base_model_args
if mode=="offline_eval":
model_type, base_model_path = self.base_model_args.model_type, self.base_model_args.base_model_path
# Set up the additional configs
config = BartWithAdapterConfig.from_pretrained(model_type)
config.adapter_dim = self.debugger_args.adapter_dim
config.adapt_layer_norm = False # self.debugger_args.adapt_layer_norm
# config.unfreeze_hyper_encoder = debugger_args.unfreeze_hyper_encoder
# config.num_tasks = len(self.all_bug_examples) # number of the overall examples in the error stream.
config.task_emb_dim = self.debugger_args.task_emb_dim # the dim of the CLS token embedding of the below model.
config.example_encoder_name = self.debugger_args.example_encoder_name
# Set up the HyperBart model
self.base_model = HyperBart(self.logger, config)
else:
pass # the base_model is initiated in the debugger_setup
return
def fix_bugs(self, bug_loader, quiet=True):
        # set the states of the hypernetwork and the base model for training
self.base_model.train()
train_losses = []
global_step = 0
pad_token_id = self.tokenizer.pad_token_id
hyper_bart = self.base_model # alias
task_emb = hyper_bart.get_task_embeddings(bug_loader)
for epoch_id in range(int(self.debugger_args.num_epochs)):
for batch in tqdm(bug_loader.dataloader, desc=f"Bug-fixing Epoch {epoch_id}", disable=quiet):
global_step += 1
# here the batch is a mini batch of the current bug batch
if self.use_cuda:
# print(type(batch[0]), batch[0])
batch = [b.to(torch.device("cuda")) for b in batch]
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
loss = hyper_bart(task_emb=task_emb, input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
train_losses.append(loss.detach().cpu())
loss.backward()
self.model_update_steps += 1
if global_step % self.debugger_args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
hyper_bart.parameters(), self.debugger_args.max_grad_norm)
self.optimizer.step() # We have accumulated enough gradients
self.scheduler.step()
hyper_bart.zero_grad()
return
def get_task_split_for_inference(self):
pass
def evaluate(self, eval_dataloader=None, verbose=False):
"""Evaluates the performance"""
if not eval_dataloader:
eval_dataloader = self.submission_eval_loaders[self.timecode]
# prepare adapt_dataloaders
adapt_dataloaders = self.get_adapt_dataloaders(eval_dataloader, verbose=True)
predictions = self.base_model_infer_with_adaptation(eval_dataloader, adapt_dataloaders, verbose)
assert len(predictions) == len(eval_dataloader)
predictions = [p.strip() for p in predictions]
results, return_all = evaluate_func(
predictions, eval_dataloader.data, self.metric, return_all=True)
return predictions, results, return_all
|
CMR-main
|
cmr/debug_algs/cl_hypernet_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from datetime import time
from logging import disable
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from tqdm import tqdm
class NoneCL(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "none_cl"
def _check_debugger_args(self):
return
def debugger_setup(self, debugger_args):
self.logger.info(f"No debugger!")
self.debugger_args = debugger_args
self._check_debugger_args()
return
def fix_bugs(self, bug_loader, quiet=True):
# bug_dataloader is from self.bug_loaders
self.logger.info("No debugging at all.")
return
class OfflineCL(NoneCL):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "none_cl_offline_eval"
def _check_debugger_args(self):
return
def online_debug(self):
self.logger.info("Start Online Debugging with Dynamic Error Mode")
self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
self.logger.info(f"Data Batch Size: {self.data_batch_size};")
self.timecode = 0
# if self.debugger_args.save_ckpt_freq:
# # save the initial model as the 0-th model.
# self._save_base_model()
data_args = self.data_args
bug_eval_loaders = []
for data_batch in tqdm(self.data_stream, desc="Creating the data loaders."):
            data_batch = [item for item in data_batch if item["init_status"] == "error"] # keep only the initial errors
formatted_data_batch = self.data_formatter(data_batch)
_, eval_data_dataloader = self.get_dataloader(
data_args, formatted_data_batch, mode="eval")
bug_eval_loaders.append(eval_data_dataloader)
for bug_eval_loader, data_eval_loader in tqdm(zip(bug_eval_loaders, self.data_eval_loaders), desc="Online Evaluation"):
result_dict = {"timecode": self.timecode} # start with 0
if self.timecode+1 == len(self.data_eval_loaders):
self.eval_knowledge_retention(result_dict)
self.eval_knowledge_generalization(result_dict)
# self._replay_based_eval(result_dict)
            _ = self._get_dynamic_errors(data_eval_loader, result_dict, return_raw_bug_examples=True)  # we only need the raw bug examples here, not the dataloaders
# bug_eval_loader = bug_eval_loaders[self.timecode]
self.evaluate_error_fixing(result_dict, bug_eval_loader)
self._update_result_dict(result_dict)
# if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
# # self._save_base_model()
# self.save_result_file()
self.logger.info("-"*50)
self.timecode += 1
#### Final evaluation ####
self.final_evaluation()
#### Save the final model ####
self._save_base_model()
|
CMR-main
|
cmr/debug_algs/cl_none.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import random
from cmr.benchmark_gen import sample_stream_data
from cmr.task_manager.eval_metrics import evaluate_func
def create_training_stream(args, logger):
assert not args.use_dev_stream
# setattr(data_args, "data_stream_json_path", args.data_stream_json_path)
# setattr(data_args, "replay_stream_json_path", args.replay_stream_json_path)
# with open(data_args.data_stream_json_path) as f:
# data_stream = json.load(f)
upstream_truth_data = []
with open(args.upstream_data_file) as fin:
lines = fin.readlines()
for line in lines:
# d = line.strip().split("\t")
# truth_data.append((d[0], d[1:]))
d = json.loads(line)
upstream_truth_data.append((d["input"], d["output"], d["id"]))
with open(args.upstream_data_prediction_file, "r") as f:
M0_predictions = json.load(f)
logger.info(f"len(predictions): {len(M0_predictions)}")
logger.info(f"len(upstream_truth_data]): {len(upstream_truth_data)}")
results, results_all = evaluate_func(
M0_predictions, upstream_truth_data , "EM|QA-F1", return_all=True)
logger.info(f"Upstream evaluation results: {results}")
bug_pool, pass_pool = sample_stream_data.generate_bugs(M0_predictions, upstream_truth_data, results_all, f1_upper_bound=1.0)
logger.info(f"len(bug_pool)={len(bug_pool)}")
logger.info(f"len(pass_pool)={len(pass_pool)}")
# TODO: add some pass_pool examples in bug pool?
sampled_M0_errors = random.sample(bug_pool, args.train_stream_length * args.train_stream_episode_size)
sampled_init_memory = random.sample(pass_pool, args.init_memory_size)
sampled_train_stream = sample_stream_data.get_data_stream(
sampled_M0_errors, args.train_stream_episode_size, args.train_stream_length, use_score=False)
# randomly sorted bugs
return sampled_init_memory, sampled_train_stream
def create_training_stream_with_dev(args, logger):
assert args.use_dev_stream
dev_memory = []
with open(args.dev_memory) as f:
for line in f.read().splitlines():
d = json.loads(line)
dev_memory.append(d)
sampled_init_memory = random.sample(dev_memory, args.init_memory_size)
with open(args.dev_stream) as f:
dev_stream = json.load(f)
dev_stream_examples = []
# print(len(dev_stream))
for batch in dev_stream:
for item in batch:
# print(item.keys())
dev_stream_examples.append(item)
# print(dev_stream_examples[:3])
# print(len(dev_stream_examples))
random.shuffle(dev_stream_examples)
sampled_M0_errors = random.sample(dev_stream_examples, args.train_stream_length * args.train_stream_episode_size)
sampled_train_stream = sample_stream_data.get_data_stream(
sampled_M0_errors, args.train_stream_episode_size, args.train_stream_length, use_score=False)
# randomly sorted bugs
return sampled_init_memory, sampled_train_stream
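# A minimal usage sketch (the attribute names follow the accesses above; the
# file paths and sizes are illustrative, not defaults from this repo):
#
#   from argparse import Namespace
#   args = Namespace(use_dev_stream=True, dev_memory="dev_memory.jsonl",
#                    dev_stream="dev_stream.json", init_memory_size=100,
#                    train_stream_length=50, train_stream_episode_size=16)
#   init_memory, train_stream = create_training_stream_with_dev(args, logger)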
|
CMR-main
|
cmr/debug_algs/distant_supervision/ds_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
"""
This script is used to generate the training data for learning a retriever that can retrieve the most forgettable examples given a batch of error cases to fix.
Input:
- The training streams. ---> get the error cases.
- model.
Output:
- The pairs between error cases and associated forgettable examples.
Key logic:
- Use the simple_CL method and put it to work on the training streams (which can be randomly sampled).
- For each episode, before and after the error-fixing (continual fine-tuning) step, we record the forgotten examples.
"""
import copy
import pickle
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.debug_algs.distant_supervision.ds_utils import create_training_stream, create_training_stream_with_dev
from cmr.debug_algs.index_based.index_manager import RandomMemoryManger
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models import run_bart
from cmr.models.utils import set_seeds, trim_batch
import torch
from scipy.stats import describe
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
import random
from tqdm import tqdm
from cmr.debug_algs import run_lifelong_finetune
from cmr.benchmark_gen import sample_stream_data
import json
from cmr.task_manager.eval_metrics import evaluate_func
from collections import OrderedDict
from operator import getitem
class MiningSupervision(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "simple_ds_mine"
self.init_model = None
def _check_data_args(self, additional_args):
pass
def compute_MIR_scores(self, before_model, after_model, examples):
_examples = _keep_first_answer(examples)
mlr_data_args = copy.deepcopy(self.data_args)
mlr_data_args.predict_batch_size = 4 # TODO: set an arg.
memory_buffer_loader, _ = self.get_dataloader(
            mlr_data_args, _examples, mode="train", is_training=False)  # is_training=False keeps the example order fixed
before_losses = run_bart.inference(
before_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=self.logger)
after_losses = run_bart.inference(
after_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=self.logger)
MIR_scores = {}
for example, before_loss, after_loss in zip(examples, before_losses, after_losses):
loss_delta = after_loss - before_loss
MIR_scores[example[2]] = loss_delta # id, score.
return MIR_scores
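    # Illustrative reading of the scores above: if an example's loss moves from
    # 0.40 (before the fix) to 1.10 (after), its MIR score is +0.70, i.e. the
    # update "forgot" it; get_pos_neg_results below takes the highest deltas as
    # positives and the lowest as negatives.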
def get_pos_neg_results(self, examples, scores, positive_size=8, negative_size=8):
examples_dict = {ex[2]: ex for ex in examples}
        sorted_scores = sorted(scores.items(), key=lambda x: x[1], reverse=True)
pos_ids = [x[0] for x in sorted_scores[:positive_size]]
neg_ids = [x[0] for x in sorted_scores[-negative_size:]]
positive_results = [examples_dict[ex_id] for ex_id in pos_ids]
negative_results = [examples_dict[ex_id] for ex_id in neg_ids]
return positive_results, negative_results
def wrap_supervision(self, before_model, after_model, query_examples, positive_results, negative_results):
cl_trainer = self
tokenizer = self.tokenizer
data_args = copy.deepcopy(self.data_args)
data_args.predict_batch_size = 4
# TODO: two options here:
if self.all_args.long_term_delta:
# Optional: using the delta versus the init model
self.logger.info("Using initial model as the before model for computing query vecs.")
before_model = self.init_model
supervision = {}
supervision["mode"] = "all_hiddens" if self.all_args.save_all_hiddens else "mean_reps"
supervision["positive"] = {}
supervision["negative"] = {}
top_examples = []
if self.all_args.save_all_hiddens:
supervision["query_before"] = {}
supervision["query_after"] = {}
query_hiddens_before = get_bart_dual_representation(cl_trainer, before_model, tokenizer, data_args, query_examples, return_all_hidden=True)
query_hiddens_after = get_bart_dual_representation(cl_trainer, after_model, tokenizer, data_args, query_examples, return_all_hidden=True)
positive_hiddens = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, positive_results, return_all_hidden=True)
negative_hiddens = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, negative_results, return_all_hidden=True)
for ind, example in enumerate(query_examples):
supervision["query_before"][example[2]] = {k: v[ind] for k, v in query_hiddens_before.items()}
supervision["query_after"][example[2]] = {k: v[ind] for k, v in query_hiddens_after.items()}
for ind, example in enumerate(positive_results):
supervision["positive"][example[2]] = {k: v[ind] for k, v in positive_hiddens.items()}
            for ind, example in enumerate(negative_results):
supervision["negative"][example[2]] = {k: v[ind] for k, v in negative_hiddens.items()}
else:
supervision["query"] = {}
query_vectors_before = get_bart_dual_representation(cl_trainer, before_model, tokenizer, data_args, query_examples)
query_vectors_after = get_bart_dual_representation(cl_trainer, after_model, tokenizer, data_args, query_examples)
assert len(query_vectors_before) == len(query_vectors_after) == len(query_examples)
for example, q1, q2 in zip(query_examples, query_vectors_before, query_vectors_after):
supervision["query"][example[2]] = list(q1) + list(q2) # concat
positive_vectors = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, positive_results)
negative_vectors = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, negative_results)
for example, vector in zip(positive_results, positive_vectors):
supervision["positive"][example[2]] = list(vector)
top_examples.append(example)
for example, vector in zip(negative_results, negative_vectors):
supervision["negative"][example[2]] = list(vector)
return supervision, top_examples
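    # Hedged summary of the returned structure in the default "mean_reps"
    # mode (keys taken from the code above):
    # supervision = {
    #     "mode": "mean_reps",
    #     "query":    {example_id: before_vector + after_vector},  # concatenated lists
    #     "positive": {example_id: init-model memory vector},
    #     "negative": {example_id: init-model memory vector},
    # }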
def mine_supervision(self, memory_manager=None, all_args=None):
self.all_args = all_args
self.logger.info("Start Mining Distant Supervision (as online debugging).")
sub_stream_dataloaders = self.data_eval_loaders
self.logger.info(f"Number of Batches of Data: {len(sub_stream_dataloaders)}")
self.logger.info(f"Data Batch Size: {self.data_batch_size};")
self.timecode = 0
mined_supervision = []
for data_eval_loader in tqdm(sub_stream_dataloaders, desc="Mining Supervision from Dynamic Error Stream"):
episode_data = data_eval_loader.data
bug_train_loader, _ = self.get_dataloader(
self.data_args, episode_data, mode="train")
            # TODO: these are not M_t's errors; they are just M_0's errors.
model_copy = copy.deepcopy(self.base_model)
############### CORE ###############
# Fix the bugs by mini-batch based "training"
self.logger.info(f"Start error-fixing .... Timecode: {self.timecode}")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start error-fixing .... Done!")
############### CORE ###############
updated_model = self.base_model
sampled_examples = memory_manager.retrieve_from_memory(sample_size=all_args.mir_buffer_size)
MIR_scores = self.compute_MIR_scores(model_copy, updated_model, sampled_examples)
self.timecode += 1
positive_results, negative_results = self.get_pos_neg_results(sampled_examples,
MIR_scores, positive_size=all_args.positive_size, negative_size=all_args.negative_size)
supervision, top_examples = self.wrap_supervision(model_copy, updated_model, episode_data, positive_results, negative_results)
self.logger.info(f"Get an instance for supervision at {self.timecode}")
mined_supervision.append(supervision)
memory_manager.store_examples(episode_data)
            # Reset to the pre-episode model, then update it with the mixed data below.
self.base_model = model_copy
self.reset_optimizer()
mixed_data = episode_data + top_examples
mixed_bug_train_loader, _ = self.get_dataloader(
self.data_args, mixed_data, mode="train")
self.fix_bugs(mixed_bug_train_loader) # for debugging
# del model_copy
return mined_supervision
# if self.debugger_args.save_ckpt_freq:
# self._save_base_model()
if __name__ == '__main__':
parser = run_lifelong_finetune.get_cli_parser()
parser.add_argument("--upstream_data_file", type=str,
default="data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl",
help="the path to upstream data")
parser.add_argument("--upstream_data_prediction_file", type=str, # by the initial model M_0
default="bug_data/mrqa_naturalquestions_train.predictions.jsonl",
help="the path to initial model's predictions on the upstream data")
parser.add_argument("--dev_memory", type=str, # by the initial model M_0
default="exp_results/data_streams/mrqa.nq_train.memory.jsonl",
help="the path to initial model's predictions on the upstream data")
parser.add_argument("--dev_stream", type=str, # by the initial model M_0
default="exp_results/data_streams/mrqa.mixed.data_stream.test.json",
help="the path to initial model's predictions on the upstream data")
parser.add_argument("--output_supervision", type=str,
help="the path to save the thread results")
parser.add_argument('--train_stream_length', type=int, default=100)
parser.add_argument('--train_stream_episode_size', type=int, default=16)
parser.add_argument('--init_memory_size', type=int, default=10000)
parser.add_argument('--num_rounds', type=int, default=1)
parser.add_argument('--positive_size', type=int, default=8)
parser.add_argument('--negative_size', type=int, default=8)
parser.add_argument('--mir_buffer_size', type=int, default=256)
parser.add_argument('--use_dev_stream', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--long_term_delta', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--save_all_hiddens', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--debug_mode', default=True, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
args = parser.parse_args()
    # debugging
args.cl_method_name = "simple_ds_mine"
if args.debug_mode:
args.use_dev_stream = True
args.long_term_delta = True
assert args.cl_method_name == "simple_ds_mine"
## init the useful args ##
cl_supervision_miner, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
args)
setattr(data_args, "replay_stream_json_path", "")
    ## Init the cl_supervision_miner ##
cl_supervision_miner.load_base_model(base_model_args)
cl_supervision_miner.init_model = copy.deepcopy(cl_supervision_miner.base_model) # maintain M0
## Create Training Stream ##
for _rid in range(args.num_rounds):
logger.info(f"Starting Round {_rid} ....")
seeds = list(range(100000))
random.shuffle(seeds)
selected_seed = seeds[args.seed] # actually the index
logger.info(f"Active Seed = {selected_seed}")
set_seeds(selected_seed)
if not args.use_dev_stream:
initial_memory, sampled_train_stream = create_training_stream(args, logger)
else:
initial_memory, sampled_train_stream = create_training_stream_with_dev(args, logger)
        ## Init the RandomMemory module ##
memory_manager = RandomMemoryManger(logger) # TODO: try the BART-base one?
formatted_initial_memory = cl_supervision_miner.data_formatter(initial_memory)
memory_manager.set_up_initial_memory(formatted_examples=formatted_initial_memory)
logger.info(f"Initial memory size: {memory_manager.get_memory_size()}")
cl_supervision_miner.load_data(data_args, given_data_stream=sampled_train_stream)
cl_supervision_miner.debugger_setup(debugger_args)
mined_supervision = cl_supervision_miner.mine_supervision(memory_manager, all_args=args)
path_to_save = args.output_supervision.replace(".pkl", f"-{_rid}.pkl")
with open(path_to_save, "wb") as f:
logger.info(f"Saving {f.name}")
pickle.dump(mined_supervision, f)
logger.info(f"Saving {f.name}...Done!")
logger.info(f"Finished Round {_rid} !")
"""
# debug
index=0
gpu=0
prefix=data_collection_simple_${thread}
log_file=exp_results/supervision_data/logs/run_${prefix}.log
CUDA_VISIBLE_DEVICES=${gpu} python cmr/debug_algs/distant_supervision/data_collection.py \
--cl_method_name simple_ds_mine \
--seed ${thread} \
--output_supervision "exp_results/supervision_data/simple_mir_dm/dm.${thread}.pkl" \
--learning_rate 3e-5 --num_train_epochs 5 --train_batch_size 10 \
--prefix ${prefix} \
--stream_mode dynamic \
--replay_stream_json_path "" \
--upstream_eval_data exp_results/data_streams/mrqa_naturalquestions_dev.hidden_passes.jsonl \
--save_ckpt_freq 0
> ${log_file} 2>&1
&
echo $log_file
"""
|
CMR-main
|
cmr/debug_algs/distant_supervision/data_collection.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
from tqdm import tqdm
from transformers.modeling_bart import _prepare_bart_decoder_inputs
from transformers.tokenization_utils import trim_batch
import numpy as np
from cmr.debug_algs.cl_utils import _keep_first_answer
def masked_mean(reps, masks):
masks = masks.view(reps.size()[0], reps.size()[1], 1)
masked_reps = reps * masks
masked_reps_sum = masked_reps.sum(dim=1)
length_reps = masks.sum(dim=1).view(masked_reps_sum.size()[0], 1)
mean_reps = masked_reps_sum / length_reps
return mean_reps
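# Hedged usage sketch (shapes assumed, not taken from the repo): average the
# token representations of each sequence while ignoring padded positions.
#   reps = torch.randn(2, 4, 768)                               # (batch, seq_len, hidden)
#   masks = torch.tensor([[1., 1., 0., 0.], [1., 1., 1., 1.]])  # (batch, seq_len)
#   mean_reps = masked_mean(reps, masks)                        # -> (2, 768)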
def get_bart_dual_representation(cl_trainer, bart_model, tokenizer, data_args, examples, return_all_hidden=False, agg_method="mean"):
examples_with_single_ans = _keep_first_answer(examples)
data_manager, _ = cl_trainer.get_dataloader(data_args,
examples_with_single_ans,
mode="train",
is_training=False)
all_vectors = []
bart_model = bart_model if cl_trainer.n_gpu == 1 else bart_model.module
bart_model.eval()
all_hiddens = {"input_reps":[], "input_masks": [], "output_reps": [] , "output_masks": []}
for batch in tqdm(data_manager.dataloader, desc="Computing BART representation"):
# self.logger.info(f"len(batch)={len(batch)}")
if cl_trainer.use_cuda:
# print(type(batch[0]), batch[0])
batch = [b.to(torch.device("cuda")) for b in batch]
pad_token_id = tokenizer.pad_token_id
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
# Encode the input text with BART-encoder
input_ids = batch[0]
input_attention_mask = batch[1]
encoder_outputs = bart_model.model.encoder(
input_ids, input_attention_mask)
x = encoder_outputs[0]
#
if agg_method == "mean":
x = masked_mean(x, input_attention_mask) # use the mean instead of the first
elif agg_method == "first":
x = x[:, 0, :]
input_vectors = x.detach().cpu().numpy()
# self.logger.info(f"input_vectors.shape = {input_vectors.shape}")
# Encode the output text with BART-decoder
output_ids = batch[2]
output_attention_mask = batch[3]
decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(
bart_model.model.config,
input_ids,
decoder_input_ids=output_ids,
decoder_padding_mask=output_attention_mask,
causal_mask_dtype=bart_model.model.shared.weight.dtype,
)
decoder_outputs = bart_model.model.decoder(
decoder_input_ids,
encoder_outputs[0],
input_attention_mask,
decoder_padding_mask,
decoder_causal_mask=causal_mask,
decoder_cached_states=None,
use_cache=False
)
y = decoder_outputs[0]
if agg_method == "mean":
y = masked_mean(y, output_attention_mask) # use the mean instead of the first
elif agg_method == "first":
y = y[:, 0, :]
output_vectors = y.detach().cpu().numpy()
# self.logger.info(f"output_vectors.shape = {output_vectors.shape}")
# concatenate the vectors
vectors = np.concatenate([input_vectors, output_vectors], axis=1)
if return_all_hidden:
all_hiddens["input_reps"] += list(encoder_outputs[0].detach().cpu().numpy())
all_hiddens["output_reps"] += list(decoder_outputs[0].detach().cpu().numpy())
all_hiddens["input_masks"] += list(input_attention_mask.detach().cpu().numpy())
all_hiddens["output_masks"] += list(output_attention_mask.detach().cpu().numpy())
# self.logger.info(f"vectors.shape = {vectors.shape}")
all_vectors += list(vectors)
del batch
del encoder_outputs
del decoder_outputs
if return_all_hidden:
return all_hiddens
else:
return all_vectors
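# Hedged summary: with the default agg_method="mean", each example is
# represented by concatenating the masked mean of the BART encoder states
# (input side) with that of the decoder states (output side), i.e. a
# 2 * hidden_size vector (2 * 768 for bart-base); with return_all_hidden=True
# the per-token hidden states and attention masks are returned instead.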
|
CMR-main
|
cmr/debug_algs/index_based/index_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/debug_algs/index_based/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.debug_algs.cl_utils import get_top_interfered_examples, get_virtual_updated_model
from cmr.debug_algs.index_based.IO_each_index import BartIOIndexManager
from cmr.debug_algs.index_based.biencoder import BiEncoderIndexManager
from cmr.debug_algs.index_based.index_manager import BartIndexManager, RandomMemoryManger
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import random
import numpy as np
import torch
import transformers
from cmr.task_manager.eval_metrics import evaluate_func
import copy
import pickle
import os
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from argparse import Namespace
import more_itertools
import json
class IndexBasedCL(ContinualFinetuning):
def __init__(self, logger):
super().__init__(logger=logger)
self.name = "tbd"
def _check_debugger_args(self):
super()._check_debugger_args()
required_atts = [
"replay_size",
"replay_candidate_size",
"replay_frequency",
"memory_store_rate", # 0, 0.1, 1 etc.
"upstream_sample_ratio",
"memory_path", # to save the memory module from disk
"use_replay_mix",
"init_memory_cache_path",
"index_rank_method"
]
assert all([hasattr(self.debugger_args, att) for att in required_atts])
# assert self.debugger_args.index_rank_method in ["most_similar", "most_different"]
def debugger_setup(self, debugger_args):
super().debugger_setup(debugger_args)
        self.memroy_module = None  # if the online memory is used separately
self.upstream_memroy_module = None
def setup_bart_index():
mm = BartIndexManager(self.logger)
mm.set_up_data_args(self.data_args)
mm.data_args.predict_batch_size = 4
mm.load_encoder_model(self.base_model_args)
return mm
def setup_bart_io_index():
mm = BartIOIndexManager(self.logger)
mm.set_up_data_args(self.data_args)
mm.data_args.predict_batch_size = 4
mm.load_encoder_model(self.base_model_args)
return mm
def setup_biencoder():
with open(debugger_args.indexing_args_path) as f:
train_args_dict = json.load(f)
mm = BiEncoderIndexManager(self.logger)
mm.train_args = Namespace(**train_args_dict)
mm.set_up_data_args(self.data_args)
mm.data_args.predict_batch_size = 4
mm.load_encoder_model(
self.base_model_args,
mm.train_args.memory_encoder_path,
mm.train_args.query_encoder_path)
return mm
        # Initialize the index manager for the chosen indexing method
self.logger.info(f"indexing_method={debugger_args.indexing_method}")
self.name = f"index_cl_{debugger_args.indexing_method}"
if debugger_args.indexing_method == "bart_index":
self.logger.info("setup_bart_index")
self.upstream_memroy_module = setup_bart_index()
elif debugger_args.indexing_method == "bart_io_index":
self.logger.info("bart_io_index")
self.upstream_memroy_module = setup_bart_io_index()
elif debugger_args.indexing_method == "biencoder":
self.logger.info("biencoder")
self.upstream_memroy_module = setup_biencoder()
assert self.upstream_memroy_module is not None
if debugger_args.init_memory_cache_path:
self.upstream_memroy_module.load_memory_from_path(debugger_args.init_memory_cache_path)
else:
self.upstream_memroy_module.set_up_initial_memory(
formatted_examples=self.sampled_upstream_examples)
if self.debugger_args.upstream_sample_ratio < 0:
self.logger.info("upstream_sample_ratio < 0 ; self.memroy_module <---> self.upstream_memroy_module")
self.memroy_module = self.upstream_memroy_module
else:
self.logger.info("upstream_sample_ratio > 0 ; two seperate memory module")
if debugger_args.indexing_method == "bart_io_index":
self.memroy_module = setup_bart_io_index()
elif debugger_args.indexing_method == "biencoder":
self.memroy_module = setup_biencoder()
elif debugger_args.indexing_method == "bart_index":
self.memroy_module = setup_bart_index()
return
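    # Hedged summary: with upstream_sample_ratio < 0 a single memory module
    # serves both upstream and online examples; otherwise the pre-built
    # upstream index is kept as-is and a second module of the same indexing
    # type accumulates the online errors.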
def online_debug(self):
self.logger.info("Start Online Debugging with Dynamic Error Mode")
self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
self.logger.info(f"Data Batch Size: {self.data_batch_size};")
self.timecode = 0
if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
# save the initial model as the 0-th model.
self._save_base_model()
self.past_errors = []
self.past_submission = []
last_steps = 0
self.logger.info("Copying initial model")
        initial_model = copy.deepcopy(self.base_model)  # kept for building query representations
for data_eval_loader in tqdm(self.data_eval_loaders, desc="Online Debugging (with Index-based replay)"):
result_dict = {"timecode": self.timecode} # start with 0
self.eval_knowledge_retention(result_dict)
self.eval_knowledge_generalization(result_dict)
############### CORE ###############
# self._replay_based_eval(result_dict)
formatted_bug_examples = self._get_dynamic_errors(
data_eval_loader, result_dict, return_raw_bug_examples=True)
            _, bug_eval_loader = self.get_dataloader(self.data_args, formatted_bug_batch=formatted_bug_examples, mode="eval")
            # Build the train loader over the current errors up front: it is
            # needed below for the bi-encoder query update and MIR retrieval.
            bug_train_loader, _ = self.get_dataloader(
                self.data_args, formatted_bug_examples, mode="train")
examples_to_train = formatted_bug_examples[:]
# if (self.model_update_steps - last_steps) >= self.debugger_args.replay_frequency \
if self.timecode % self.debugger_args.replay_frequency == 0 \
and self.debugger_args.replay_frequency > 0 and self.debugger_args.replay_size > 0 \
and self.timecode > 0:
# sparse experience replay
self.logger.info("Triggering Sampling from Memory and starting to replay.")
self.logger.info(f"Current memroy_module size: {self.memroy_module.get_memory_size()}.")
if self.upstream_memroy_module:
self.logger.info(f"Current upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}.")
if self.debugger_args.indexing_method == "biencoder":
# self.memroy_module.before_model = initial_model # if for longer-delta
self.upstream_memroy_module.before_model = initial_model
self.upstream_memroy_module.after_model = get_virtual_updated_model(self, bug_train_loader)
elif self.debugger_args.indexing_method == "bart_io_index":
# self.upstream_memroy_module.bart_model = initial_model
                    if self.debugger_args.upstream_sample_ratio > 0:  # a separate online memory module
self.memroy_module.bart_model = self.base_model
elif self.debugger_args.indexing_method == "bart_index":
# self.upstream_memroy_module.bart_model = initial_model
                    if self.debugger_args.upstream_sample_ratio > 0:  # a separate online memory module
self.memroy_module.bart_model = self.base_model
if self.debugger_args.use_mir:
assert self.debugger_args.replay_candidate_size >= self.debugger_args.replay_size
def mir_retrieve(mm, sample_size):
effective_cand_size = min(self.debugger_args.replay_candidate_size, mm.get_memory_size())
self.logger.info(f"effective_cand_size={effective_cand_size}")
each_sample_size = int(effective_cand_size*1.1/sample_size)
self.logger.info(f"each_sample_size={each_sample_size}")
assert effective_cand_size >= self.debugger_args.replay_size
retrieved_examples_candidates = mm.retrieve_from_memory(
query_examples=formatted_bug_examples,
sample_size=effective_cand_size,
rank_method=self.debugger_args.index_rank_method,
agg_method="each_topk_then_random",
each_sample_size=each_sample_size,
each_sim_sample_size=min(each_sample_size*5, mm.get_memory_size()), # only used for the bart-IO
)
if "mir_buffer_ids" not in result_dict:
result_dict["mir_buffer_ids"] = []
result_dict["mir_buffer_ids"] += [_id for (_input, _truth, _id) in retrieved_examples_candidates]
retrieved_examples = get_top_interfered_examples(self,
K=sample_size, candidate_examples=retrieved_examples_candidates, query_data_loader=bug_train_loader)
return retrieved_examples
if self.debugger_args.upstream_sample_ratio > 0:
upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
retrieved_examples = []
if upstream_sample_budget > 0:
retrieved_examples += mir_retrieve(mm=self.upstream_memroy_module,
sample_size=upstream_sample_budget)
retrieved_examples += mir_retrieve(mm=self.memroy_module,
sample_size=self.debugger_args.replay_size-upstream_sample_budget)
else:
retrieved_examples = mir_retrieve(mm=self.memroy_module, sample_size=self.debugger_args.replay_size)
else:
                each_sample_size = 5
                each_sim_sample_size = 30
retrieved_examples = []
upstream_sample_budget = 0
if self.debugger_args.upstream_sample_ratio > 0:
upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
retrieved_examples += self.upstream_memroy_module.retrieve_from_memory(
query_examples=formatted_bug_examples,
sample_size=upstream_sample_budget,
agg_method="each_topk_then_random",
rank_method=self.debugger_args.index_rank_method,
each_sample_size=each_sample_size, each_sim_sample_size=each_sim_sample_size)
retrieved_examples += self.memroy_module.retrieve_from_memory(
query_examples=formatted_bug_examples,
sample_size=self.debugger_args.replay_size-upstream_sample_budget,
agg_method="each_topk_then_random",
rank_method=self.debugger_args.index_rank_method,
each_sample_size=each_sample_size, each_sim_sample_size=each_sample_size*5)
# self.logger.info(f"retrieved_examples (index)={retrieved_examples}")
result_dict["retrieved_ids"] = [_id for (_input, _truth, _id) in retrieved_examples]
if self.debugger_args.use_replay_mix:
examples_to_train += retrieved_examples
self.logger.info(
f"Mixed the retrieved examples (len={len(retrieved_examples)}) to the current batch for training.")
else:
self.logger.info(
f"Replay-Training Start! Using the retrieved examples (len={len(retrieved_examples)}) ")
replay_data_loader, _ = self.get_dataloader(
self.data_args, retrieved_examples, mode="train")
self.fix_bugs(replay_data_loader, quiet=False) # sparse replay
self.logger.info("Replay-Training done.")
last_steps = self.model_update_steps
# Fix the bugs by mini-batch based "training"
self.logger.info(
f"Start error-fixing (len(examples_to_train)={len(examples_to_train)}) .... Timecode: {self.timecode}")
bug_train_loader, _ = self.get_dataloader(
self.data_args, examples_to_train, mode="train")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start error-fixing .... Done!")
flag_store_examples = True
if flag_store_examples:
self.logger.info(
f"Saving the current error examples (len={len(formatted_bug_examples)}) to the memory.")
self.logger.info(f"Current memroy_module size: {self.memroy_module.get_memory_size()}.")
if self.upstream_memroy_module:
self.logger.info(f"Current upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}.")
self.memroy_module.store_examples(formatted_bug_examples)
self.logger.info("Finished.")
############### CORE ###############
self.evaluate_error_fixing(result_dict, bug_eval_loader)
self._update_result_dict(result_dict)
if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
self._save_base_model()
self.save_result_file()
self.logger.info("-"*50)
self.timecode += 1
#### Final evaluation ####
self.final_evaluation()
#### Save the final model ####
self._save_base_model()
# Save to path
self.memroy_module.save_memory_to_path(self.debugger_args.memory_path)
|
CMR-main
|
cmr/debug_algs/index_based/cl_indexed_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import torch
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models.utils import trim_batch
import json
from transformers.modeling_bart import _prepare_bart_decoder_inputs
import numpy as np
import faiss
import pickle
import random
class BaseMemoryManager():
def __init__(self, logger):
super().__init__()
self.logger = logger
self.name = "base_memory_manager"
self.memory_examples = {}
def get_memory_size(self):
return len(self.memory_examples)
def _load_init_memory_examples(self, initial_memory_path="", formatted_examples=None, cut_off=None):
assert len(self.memory_examples) == 0
if initial_memory_path:
with open(initial_memory_path) as f:
                initial_memory_examples = [json.loads(line)
                                           for line in set(f.read().splitlines())][:cut_off]
initial_memory_examples = self.cl_utils.upstream_data_formatter(initial_memory_examples)
elif formatted_examples:
initial_memory_examples = formatted_examples
for item in initial_memory_examples:
            # Note that we now keep all the answers here.
self.memory_examples[item[2]] = (item[0], item[1], item[2])
self.logger.info(f"Set up the initial memory with {len(self.memory_examples)} examples.")
def set_up_initial_memory(self, initial_memory_path="", formatted_examples=None):
raise NotImplementedError
def load_memory_from_path(self, init_memory_cache_path):
raise NotImplementedError
def save_memory_to_path(self, memory_pkl_path):
raise NotImplementedError
def retrieve_from_memory(self, query_examples, sample_size, **kwargs):
raise NotImplementedError
def store_examples(self, examples):
raise NotImplementedError
class RandomMemoryManger(BaseMemoryManager):
### Mainly used for ER, MIR
def __init__(self, logger):
super().__init__(logger=logger)
self.logger = logger
self.name = "random_memory_manager"
self.memory_examples = {}
def set_up_initial_memory(self, initial_memory_path="", formatted_examples=None, cut_off=None):
self._load_init_memory_examples(initial_memory_path, formatted_examples, cut_off=None)
def load_memory_from_path(self, init_memory_cache_path):
with open(init_memory_cache_path, "rb") as f:
memory_cache = pickle.load(f)
self.logger.info(f"Load the cache to {f.name}")
self.memory_examples = memory_cache["memory_examples"]
def save_memory_to_path(self, memory_pkl_path):
memory_cache = {}
memory_cache["memory_examples"] = self.memory_examples
with open(memory_pkl_path, "wb") as f:
pickle.dump(memory_cache, f)
self.logger.info(f"Saved the cache to {f.name}")
def retrieve_from_memory(self, query_examples=None, sample_size=-1, **kwargs):
assert sample_size > 0
sample_size = min(sample_size, self.get_memory_size())
self.logger.info("Randomly retrieve from the memory. `query_examples` not used")
retrieved_example_ids = random.sample(list(self.memory_examples.keys()), sample_size)
retrieved_examples = [self.memory_examples[rid] for rid in retrieved_example_ids]
return retrieved_examples
def store_examples(self, examples):
for item in examples:
            # Note that we now keep all the answers here.
self.memory_examples[item[2]] = (item[0], item[1], item[2])
self.logger.info(f"Save {len(examples)} examples to the memory.")
class BartIndexManager(BaseMemoryManager):
def __init__(self, logger):
super().__init__(logger=logger)
self.logger = logger
self.name = "bart_index_manager"
self.memory_index = None
self.memory_examples = {}
self.bart_model = None
self.tokenizer = None
self.cl_utils = ContinualFinetuning(logger=logger)
self.data_args = None
self.dim_vector = 2*768
self.memory_index_sorted_ids = []
def set_up_data_args(self, args):
self.data_args = Namespace(
do_lowercase=args.do_lowercase,
append_another_bos=args.append_another_bos,
max_input_length=args.max_input_length,
max_output_length=args.max_output_length,
task_name=args.task_name,
train_batch_size=args.train_batch_size,
predict_batch_size=args.predict_batch_size,
)
def set_up_initial_memory(self, initial_memory_path="", formatted_examples=None, cut_off=None):
assert self.bart_model is not None
self._load_init_memory_examples(initial_memory_path, formatted_examples)
# build index
initial_memory_example_ids = sorted(list(self.memory_examples.keys()))[:cut_off]
examples = self.get_examples_by_ids(initial_memory_example_ids)
vectors = self.get_representation(examples)
self.update_index(initial_memory_example_ids, vectors)
def update_index(self, example_ids, vectors):
assert len(example_ids) == len(vectors)
if not self.memory_index:
self.memory_index = faiss.IndexFlatL2(self.dim_vector)
self.memory_index_sorted_ids += example_ids
# for ex_id in example_ids:
# self.memory_examples[ex_id]["memory_index_id"] = len(self.memory_index_sorted_ids)
# self.memory_index_sorted_ids.append(ex_id)
vectors = np.array(vectors)
faiss.normalize_L2(vectors)
self.memory_index.add(vectors)
def set_up_model(self, model, tokenizer):
del self.bart_model
del self.tokenizer
self.bart_model = model
self.tokenizer = tokenizer
def get_examples_by_ids(self, example_ids):
return [self.memory_examples[eid] for eid in example_ids]
def load_memory_from_path(self, init_memory_cache_path):
with open(init_memory_cache_path, "rb") as f:
memory_cache = pickle.load(f)
self.logger.info(f"Load the cache to {f.name}")
self.memory_index_sorted_ids = memory_cache["memory_index_sorted_ids"]
self.memory_index = memory_cache["memory_index"]
self.memory_examples = memory_cache["memory_examples"]
def save_memory_to_path(self, memory_pkl_path):
memory_cache = {}
memory_cache["memory_index_sorted_ids"] = self.memory_index_sorted_ids
memory_cache["memory_index"] = self.memory_index
memory_cache["memory_examples"] = self.memory_examples
with open(memory_pkl_path, "wb") as f:
pickle.dump(memory_cache, f)
self.logger.info(f"Saved the cache to {f.name}")
def search_index(self, query_vector, k=5):
q = np.array([query_vector])
faiss.normalize_L2(q)
D, I = self.memory_index.search(q, k)
retrieved_example_ids = [self.memory_index_sorted_ids[int(eid)] for eid in I[0]]
scores = [float(s) for s in D[0]]
return retrieved_example_ids, scores
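    # Hedged note: update_index L2-normalizes vectors before adding them and
    # search_index normalizes the query, so the L2 distances returned above
    # are monotonic in cosine similarity (||q - x||^2 = 2 - 2 * cos(q, x)).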
def get_query_representation(self, query_examples):
return self.get_representation(query_examples)
def retrieve_from_memory(self, query_examples, sample_size, **kwargs):
input_vectors = self.get_query_representation(query_examples)
agg_method = kwargs.get("agg_method", "each_topk_then_random")
rank_method = kwargs.get("rank_method", "most_similar")
if agg_method == "each_topk_then_random":
each_sample_size = kwargs.get("each_sample_size", 5)
retrieved_example_ids = []
retrieved_scores = []
for query_vector in input_vectors:
ids, scores = self.search_index(query_vector, each_sample_size)
retrieved_example_ids += ids
retrieved_scores += scores
# retrieved_example_ids = set(retrieved_example_ids) # TODO: decide later.
# retrieved_example_ids = random.sample(retrieved_example_ids, sample_size)
sorted_retrieved_example_ids = [x for _, x in sorted(zip(retrieved_scores, retrieved_example_ids), reverse=False)]
retrieved_examples = self.get_examples_by_ids(sorted_retrieved_example_ids)
self.logger.info(f"index_manager.retrieve_from_memory --> len(retrieved_examples)={len(retrieved_examples)}")
return retrieved_examples[:sample_size]
def store_examples(self, examples):
example_ids = []
for item in examples:
self.memory_examples[item[2]] = item
example_ids.append(item[2])
vectors = self.get_representation(examples)
self.update_index(example_ids, vectors)
def get_representation(self, examples):
all_vectors = get_bart_dual_representation(cl_trainer=self.cl_utils,
bart_model=self.bart_model,
tokenizer=self.tokenizer,
data_args=self.data_args,
examples=examples,
agg_method="mean")
return all_vectors
def load_encoder_model(self, base_model_args):
self.cl_utils.load_base_model(base_model_args)
self.set_up_model(model=self.cl_utils.base_model, tokenizer=self.cl_utils.tokenizer)
if __name__ == '__main__':
from cmr.debug_algs import run_lifelong_finetune
parser = run_lifelong_finetune.get_cli_parser()
args = parser.parse_args()
debugging_alg, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
args)
args.predict_batch_size = 8
index_manager = BartIndexManager(logger=logger)
index_manager.set_up_data_args(args)
index_manager.load_encoder_model(base_model_args)
# index_manager.initial_memory_path = "exp_results/data_streams/mrqa.nq_train.memory.jsonl"
index_manager.initial_memory_path = "data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl"
index_manager.set_up_initial_memory(index_manager.initial_memory_path, cut_off=None)
index_manager.save_memory_to_path("exp_results/data_streams/bart_index.upstream_memory.full.pkl")
# copy_item = [0,0,0]
# copy_item[2] = "mrqa_naturalquestions-train-10-copy"
# copy_item[0] = "Context: The movie was shot in LA and the Hawaiian islands of Bologno and Kologno between March 3 , 2020 and May 25 , 2020 . The movie is deliberately vague about which Hawaiian island its latter portion depicts ; thus , the characters hike across a rope bridge on Bologno and arrive in the next scene at a spectacular waterfall on Kologno , rather than the ordinary irrigation dam and pond on Bologno where the actual trail terminates . | Question: which did they hike in just go with it ?"
# copy_item[1] = ['Kauai ', 'Maui .']
# index_manager.store_examples([copy_item])
# # sanity check #
# # index_manager.load_memory_from_path("exp_results/data_streams/bart_index.init_memory.pkl")
# query_ids = ["mrqa_naturalquestions-train-10", "mrqa_naturalquestions-train-10600"]
# print(query_ids)
# print(index_manager.memory_examples[query_ids[0]])
# retrieved_exmaples = index_manager.retrieve_from_memory(query_examples=[
# index_manager.memory_examples[qid] for qid in query_ids], sample_size=15, each_sample_size=10, rank_method="most_similar", agg_method="each_topk_then_random")
# for item in retrieved_exmaples:
# print("-"*50)
# print(item[2])
# print(item[0])
# print(item[1])
|
CMR-main
|
cmr/debug_algs/index_based/index_manager.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import torch
from cmr.debug_algs.index_based.index_manager import BartIndexManager, BaseMemoryManager
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models.utils import trim_batch
import json
from transformers.modeling_bart import _prepare_bart_decoder_inputs
import numpy as np
import faiss
import pickle
import random
from scipy.spatial import distance
class BartIOIndexManager(BartIndexManager):
def __init__(self, logger):
super().__init__(logger=logger)
self.logger = logger
self.name = "bart_io_index_manager"
self.memory_index = {"input": None, "output": None}
self.dim_vector = 768
def set_up_data_args(self, args):
self.data_args = Namespace(
do_lowercase=args.do_lowercase,
append_another_bos=args.append_another_bos,
max_input_length=args.max_input_length,
max_output_length=args.max_output_length,
task_name=args.task_name,
train_batch_size=args.train_batch_size,
predict_batch_size=args.predict_batch_size,
)
def update_index(self, example_ids, vectors):
assert len(example_ids) == len(vectors)
if not self.memory_index["input"]:
self.memory_index["input"] = faiss.IndexFlatL2(self.dim_vector)
self.memory_index["output"] = faiss.IndexFlatL2(self.dim_vector)
self.memory_index_sorted_ids += example_ids
## add to input
input_vectors = np.array([v[:self.dim_vector] for v in vectors])
self.memory_index["input"].add(input_vectors)
## add to output
output_vectors = np.array([v[self.dim_vector:] for v in vectors])
self.memory_index["output"].add(output_vectors)
def search_index(self, query_vector, k=5, partition="input", return_index_ids=False):
if partition=="input":
query_vector = query_vector[:self.dim_vector]
elif partition=="output":
query_vector = query_vector[self.dim_vector:]
D, I = self.memory_index[partition].search(np.array([query_vector]), k)
scores = D[0]
if return_index_ids:
return I[0]
else:
retrieved_example_ids = [self.memory_index_sorted_ids[int(eid)] for eid in I[0]]
return retrieved_example_ids
def retrieve_from_memory(self, query_examples, sample_size, **kwargs):
input_vectors = self.get_query_representation(query_examples)
agg_method = kwargs.get("agg_method", "each_topk_then_random")
rank_method = kwargs.get("rank_method", "most_sim_input")
if agg_method == "each_topk_then_random":
each_sample_size = kwargs.get("each_sample_size", 5)
each_sim_sample_size = kwargs.get("each_sim_sample_size", 30)
retrieved_example_ids = []
retrieved_example_scores = []
for query_vector in input_vectors:
sim_input_index_ids = self.search_index(query_vector, each_sim_sample_size, partition="input", return_index_ids=True)
if rank_method == "most_sim_input":
retrieved_ids = sim_input_index_ids
elif rank_method == "most_sim_input_most_diff_output":
sim_output_vectors = [self.memory_index["output"].reconstruct(int(eid)) for eid in sim_input_index_ids]
query_output_vector = query_vector[self.dim_vector:]
distances = [distance.cosine(query_output_vector, s) for s in sim_output_vectors]
retrieved_ids = [int(x) for _, x in sorted(zip(distances, sim_input_index_ids), reverse=True)]
retrieved_example_ids += [self.memory_index_sorted_ids[int(eid)] for eid in retrieved_ids][:each_sample_size]
# retrieved_example_scores += # TODO:
self.logger.info(f"IO index -- retrieved_example_ids={len(retrieved_example_ids)}")
retrieved_examples = self.get_examples_by_ids(retrieved_example_ids)
retrieved_examples = random.sample(retrieved_examples, sample_size) # TODO: consider ranking
# retrieved_examples = retrieved_examples[:sample_size]
return retrieved_examples
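    # Hedged note on rank_method: "most_sim_input" keeps the nearest
    # neighbours in input space; "most_sim_input_most_diff_output" first
    # finds input-similar candidates and then reorders them by descending
    # cosine distance between outputs, preferring memory examples whose
    # answers disagree most with the query's.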
if __name__ == '__main__':
from cmr.debug_algs import run_lifelong_finetune
parser = run_lifelong_finetune.get_cli_parser()
args = parser.parse_args()
debugging_alg, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
args)
    base_model_args.base_model_path = "out/mrqa_squad_bart-base_1029_upstream_model/best-model.pt"
args.predict_batch_size = 8
index_manager = BartIOIndexManager(logger=logger)
index_manager.set_up_data_args(args)
index_manager.load_encoder_model(base_model_args)
# index_manager.initial_memory_path = "exp_results/data_streams/mrqa.nq_train.memory.jsonl"
# index_manager.set_up_initial_memory(index_manager.initial_memory_path, cut_off=None)
# index_manager.save_memory_to_path("exp_results/data_streams/bart_io_index.sample_init_memory.pkl")
index_manager.initial_memory_path = "data/mrqa_squad/mrqa_squad_train.jsonl"
index_manager.set_up_initial_memory(index_manager.initial_memory_path, cut_off=None)
index_manager.save_memory_to_path("experiments/eval_data/qa/bart_io_index.init_memory.pkl")
# query_ids = ["mrqa_squad-train-10"]
# print(index_manager.memory_examples[query_ids[0]])
# retrieved_exmaples = index_manager.retrieve_from_memory(query_examples=[
# index_manager.memory_examples[qid] for qid in query_ids], each_sample_size=10, sample_size=10, rank_method="most_sim_input")
# for item in retrieved_exmaples:
# print("-"*50)
# print(item[2])
# print(item[0])
# print(item[1])
|
CMR-main
|
cmr/debug_algs/index_based/IO_each_index.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import argparse
from logging import Logger
import logging
from cmr.debug_algs.index_based.index_manager import BartIndexManager
import torch
from torch import Tensor, combinations, normal
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import Module
import random
import glob
import os
import pickle
from tqdm import tqdm
import numpy as np
import faiss
import json
import wandb
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models.run_bart import train
from cmr.models.utils import set_seeds
from faiss import normalize_L2
def load_distant_supervision(folder_name, sample_size=1, logger=None, specified_names=None, exclude_files=[], train_args=None):
pkl_files = glob.glob(os.path.join(folder_name, '*.pkl'))[:]
pkl_files = [f for f in pkl_files if f not in exclude_files]
if specified_names:
pkl_files = [p for p in pkl_files if p.split(
"/")[-1].replace(".pkl", "") in specified_names]
else:
pkl_files = random.choices(pkl_files, k=sample_size)
ds_items = []
logger.info(f"Loading {pkl_files}")
for pkl_path in tqdm(pkl_files, desc="loading pkl files"):
with open(pkl_path, "rb") as f:
ds_items += pickle.load(f)
for item in ds_items:
for q in item["query"]:
original_dim = len(item["query"][q])
if train_args.query_only_after:
item["query"][q] = item["query"][q][original_dim//2:]
if train_args.query_only_before:
item["query"][q] = item["query"][q][:original_dim//2]
if train_args.query_delta:
before = item["query"][q][original_dim//2:]
after = item["query"][q][:original_dim//2]
item["query"][q] = before + [i-j for i, j in zip(before, after)]
# np.random.seed(42)
# # # For Debugging the data-distribution #
# print("generating random data")
# for item in ds_items:
# for q in item["query"]:
# item["query"][q] = np.random.normal(0, 0.1, 768*2*2)
# for q in item["positive"]:
# item["positive"][q] = np.random.normal(0, 0.1, 768*2)
# for q in item["negative"]:
# item["negative"][q] = np.random.normal(0.6, 0.1, 768*2)
# pass
# if exclude_files:
# np.random.seed(45)
# print("generating purturbs on the test data")
# for item in ds_items:
# for q in item["query"]:
# item["query"][q] += np.random.normal(0, 5e-2, 768*2*2)
# for q in item["positive"]:
# item["positive"][q] += np.random.normal(0, 5e-2, 768*2)
# for q in item["negative"]:
# item["negative"][q] += np.random.normal(0, 5e-2, 768*2)
return ds_items, pkl_files
class MLP(Module):
def __init__(self, input_dim, output_dim, hidden_dim, droprate=0):
super().__init__()
if hidden_dim > 0:
self.layers = torch.nn.Sequential(
# torch.nn.Flatten(),
# nn.BatchNorm1d(input_dim),
# nn.LayerNorm(input_dim),
nn.Linear(input_dim, hidden_dim),
# nn.LayerNorm(hidden_dim),
# nn.Sigmoid(),
nn.Dropout(droprate),
nn.ReLU(),
nn.Linear(hidden_dim, output_dim),
)
else:
self.layers = torch.nn.Sequential(
# torch.nn.Flatten(),
# nn.BatchNorm1d(input_dim),
# nn.Linear(input_dim, hidden_dim),
# nn.BatchNorm1d(hidden_dim),
# nn.ReLU(),
# nn.Sigmoid(),
# nn.Dropout(droprate),
# nn.Linear(hidden_dim, output_dim),
nn.BatchNorm1d(input_dim),
# nn.LayerNorm(input_dim),
nn.Linear(input_dim, output_dim),
)
self.init_weights()
def init_weights(self):
for module in self.layers:
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, X):
X = self.layers(X)
# X = normalize(X)
return X
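# Hedged usage sketch (dims taken from the CLI defaults below; not part of
# the training pipeline): project a concatenated BART representation down
# to the shared index space.
#   mem_enc = MLP(input_dim=768 * 2, output_dim=128, hidden_dim=-1)  # BatchNorm + Linear path
#   vec = mem_enc(torch.randn(4, 768 * 2))                           # -> (4, 128)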
def create_batch_from_groups(groups, qry_size=1, pos_size=1, neg_size=1, seen_query_ids=None, seen_memory_ids=None, query_mean=True):
    qry_sample_size = qry_size
    if query_mean:
        qry_size = 1  # effective query count per group after averaging
queries, candidates, targets = [], [], []
for group in groups:
# TODO: this is overly recorded..
if seen_query_ids is not None:
seen_query_ids.update(set(list(group["query"].keys())))
if seen_memory_ids is not None:
seen_memory_ids.update((set(list(group["positive"].keys()))))
seen_memory_ids.update((set(list(group["negative"].keys()))))
# queries.append(random.choice(list(group["query"].values())))
selected_queries = random.choices(list(group["query"].values()), k=qry_sample_size)
if query_mean:
selected_queries = np.array(selected_queries)
queries.append(np.mean(selected_queries, axis=0))
else:
queries += selected_queries
target = len(candidates)
candidates += random.choices(list(group["positive"].values()), k=pos_size) # for training, it must be a single positive
candidates += random.choices(list(group["negative"].values()), k=neg_size)
if pos_size > 1:
targets += [list(range(target, target+pos_size))] * qry_size # N*C
elif pos_size == 1:
targets += [target] * qry_size # N*1
assert len(queries) == len(targets) == len(groups) * qry_size
assert len(candidates) == len(groups) * (pos_size + neg_size)
if pos_size > 1:
return np.array(queries), np.array(candidates), targets
else:
return np.array(queries), np.array(candidates), np.array(targets)
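# Worked example (hypothetical sizes): with 2 groups, qry_size=1, pos_size=1
# and neg_size=2, the flattened candidate list is laid out as
# [p0, n0a, n0b, p1, n1a, n1b] and targets = [0, 3]: each query's index of
# its positive among all candidates, ready for F.cross_entropy over the
# (num_queries x num_candidates) score matrix built in train_biencoder.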
class BiEncoderIndexManager(BartIndexManager):
def __init__(self, logger):
super().__init__(logger=logger)
self.logger = logger
self.name = "biencoder_index_manager"
self.query_input_dim = 768*2*2
self.memory_input_dim = 768*2
self.hidden_dim = 512
self.dim_vector = 256 # final dim
self.memory_encoder = None
self.query_encoder = None
self.train_args = None
# cl
self.before_model = None
self.after_model = None
def load_encoder_model(self, base_model_args, memory_encoder_path, query_encoder_path):
super().load_encoder_model(base_model_args)
if self.memory_encoder is None:
self.init_biencoder_modules()
self.memory_encoder.load_state_dict(torch.load(memory_encoder_path))
self.query_encoder.load_state_dict(torch.load(query_encoder_path))
self.logger.info(f"Loading bi-encoders.memory_encoder from {memory_encoder_path}")
self.logger.info(f"Loading bi-encoders.query_encoder from {query_encoder_path}")
def init_biencoder_modules(self):
self.query_input_dim = self.train_args.query_input_dim
self.memory_input_dim = self.train_args.memory_input_dim
self.hidden_dim = self.train_args.hidden_dim
self.dim_vector = self.train_args.dim_vector
self.memory_encoder = MLP(self.memory_input_dim, self.dim_vector,
self.hidden_dim, droprate=self.train_args.droprate)
self.query_encoder = MLP(self.query_input_dim, self.dim_vector,
self.hidden_dim, droprate=self.train_args.droprate)
def get_representation(self, examples):
"""only for the memory encoding here"""
bart_reps = super().get_representation(examples)
bart_reps = np.array(bart_reps)
self.memory_encoder.eval()
all_vectors = self.memory_encoder(torch.Tensor(bart_reps)).detach().numpy()
return all_vectors
def train_biencoder(self, train_data, eval_data):
trainable_params = list(self.query_encoder.parameters()) + \
list(self.memory_encoder.parameters())
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
self.logger.info(f"# params of query_encoder = {count_parameters(self.query_encoder)}")
self.logger.info(f"# params of memory_encoder = {count_parameters(self.memory_encoder)}")
optimizer = torch.optim.Adam(trainable_params, lr=self.train_args.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1000, gamma=0.99, last_epoch=-1)
gradient_acc_steps = 1
if self.train_args.use_cuda:
self.query_encoder.to(torch.device("cuda"))
self.memory_encoder.to(torch.device("cuda"))
seen_query_ids = set()
seen_memory_ids = set()
eval_at_K = 16
best_eval_acc = self.eval_func_v1(
eval_data, k=eval_at_K, seen_query_ids=seen_query_ids, seen_memory_ids=seen_memory_ids)
self.logger.info(f"Valid Acc @ 0: Top-{eval_at_K} acc: {best_eval_acc}")
if self.train_args.wandb:
wandb.log({"valid_accuracy": best_eval_acc}, step=0)
wandb.log({"best_valid_acc": best_eval_acc}, step=0)
losses = []
no_up = 0
for _step in tqdm(range(self.train_args.n_steps), desc="Training steps"):
self.memory_encoder.train()
self.query_encoder.train()
# self.logger.info(f"Training step {_step}/{self.train_args.n_steps}")
sampled_groups = random.choices(train_data, k=self.train_args.batch_size)
queries, candidates, targets = create_batch_from_groups(
sampled_groups,
qry_size=self.train_args.qry_size,
pos_size=self.train_args.pos_size,
neg_size=self.train_args.neg_size,
seen_query_ids=seen_query_ids, seen_memory_ids=seen_memory_ids,
query_mean=self.train_args.use_query_mean)
optimizer.zero_grad()
qry_tensors = torch.Tensor(queries)
mem_tensors = torch.Tensor(candidates)
if self.train_args.use_cuda:
qry_tensors = qry_tensors.to(torch.device("cuda"))
mem_tensors = mem_tensors.to(torch.device("cuda"))
query_inputs = self.query_encoder(qry_tensors)
memory_inputs = self.memory_encoder(mem_tensors)
scores = torch.matmul(query_inputs, memory_inputs.transpose(0, 1))
if self.train_args.pos_size == 1:
tgt_tensors = torch.LongTensor(targets)
if self.train_args.use_cuda:
tgt_tensors = tgt_tensors.to(torch.device("cuda"))
loss = F.cross_entropy(scores, tgt_tensors, reduction="mean")
elif self.train_args.pos_size > 1:
multi_hot_targets = []
for target in targets:
labels = torch.LongTensor(target)
labels = labels.unsqueeze(0)
multi_hot_targets.append(torch.zeros(labels.size(0), len(candidates)).scatter_(1, labels, 1.))
multi_hot_targets = torch.stack(multi_hot_targets, dim=1)
multi_hot_targets = multi_hot_targets.view(scores.size())
tgt_tensors = torch.Tensor(multi_hot_targets)
criterion = torch.nn.BCEWithLogitsLoss(reduction="mean")
if self.train_args.use_cuda:
tgt_tensors = tgt_tensors.to(torch.device("cuda"))
loss = criterion(scores, tgt_tensors)
# self.logger.info(f"loss.item()={loss.item()};")
losses.append(loss.item())
loss.backward()
if self.train_args.wandb:
wandb.log({"lr": float(optimizer.param_groups[0]['lr'])}, step=_step)
wandb.log({"loss": float(loss)}, step=_step)
wandb.log({"avg_loss": float(sum(losses)/len(losses))}, step=_step)
# clip
torch.nn.utils.clip_grad_norm_(trainable_params, 1.0)
optimizer.step()
scheduler.step()
# self.logger.info(f"self.query_encoder.layers[0].weight = {self.query_encoder.layers[0].weight}")
# self.logger.info(f"self.memory_encoder.layers[0].weight = {self.memory_encoder.layers[0].weight}")
if _step > 0 and _step % self.train_args.eval_per_steps == 0:
self.logger.info(f"---- Completed epoch with avg training loss {sum(losses)/len(losses)}.")
train_acc = self.eval_func_v1(train_data[:], k=eval_at_K)
self.logger.info(
f"Train Acc: Top-{eval_at_K} acc @ {_step}: {train_acc} | ")
valid_acc = self.eval_func_v1(eval_data, k=eval_at_K, seen_query_ids=seen_query_ids, seen_memory_ids=seen_memory_ids)
best_eval_acc = max(best_eval_acc, valid_acc)
if self.train_args.wandb:
wandb.log({"train_accuracy": train_acc}, step=_step)
wandb.log({"valid_accuracy": valid_acc}, step=_step)
wandb.log({"best_valid_acc": best_eval_acc}, step=_step)
                self.logger.info(
                    f"Valid Acc: Top-{eval_at_K} acc @ {_step}: {valid_acc} | best_eval_acc={best_eval_acc}")
if best_eval_acc == valid_acc:
self.logger.info("new record; saving the biencoder ckpts.")
no_up = 0
elif best_eval_acc > valid_acc:
no_up += 1
if no_up >= self.train_args.patience:
break
if self.train_args.save_ckpt:
self.save_biencoder()
def eval_func_v2(self, eval_data, k=None, seen_query_ids=None, seen_memory_ids=None, filter=False):
        # based on pair-wise comparisons
self.query_encoder.eval()
self.memory_encoder.eval()
eval_scores = []
for group in eval_data:
queries, candidates, targets = create_batch_from_groups([group], qry_size=16, pos_size=8, neg_size=8)
# query_inputs = self.query_encoder(torch.Tensor(queries))
# memory_inputs = self.memory_encoder(torch.Tensor(candidates))
qry_tensors = torch.Tensor(queries)
mem_tensors = torch.Tensor(candidates)
if self.train_args.use_cuda:
qry_tensors = qry_tensors.to(torch.device("cuda"))
mem_tensors = mem_tensors.to(torch.device("cuda"))
query_inputs = self.query_encoder(qry_tensors)
memory_inputs = self.memory_encoder(mem_tensors)
scores = torch.matmul(query_inputs, memory_inputs.transpose(0, 1))
querywise_scores = []
for qid in range(len(queries)):
pairwise_comp = []
pos_start = 0 # always 0
pos_end = pos_start + 8
neg_start = pos_end
neg_end = neg_start + 8
for pos_ind in range(pos_start, pos_end):
for neg_ind in range(neg_start, neg_end):
score_pos = scores[qid][pos_ind]
score_neg = scores[qid][neg_ind]
pairwise_comp.append(int(score_pos > score_neg))
pairwise_score = np.mean(pairwise_comp)
querywise_scores.append(pairwise_score)
group_score = np.mean(querywise_scores)
eval_scores.append(group_score)
return np.mean(eval_scores)
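    # Hedged note: eval_func_v2 reports the fraction of (positive, negative)
    # pairs ranked correctly per query (a pairwise, AUC-style ranking
    # accuracy), averaged over queries within a group and then over groups.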
def eval_func_v1(self, eval_data, k=5, seen_query_ids=None, seen_memory_ids=None, filter=False):
top_k_accs = []
tested_query_ids = set()
tested_memory_ids = set()
for item in eval_data:
query_vectors = []
query_ids = []
for qry_id, qry_vec in item["query"].items():
if filter and seen_query_ids is not None and qry_id in seen_query_ids:
# Remove the seen qry ids
continue
query_ids.append(qry_id)
query_vectors.append(qry_vec)
if len(query_ids) == 0:
continue
tested_query_ids.update(query_ids)
            positive_ids = set(item["positive"].keys())
            all_candidates = []
            all_candidate_vectors = []
            memory_items = list(item["negative"].items()) + list(item["positive"].items())
            random.shuffle(memory_items)  # avoid ties when candidates have equal scores
            for ex_id, vector in memory_items:
                if filter and seen_memory_ids is not None and ex_id in seen_memory_ids:
                    # Skip the memory ids that were seen during training.
                    continue
                all_candidates.append(ex_id)
                tested_memory_ids.add(ex_id)
                all_candidate_vectors.append(vector)
# all_candidate_vectors.append([v-1 for v in vector]) # DEBUG:
query_vectors = np.array(query_vectors)
all_candidate_vectors = np.array(all_candidate_vectors)
self.query_encoder.eval()
self.memory_encoder.eval()
q_inputs = torch.Tensor(query_vectors)
m_inputs = torch.Tensor(all_candidate_vectors)
if self.train_args.use_cuda:
q_inputs = q_inputs.to(torch.device("cuda"))
m_inputs = m_inputs.to(torch.device("cuda"))
q = self.query_encoder(q_inputs).detach().cpu().numpy()
m = self.memory_encoder(m_inputs).detach().cpu().numpy()
memory_index = faiss.IndexFlatL2(m.shape[1])
memory_index.add(m)
Ds, Is = memory_index.search(q, k)
for index_list in Is:
                retrieved_top_ids = [all_candidates[ind] for ind in index_list]
top_k_accs.append(len([x for x in retrieved_top_ids if x in positive_ids])/k)
del memory_index
if seen_query_ids is not None:
coverage = len(tested_query_ids & seen_query_ids)/len(tested_query_ids)
self.logger.info(f"#tested_query_ids={len(tested_query_ids)}; coverage={coverage}")
if seen_memory_ids is not None:
coverage = len(tested_memory_ids & seen_memory_ids)/len(tested_memory_ids)
self.logger.info(f"#tested_memory_ids={len(tested_memory_ids)}; coverage={coverage}")
# self.logger.info(f"top_k_accs = {top_k_accs}; ")
return np.mean(top_k_accs)
def save_biencoder(self, query_encoder_path=None, memory_encoder_path=None):
if not query_encoder_path:
query_encoder_path = self.train_args.query_encoder_path
if not memory_encoder_path:
memory_encoder_path = self.train_args.memory_encoder_path
def save_module(module, path):
model_state_dict = {k: v.cpu() for (
k, v) in module.state_dict().items()}
torch.save(model_state_dict, path)
self.logger.info(f"Model saved to {path}.")
save_module(self.query_encoder, query_encoder_path)
save_module(self.memory_encoder, memory_encoder_path)
def get_query_representation(self, query_examples):
"""Using the concatenation"""
before_all_vectors = get_bart_dual_representation(cl_trainer=self.cl_utils,
bart_model=self.before_model,
tokenizer=self.tokenizer,
data_args=self.data_args,
examples=query_examples)
after_all_vectors = get_bart_dual_representation(cl_trainer=self.cl_utils,
bart_model=self.after_model,
tokenizer=self.tokenizer,
data_args=self.data_args,
examples=query_examples)
bart_reps = []
for b, a in zip(before_all_vectors, after_all_vectors):
bart_reps.append(list(b)+list(a))
bart_reps = np.array(bart_reps)
self.query_encoder.eval()
all_vectors = self.query_encoder(torch.Tensor(bart_reps)).detach().numpy()
return all_vectors
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--ds_dir_path",
default="exp_results/supervision_data/1020_dm_simple/")
parser.add_argument("--num_ds_train_file", type=int, default=24)
parser.add_argument("--num_ds_dev_file", type=int, default=8)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--run_mode", type=str, default="train") # TODO:
parser.add_argument("--query_encoder_path", type=str,
default="exp_results/supervision_data/$prefix.qry_encoder.pt")
parser.add_argument("--memory_encoder_path", type=str,
default="exp_results/supervision_data/$prefix.mem_encoder.pt")
parser.add_argument("--memory_index_path", type=str,
default="exp_results/supervision_data/$prefix.memory.index")
parser.add_argument("--train_args_path", type=str,
default="exp_results/supervision_data/$prefix.train_args.json")
# train_args
parser.add_argument("--query_input_dim", type=int, default=768*2*2)
parser.add_argument("--memory_input_dim", type=int, default=768*2)
parser.add_argument("--hidden_dim", type=int, default=-1) # -1 means no hidden layer; 256 for example
parser.add_argument("--dim_vector", type=int, default=128)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--n_steps", type=int, default=8000)
parser.add_argument("--eval_per_steps", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--qry_size", type=int, default=8) # 1-16
parser.add_argument("--pos_size", type=int, default=16) # 1-8
parser.add_argument("--neg_size", type=int, default=1) # 1-8
parser.add_argument("--patience", type=int, default=8)
parser.add_argument("--droprate", type=float, default=0)
parser.add_argument('--use_query_mean', default=True, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--run_name', default="1020_dm_simple", type=str)
parser.add_argument('--save_ckpt', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--use_cuda', default=True, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--wandb', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--query_only_after', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--query_only_before', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
parser.add_argument('--query_delta', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
return parser
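# Hedged aside (illustrative, not part of the original file): argparse's
# `type=bool` would treat any non-empty string (even "False") as True, which is
# why the parser above uses the `lambda x: str(x).lower() in [...]` idiom. The
# same idea as a named helper (hypothetical name):
def _str2bool(x):
    return str(x).lower() in ['true', '1', 'yes']
# e.g. _str2bool("True") -> True; _str2bool("no") -> False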
if __name__ == '__main__':
biencoder_args = get_parser().parse_args()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
if biencoder_args.wandb and biencoder_args.run_mode == "train":
run = wandb.init(reinit=True, project="FAIR_Biencoder", settings=wandb.Settings(start_method="fork"))
run_name = wandb.run.name
biencoder_args.run_name = run_name
logger.info(f"run_name = {run_name}")
biencoder_args.query_encoder_path = biencoder_args.query_encoder_path.replace("$prefix", biencoder_args.run_name)
biencoder_args.memory_encoder_path = biencoder_args.memory_encoder_path.replace("$prefix", biencoder_args.run_name)
biencoder_args.memory_index_path = biencoder_args.memory_index_path.replace("$prefix", biencoder_args.run_name)
biencoder_args.train_args_path = biencoder_args.train_args_path.replace("$prefix", biencoder_args.run_name)
set_seeds(biencoder_args.seed)
if biencoder_args.run_mode == "train":
with open(biencoder_args.train_args_path, "w") as f:
json.dump(vars(biencoder_args), f)
if biencoder_args.wandb:
wandb.config.update(biencoder_args)
if biencoder_args.query_only_after or biencoder_args.query_only_before:
# or biencoder_args.query_delta
biencoder_args.query_input_dim = biencoder_args.query_input_dim // 2
train_data, train_files = load_distant_supervision(
biencoder_args.ds_dir_path, sample_size=biencoder_args.num_ds_train_file, logger=logger, train_args=biencoder_args)
logger.info(f"num_groups = {len(train_data)}")
eval_data, eval_files = load_distant_supervision(
biencoder_args.ds_dir_path, sample_size=biencoder_args.num_ds_dev_file, logger=logger, exclude_files=train_files, train_args=biencoder_args)
biencoder_memory_module = BiEncoderIndexManager(logger)
biencoder_memory_module.train_args = biencoder_args
biencoder_memory_module.init_biencoder_modules()
biencoder_memory_module.train_biencoder(train_data, eval_data)
if biencoder_args.save_ckpt:
biencoder_memory_module.save_biencoder(
biencoder_args.query_encoder_path, biencoder_args.memory_encoder_path)
if biencoder_args.wandb:
    run.finish()
elif biencoder_args.run_mode == "index":
# json.dump(vars(biencoder_args), open(biencoder_args.train_args_path, "w"))
with open(biencoder_args.train_args_path, "r") as f:
backup_args = json.load(f)
biencoder_args.hidden_dim = backup_args["hidden_dim"]
biencoder_args.query_input_dim = backup_args["query_input_dim"]
biencoder_args.memory_input_dim = backup_args["memory_input_dim"]
biencoder_args.dim_vector = backup_args["dim_vector"]
biencoder_args.use_query_mean = backup_args["use_query_mean"]
from cmr.debug_algs import run_lifelong_finetune
parser = run_lifelong_finetune.get_cli_parser()
cl_args = parser.parse_args("")
debugging_alg, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
cl_args)
cl_args.predict_batch_size = 8
index_manager = BiEncoderIndexManager(logger)
index_manager.train_args = biencoder_args
index_manager.set_up_data_args(cl_args)
index_manager.load_encoder_model(
base_model_args, biencoder_args.memory_encoder_path, biencoder_args.query_encoder_path)
index_manager.initial_memory_path = "data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl"
index_manager.set_up_initial_memory(index_manager.initial_memory_path)
index_manager.save_memory_to_path("exp_results/data_streams/1021_biencoder_init_memory.pkl")
|
CMR-main
|
cmr/debug_algs/index_based/biencoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import altair as alt
def draw_curve(df, y_scale=[0, 1], fig_title="", y_title="Y Title", x_key="timecode", y_key="em:Q", height=800, width=1150, x_scale=[0, 100], color_dom=None, color_range=None, orient="top-right"):
df = df[(df["timecode"] <= x_scale[1]) & (df["timecode"] >= x_scale[0])]
x = alt.X(x_key, type="ordinal", title="", axis=alt.Axis(tickCount=10, grid=False))
if color_dom and color_range:
    color = alt.Color('prefix:N', scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom)
    color_wo_legend = alt.Color('prefix:N', scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom, legend=None)
    shape = alt.Shape('prefix:N', sort=color_dom)
    shape_wo_legend = alt.Shape('prefix:N', sort=color_dom, legend=None)
else:
    color = alt.Color('prefix:N')
    color_wo_legend = alt.Color('prefix:N', legend=None)
    shape = alt.Shape('prefix:N')
    shape_wo_legend = alt.Shape('prefix:N', legend=None)
# scale=alt.Scale(range=['cross', 'circle', 'square', 'triangle-right', 'diamond'])
y=alt.Y(y_key, stack=None, title="", scale=alt.Scale(domain=y_scale), axis=alt.Axis(tickCount=10, grid=False))
points = alt.Chart(df).mark_point(opacity=0.8, filled=True, size=350).encode(x=x, y=y, shape=shape ,color=color).properties(title=fig_title)
lines = alt.Chart(df).mark_line(point=False).encode(x=x, y=y, color=color).properties(title=fig_title)
fig = alt.layer(points, lines).resolve_scale(color="independent", shape="independent")
# fig = points
fig = fig.properties(width=width, height=height).configure_axis(
labelFontSize=30,
titleFontSize=30,
).configure_view(stroke="black", strokeWidth=3)
if orient != "none":
fig = fig.configure_legend(titleFontSize=0, labelFontSize=30, symbolSize=300, orient=orient, strokeColor='gray',
fillColor='#EEEEEE',
padding=5,
cornerRadius=3,).configure_title(
fontSize=30,
font='Courier',
anchor='middle',
orient="top", align="center",
color='black'
)
return fig
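# Hedged usage sketch (illustrative, not part of the original file): draw_curve
# expects a long-format DataFrame with `timecode`, `prefix`, and the y column;
# the toy frame below is hypothetical.
# import pandas as pd
# toy_df = pd.DataFrame({
#     "timecode": [0, 10, 20, 0, 10, 20],
#     "em": [0.50, 0.55, 0.60, 0.48, 0.52, 0.58],
#     "prefix": ["CL=A"] * 3 + ["CL=B"] * 3,
# })
# fig = draw_curve(toy_df, y_scale=[0.4, 0.7], y_key="em:Q", x_scale=[0, 20])
# fig.save("curve.html")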
def draw_stacked_bars(df, y_scale=[0, 1], fig_title="", y_title="Y Title", x_key="timecode", y_key="em:Q", height=800, width=1150, x_scale=[0, 100], bin_width=10, color_dom=None, color_range=None):
if color_dom and color_range:
color=alt.Color('prefix:N', scale=alt.Scale(domain=color_dom, range=color_range))
else:
color=alt.Color('prefix:N')
fig = alt.Chart(df).mark_bar().encode(x=alt.X(x_key, title="Time Step", axis=alt.Axis(tickMinStep=10, tickOffset=0, tickWidth=5,), scale=alt.Scale(domain=x_scale)),
y=alt.Y(y_key, title=y_title, scale=alt.Scale(domain=y_scale)),
color=color,).properties(title=fig_title)
fig = alt.layer(fig).resolve_scale()
fig = fig.properties(width=width, height=height).configure_title(fontSize=50,
).configure_bar(binSpacing=0, width=bin_width).configure_axis(
labelFontSize=25,
titleFontSize=25,
).configure_legend(titleFontSize=0, labelFontSize=30, orient='top-left', strokeColor='gray',
fillColor='#EEEEEE',
padding=5,
cornerRadius=3,).configure_title(
fontSize=30,
font='Courier',
anchor='middle',
orient="top", align="center",
color='black'
)
return fig
def draw_grouped_bars(df, y_scale=[0, 1], fig_title="", y_title="Y Title", x_key="timecode", group_key="", y_key="em:Q", height=800, width=1150, x_scale=[0, 100], bin_width=10, color_dom=None, color_range=None, orient = "none"):
if color_dom and color_range:
color=alt.Color(x_key, scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom, legend=None)
else:
color=alt.Color(x_key)
bars = alt.Chart(df).mark_bar(clip=True).encode(x=alt.X(x_key, title="CL Method", sort=color_dom),
y=alt.Y(y_key, title=y_title, scale=alt.Scale(domain=y_scale), axis=alt.Axis(grid=False)),
color=color).properties(title=fig_title)
text = bars.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text=y_key
)
fig = bars
# fig = alt.layer(fig).resolve_scale()
fig = fig.properties(width=width, height=height).configure_title(fontSize=0,
).configure_bar(binSpacing=0, width=bin_width).configure_axis(
labelFontSize=10,
titleFontSize=10,
)
if orient != "none":
fig = fig.configure_legend(titleFontSize=0, labelFontSize=30, orient='top-left', strokeColor='gray',
fillColor='#EEEEEE',
padding=5,
cornerRadius=3,)
fig = fig.configure_title(
fontSize=10,
font='Courier',
anchor='middle',
orient="top", align="center",
color='black'
)
return fig
|
CMR-main
|
cmr/notebooks/draw_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import datasets
import numpy as np
import os
import gzip
import sys
import json
def show_statistics(lines):
len_list = []
for l in lines:
item = json.loads(l)
len_list.append(len(item["input"].split()))
print(np.min(len_list), np.max(len_list),
np.mean(len_list), np.median(len_list))
return
def escape(s):
# TODO: remove the markups
filter_words = ["</P>", "<P>", "<Li>", "</Li>", "<Ul>", "</Ul>", "[DOC]", "[TLE]", "[PAR]", "[SEP]", "\n", "\t"]
for fw in filter_words:
s = s.replace(fw, "")
return s.strip()
# Filtering the bad examples.
def example_pass(context):
if "<table>" in context.lower() or "<td>" in context.lower():
return False
else:
return True
def add_qmark(s):
return s if s.endswith("?") else s + " ?"
def write_to_jsonl(lst, out_file):
with open(out_file, "w") as fout:
fout.write("\n".join(lst))
# for line in lst:
# fout.write("{}\t{}\n".format(line[0], line[1]))
def deduplicate(lines):
seen_inputs = set()
unique_lines = []
for line in lines:
# print(line)
item = json.loads(line)
if item['input'] not in seen_inputs:
unique_lines.append(line)
seen_inputs.add(f"{item['input']}")
# result = list(set(lines))
print("deduplicate", len(lines), len(unique_lines))
return unique_lines
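# Hedged usage sketch (illustrative, not part of the original file):
# lines = ['{"input": "q1", "output": ["a"]}', '{"input": "q1", "output": ["b"]}']
# deduplicate(lines)  # keeps only the first line for each distinct "input"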
class TextToTextDataset():
def get_all_lines(self, dataset):
train_lines = deduplicate(self.map_to_list(dataset, "train"))
val_lines = deduplicate(self.map_to_list(dataset, "validation"))
test_lines = deduplicate(self.map_to_list(dataset, "test"))
show_statistics(train_lines)
show_statistics(val_lines)
# TODO: de-duplicate the lines!
return train_lines, val_lines, test_lines
def write_dataset(self, path):
"""
return train, dev, test
"""
# load dataset
dataset = self.load_dataset()
# formulate into list (for consistency in np.random)
train_lines, val_lines, test_lines = self.get_all_lines(dataset)
# shuffle the data
# np.random.seed(seed)
# np.random.shuffle(train_lines)
os.makedirs(os.path.join(path, self.task_identifier), exist_ok=True)
prefix = os.path.join(path, self.task_identifier,
"{}".format(self.task_identifier))
write_to_jsonl(train_lines, prefix + "_train.jsonl")
write_to_jsonl(val_lines, prefix + "_dev.jsonl")
if test_lines:
write_to_jsonl(test_lines, prefix + "_test.jsonl")
class MRQA(TextToTextDataset):
def __init__(self, task_identifier="mrqa", subset="SQuAD", mrqa_path="data/mrqa"):
self.task_identifier = task_identifier + "_" + subset.lower()
self.mrqa_path = mrqa_path
self.subset = subset
def map_to_list(self, dataset, split_name):
if split_name not in dataset:
print("No such split_name:", split_name)
return []
lines = []
for datapoint in dataset[split_name]:
if not datapoint["answers"]:
print("empty answer")
continue
if not example_pass(datapoint["context"]):
continue
# add question mark!
# lines.append(("Context: " + escape(datapoint["context"]) +
# " | Question: " +
# add_qmark(escape(datapoint["question"])),
# "\t".join([escape(a) for a in datapoint["answers"]])))
# _input = "Context: " + escape(datapoint["context"]) + \
# " | " + "Question: " + add_qmark(escape(datapoint["question"]))
# TODO: need re-training
_input = f'Question: {add_qmark(escape(datapoint["question"]))} </s> Context: {escape(datapoint["context"])}'
_output = [escape(a) for a in datapoint["answers"]]
_id = f"{self.task_identifier}-{split_name}-{len(lines)}"
instance = {"id": _id, "input": _input, "output": _output}
lines.append(json.dumps(instance))
print("Three examples: \n" + "\n".join([str(_) for _ in lines[:3]]))
return lines
def load_dataset(self):
def load_jsonl_gz(gzpath):
data = []
with gzip.open(gzpath, 'rb') as myzip:
for example in myzip:
json_line = json.loads(example)
if "header" in json_line:
print(json_line["header"])
pass
else:
context = json_line["context"]
qa_items = []
for item in json_line["qas"]:
qa_items.append(dict(context=context,
qid=item["qid"],
question=item["question"],
answers=list(set(item["answers"]))))
data.extend(qa_items)
return data
path_to_train = os.path.join(
self.mrqa_path, "mrqa_train", self.subset+".jsonl.gz")
path_to_dev = os.path.join(
self.mrqa_path, "mrqa_dev", self.subset+".jsonl.gz")
dataset = {}
dataset["train"] = load_jsonl_gz(path_to_train)
dataset["validation"] = load_jsonl_gz(path_to_dev)
return dataset
class NLI(TextToTextDataset):
def __init__(self, task_identifier="snli"):
self.task_identifier = task_identifier
# for classification tasks, specify the meaning of each label
self.prompt = " " # are two sentences entailment or not entailment?
if self.task_identifier in ["snli", "anli", "multi_nli"]:
self.label = {
0: ["entailment"],
1: ["neutral"],
2: ["contradiction"]
}
elif self.task_identifier == "qnli":
self.label = {
0: ["entailment"],
1: ["neutral"],
}
elif self.task_identifier == "scitail":
self.label = {
"entails": ["entailment"],
"neutral": ["neutral"],
}
def get_all_lines(self, dataset, splits=["train", "validation", "test"]):
all_lines = {}
for split in splits:
all_lines[split] = deduplicate(self.map_to_list(dataset, split))
show_statistics(all_lines[split])
# TODO: de-duplicate the lines!
return all_lines
def write_dataset(self, path):
"""
return train, dev, test
"""
# load dataset
dataset = self.load_dataset()
# formulate into list (for consistency in np.random)
if self.task_identifier in ["snli", "scitail", "qnli"]:
splits = ["train", "validation", "test"]
elif self.task_identifier == "anli":
splits = ['train_r1', 'dev_r1', 'test_r1', 'train_r2', 'dev_r2', 'test_r2', 'train_r3', 'dev_r3', 'test_r3']
elif self.task_identifier == "multi_nli":
splits = ['validation_matched', 'validation_mismatched']
all_lines = self.get_all_lines(dataset, splits)
# shuffle the data
# np.random.seed(seed)
# np.random.shuffle(train_lines)
os.makedirs(os.path.join(path, self.task_identifier), exist_ok=True)
prefix = os.path.join(path, self.task_identifier,
"{}".format(self.task_identifier))
for split in splits:
write_to_jsonl(all_lines[split], f"{prefix}_{split}.jsonl")
def map_to_list(self, dataset, split_name):
lines = []
for datapoint in dataset[split_name]:
# print(datapoint["label"])
if datapoint["label"] not in self.label:
continue
# lines.append(("Premise: " + datapoint["premise"] + " | Hypothesis: " +
# datapoint["hypothesis"], self.label[datapoint["label"]]))
_id = f"{self.task_identifier}-{split_name}-{len(lines)}"
if self.task_identifier == "qnli":
_input = f'Premise: {datapoint["sentence"]} </s> Hypothesis: {datapoint["question"]}'
else:
_input = f'Premise: {datapoint["premise"]} </s> Hypothesis: {datapoint["hypothesis"]}'
_input += " | Options: entailment, neutral, contradiction "
_output = self.label[datapoint["label"]]
instance = {"id": _id, "input": _input, "output": _output}
lines.append(json.dumps(instance))
print("Three examples: \n" + "\n".join([str(_) for _ in lines[:3]]))
return lines
def load_dataset(self):
if self.task_identifier == "scitail":
return datasets.load_dataset("scitail", "dgem_format")
elif self.task_identifier == "qnli":
return datasets.load_dataset("glue", "qnli")
else:
return datasets.load_dataset(self.task_identifier)
class CSR(TextToTextDataset):
def __init__(self, task_identifier="commonsense_qa"):
self.task_identifier = task_identifier
# for classification tasks, specify the meaning of each label
# self.prompt = " " # are two sentences entailment or not entailment?
# if self.task_identifier in ["snli", "anli", "multi_nli"]:
# self.label = {
# 0: ["entailment"],
# 1: ["neutral"],
# 2: ["contradiction"]
# }
# elif self.task_identifier == "qnli":
# self.label = {
# 0: ["entailment"],
# 1: ["neutral"],
# }
# elif self.task_identifier == "scitail":
# self.label = {
# "entails": ["entailment"],
# "neutral": ["neutral"],
# }
def get_all_lines(self, dataset, splits=["train", "validation", "test"]):
all_lines = {}
for split in splits:
all_lines[split] = deduplicate(self.map_to_list(dataset, split))
show_statistics(all_lines[split])
# TODO: de-duplicate the lines!
return all_lines
def write_dataset(self, path):
"""
return train, dev, test
"""
# load dataset
dataset = self.load_dataset()
# formulate into list (for consistency in np.random)
# if self.task_identifier in ["snli", "scitail", "qnli"]:
# splits = ["train", "validation", "test"]
# elif self.task_identifier == "anli":
# splits = ['train_r1', 'dev_r1', 'test_r1', 'train_r2', 'dev_r2', 'test_r2', 'train_r3', 'dev_r3', 'test_r3']
# elif self.task_identifier == "multi_nli":
# splits = ['validation_matched', 'validation_mismatched']
splits = ["train", "validation"]
all_lines = self.get_all_lines(dataset, splits)
# shuffle the data
# np.random.seed(seed)
# np.random.shuffle(train_lines)
os.makedirs(os.path.join(path, self.task_identifier), exist_ok=True)
prefix = os.path.join(path, self.task_identifier,
"{}".format(self.task_identifier))
for split in splits:
write_to_jsonl(all_lines[split], f"{prefix}_{split}.jsonl")
def map_to_list(self, dataset, split_name):
lines = []
for datapoint in dataset[split_name]:
choices = datapoint["choices"]
choices_map = {}
choice_strs = []
for ind, (key, choice) in enumerate(list(zip(choices["label"], choices["text"]))):
if self.task_identifier == "openbookqa":
key = list("ABCDEF")[ind]
choices_map[key] = choice
choice_strs.append(f"{key}: {choice}")
_id = f"{self.task_identifier}-{split_name}-{len(lines)}"
if self.task_identifier == "openbookqa":
_input = f'Question: {datapoint["question_stem"]} </s> {" | ".join(choice_strs)}'
else:
_input = f'Question: {datapoint["question"]} </s> {" | ".join(choice_strs)}'
_output = [choices_map[datapoint["answerKey"]]]
instance = {"id": _id, "input": _input, "output": _output}
lines.append(json.dumps(instance))
print("Three examples: \n" + "\n".join([str(_) for _ in lines[:3]]))
return lines
def load_dataset(self):
if self.task_identifier == "ai2_arc-easy":
return datasets.load_dataset("ai2_arc", "ARC-Easy")
elif self.task_identifier == "ai2_arc-hard":
return datasets.load_dataset("ai2_arc", "ARC-Challenge")
elif self.task_identifier == "openbookqa":
return datasets.load_dataset("openbookqa", "main")
return datasets.load_dataset(self.task_identifier)
def format(dataset_name, path="./"):
print("Formatting ", dataset_name)
if dataset_name.startswith("mrqa_"):
data = MRQA(subset=dataset_name.split("_")[1])
data.write_dataset(path)
elif dataset_name.startswith("nli#"):
name = dataset_name.split("#")[1]
data = NLI(task_identifier=name)
data.write_dataset(path)
elif dataset_name.startswith("csr#"):
name = dataset_name.split("#")[1]
data = CSR(task_identifier=name)
data.write_dataset(path)
path = "data/"
if len(sys.argv) >= 2:
path = sys.argv[1]
format("mrqa_SQuAD", path)
format("mrqa_TriviaQA", path)
format("mrqa_NaturalQuestions", path)
format("mrqa_HotpotQA", path)
format("mrqa_NewsQA", path)
format("mrqa_SearchQA", path)
# format("nli#snli", path)
# format("nli#anli", path)
# format("nli#multi_nli", path)
# format("nli#scitail", path)
# format("csr#commonsense_qa", path)
# format("csr#riddle_sense", path)
# format("csr#ai2_arc-easy", path)
# format("csr#ai2_arc-hard", path)
# format("csr#openbookqa", path)
|
CMR-main
|
data/data_formatter.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import distutils.command.clean
import os
import shutil
import subprocess
from pathlib import Path
from setuptools import find_packages, setup
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, "version.txt")
with open(version_txt, "r") as f:
version = f.readline().strip()
ROOT_DIR = Path(__file__).parent.resolve()
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
.decode("ascii")
.strip()
)
except Exception:
sha = "Unknown"
package_name = "rlhive"
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
def write_version_file():
version_path = os.path.join(cwd, "rlhive", "version.py")
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
f.write("git_version = {}\n".format(repr(sha)))
def _get_pytorch_version():
# if "PYTORCH_VERSION" in os.environ:
# return f"torch=={os.environ['PYTORCH_VERSION']}"
return "torch"
def _get_packages():
exclude = [
"build*",
"test*",
# "rlhive.csrc*",
# "third_party*",
# "tools*",
]
return find_packages(exclude=exclude)
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove rlhive extension
for path in (ROOT_DIR / "rlhive").glob("**/*.so"):
print(f"removing '{path}'")
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / "build",
]
for path in build_dirs:
if path.exists():
print(f"removing '{path}' (and everything under it)")
shutil.rmtree(str(path), ignore_errors=True)
def _check_robohive():
import importlib
import sys
name = "robohive"
spam_loader = importlib.find_loader(name)
found = spam_loader is not None
if name in sys.modules:
print(f"{name!r} already in sys.modules")
# elif (spec := importlib.util.find_spec(name)) is not None:
elif found:
print(f"{name!r} is importable")
else:
raise ImportError(
f"can't find {name!r}: check README.md for " f"install instructions"
)
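# Hedged alternative sketch (illustrative, not part of the original file):
# importlib.util.find_spec is the non-deprecated replacement for
# importlib.find_loader used above, matching the commented walrus hint.
def _check_robohive_via_find_spec():
    import importlib.util
    if importlib.util.find_spec("robohive") is None:
        raise ImportError(
            "can't find 'robohive': check README.md for install instructions"
        )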
def _main():
pytorch_package_dep = _get_pytorch_version()
print("-- PyTorch dependency:", pytorch_package_dep)
# branch = _run_cmd(["git", "rev-parse", "--abbrev-ref", "HEAD"])
# tag = _run_cmd(["git", "describe", "--tags", "--exact-match", "@"])
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
# install robohive locally
# subprocess.run(
# [
# "git",
# "clone",
# "-c",
# "submodule.robohive/sims/neuromuscular_sim.update=none",
# "--branch",
# "non-local-install",
# "--recursive",
# "https://github.com/vikashplus/robohive.git",
# "third_party/robohive",
# ]
# )
# subprocess.run(
# [
# "git",
# "clone",
# "--branch",
# "main",
# "https://github.com/pytorch/rl.git",
# "third_party/rl",
# ]
# )
# mj_env_path = os.path.join(os.getcwd(), "third_party", "robohive#egg=robohive")
# rl_path = os.path.join(os.getcwd(), "third_party", "rl#egg=torchrl")
setup(
# Metadata
name="rlhive",
version=version,
author="rlhive contributors",
author_email="vmoens@fb.com",
url="https://github.com/fairinternal/rlhive",
long_description=long_description,
long_description_content_type="text/markdown",
license="BSD",
# Package info
packages=find_packages(exclude=("test", "tutorials", "third_party")),
# ext_modules=get_extensions(),
# cmdclass={
# "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
# "clean": clean,
# },
install_requires=[
pytorch_package_dep,
# "torchrl @ git+ssh://git@github.com/pytorch/rl@main#egg=torchrl",
# f"torchrl @ file://{rl_path}",
"torchrl",
"gym==0.13",
# "robohive",
# f"robohive @ file://{mj_env_path}",
"numpy",
"packaging",
"cloudpickle",
"hydra-core",
"dm_control",
],
zip_safe=False,
dependency_links=[
# location to your egg file
],
extras_require={
"tests": ["pytest", "pyyaml", "pytest-instafail"],
},
)
if __name__ == "__main__":
write_version_file()
print("Building wheel {}-{}".format(package_name, version))
print(f"BUILD_VERSION is {os.getenv('BUILD_VERSION')}")
# _check_robohive()
_main()
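# Hedged note (illustrative): with the `extras_require` mapping above, the test
# dependencies would be installed with, e.g.:
#   pip install "rlhive[tests]"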
|
agenthive-dev
|
setup.py
|
import robohive
import torchrl
from rlhive import RoboHiveEnv
|
agenthive-dev
|
test/smoke_test.py
|
import argparse
import pytest
import torch
from rlhive.rl_envs import RoboHiveEnv
from torchrl.envs import (
CatTensors,
EnvCreator,
ParallelEnv,
R3MTransform,
TransformedEnv,
)
from torchrl.envs.utils import check_env_specs
def test_state_env():
pass
def test_pixel_env():
pass
@pytest.mark.parametrize(
"env_name",
[
"visual_franka_slide_random-v3",
"visual_franka_slide_close-v3",
"visual_franka_slide_open-v3",
"visual_franka_micro_random-v3",
"visual_franka_micro_close-v3",
"visual_franka_micro_open-v3",
"visual_kitchen_knob1_off-v3",
"visual_kitchen_knob1_on-v3",
"visual_kitchen_knob2_off-v3",
"visual_kitchen_knob2_on-v3",
"visual_kitchen_knob3_off-v3",
"visual_kitchen_knob3_on-v3",
"visual_kitchen_knob4_off-v3",
"visual_kitchen_knob4_on-v3",
"visual_kitchen_light_off-v3",
"visual_kitchen_light_on-v3",
"visual_kitchen_sdoor_close-v3",
"visual_kitchen_sdoor_open-v3",
"visual_kitchen_ldoor_close-v3",
"visual_kitchen_ldoor_open-v3",
"visual_kitchen_rdoor_close-v3",
"visual_kitchen_rdoor_open-v3",
"visual_kitchen_micro_close-v3",
"visual_kitchen_micro_open-v3",
"visual_kitchen_close-v3",
],
)
def test_mixed_env(env_name):
base_env = RoboHiveEnv(
env_name,
)
assert base_env.from_pixels
env = TransformedEnv(
base_env,
CatTensors(
[key for key in base_env.observation_spec.keys() if "pixels" not in key],
"observation",
),
)
# reset
tensordict = env.reset()
assert {"done", "observation", "pixels"} == set(tensordict.keys())
assert tensordict["pixels"].shape[0] == 2
# step
env.rand_step(tensordict)
assert {
"reward",
"done",
"observation",
"pixels",
"action",
("next", "observation"),
("next", "pixels"),
"next",
} == set(tensordict.keys(True))
# rollout
tensordict = env.rollout(10)
assert {
"reward",
"done",
"observation",
"pixels",
"action",
("next", "observation"),
("next", "pixels"),
"next",
} == set(tensordict.keys(True))
assert tensordict.shape == torch.Size([10])
env.close()
@pytest.mark.parametrize(
"env_name",
[
"visual_franka_slide_random-v3",
"visual_franka_slide_close-v3",
"visual_franka_slide_open-v3",
"visual_franka_micro_random-v3",
"visual_franka_micro_close-v3",
"visual_franka_micro_open-v3",
"visual_kitchen_knob1_off-v3",
"visual_kitchen_knob1_on-v3",
"visual_kitchen_knob2_off-v3",
"visual_kitchen_knob2_on-v3",
"visual_kitchen_knob3_off-v3",
"visual_kitchen_knob3_on-v3",
"visual_kitchen_knob4_off-v3",
"visual_kitchen_knob4_on-v3",
"visual_kitchen_light_off-v3",
"visual_kitchen_light_on-v3",
"visual_kitchen_sdoor_close-v3",
"visual_kitchen_sdoor_open-v3",
"visual_kitchen_ldoor_close-v3",
"visual_kitchen_ldoor_open-v3",
"visual_kitchen_rdoor_close-v3",
"visual_kitchen_rdoor_open-v3",
"visual_kitchen_micro_close-v3",
"visual_kitchen_micro_open-v3",
"visual_kitchen_close-v3",
],
)
def test_specs(env_name):
base_env = RoboHiveEnv(
env_name,
)
check_env_specs(base_env)
env = TransformedEnv(
base_env,
CatTensors(
[key for key in base_env.observation_spec.keys() if "pixels" not in key],
"observation",
),
)
check_env_specs(env)
@pytest.mark.parametrize(
"env_name",
[
"visual_franka_slide_random-v3",
"visual_franka_slide_close-v3",
"visual_franka_slide_open-v3",
"visual_franka_micro_random-v3",
"visual_franka_micro_close-v3",
"visual_franka_micro_open-v3",
"visual_kitchen_knob1_off-v3",
"visual_kitchen_knob1_on-v3",
"visual_kitchen_knob2_off-v3",
"visual_kitchen_knob2_on-v3",
"visual_kitchen_knob3_off-v3",
"visual_kitchen_knob3_on-v3",
"visual_kitchen_knob4_off-v3",
"visual_kitchen_knob4_on-v3",
"visual_kitchen_light_off-v3",
"visual_kitchen_light_on-v3",
"visual_kitchen_sdoor_close-v3",
"visual_kitchen_sdoor_open-v3",
"visual_kitchen_ldoor_close-v3",
"visual_kitchen_ldoor_open-v3",
"visual_kitchen_rdoor_close-v3",
"visual_kitchen_rdoor_open-v3",
"visual_kitchen_micro_close-v3",
"visual_kitchen_micro_open-v3",
"visual_kitchen_close-v3",
],
)
def test_parallel(env_name):
def make_env():
base_env = RoboHiveEnv(
env_name,
)
check_env_specs(base_env)
env = TransformedEnv(
base_env,
CatTensors(
[
key
for key in base_env.observation_spec.keys()
if "pixels" not in key
],
"observation",
),
)
return env
env = ParallelEnv(3, make_env)
env.reset()
env.rollout(3)
@pytest.mark.parametrize("parallel", [False, True])
def test_env_render_native(parallel):
if not parallel:
env = RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
else:
env = ParallelEnv(3, lambda: RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0"))
td = env.reset()
assert set(td.keys(True)) == {
"done",
"observation",
"pixels",
}
td = env.rand_step(td)
assert set(td.keys(True)) == {
"done",
"next",
("next", "pixels"),
"pixels",
"observation",
("next", "observation"),
"reward",
"action",
}
td = env.rollout(50)
if not parallel:
assert td.shape == torch.Size([50])
else:
assert td.shape == torch.Size([3, 50])
assert set(td.keys(True)) == {
"done",
"next",
("next", "pixels"),
"pixels",
"observation",
("next", "observation"),
"reward",
"action",
}
env.close()
@pytest.mark.parametrize(
"parallel,env_creator", [[True, True], [True, False], [False, True]]
)
def test_env_r3m_native(parallel, env_creator):
if not parallel:
base_env = RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
else:
if env_creator:
env_creator = EnvCreator(
lambda: RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
)
else:
env_creator = lambda: RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
base_env = ParallelEnv(3, env_creator)
env = TransformedEnv(
base_env,
R3MTransform(
"resnet18",
["pixels"],
["pixels_embed"],
),
)
td = env.reset()
_ = env.rand_step(td)
td = env.rollout(50)
if parallel:
assert td.shape == torch.Size([3, 50])
else:
assert td.shape == torch.Size([50])
env.close()
if __name__ == "__main__":
args, unknown = argparse.ArgumentParser().parse_known_args()
pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown)
|
agenthive-dev
|
test/test_envs.py
|
import torch
def get_available_devices():
devices = [torch.device("cpu")]
n_cuda = torch.cuda.device_count()
if n_cuda > 0:
for i in range(n_cuda):
devices += [torch.device(f"cuda:{i}")]
return devices
|
agenthive-dev
|
test/utils.py
|
import argparse
import pytest
import torch
from omegaconf import OmegaConf
from rlhive.sim_algos.helpers import EnvConfig
from rlhive.sim_algos.run import make_env_constructor
from utils import get_available_devices
@pytest.mark.parametrize("device", get_available_devices())
def test_make_r3menv(device):
cfg = EnvConfig
# hacky way of create a config that can be shared across processes
cfg = OmegaConf.create(OmegaConf.to_yaml(cfg))
cfg.env_name = "FrankaReachRandom_v2d-v0"
cfg.r3m = "resnet50"
cfg.collector_devices = str(device)
cfg.norm_stats = False
cfg.env_per_collector = 2
cfg.pin_memory = False
cfg.batch_transform = True
single_env_constructor, multi_env_constructor = make_env_constructor(cfg)
env = single_env_constructor()
print(env)
td = env.reset()
assert {"done", "observation_vector"} == set(td.keys())
td = env.rollout(10)
assert {
"action",
"done",
(
"next",
"observation_vector",
),
"next",
"observation_vector",
"reward",
} == set(td.keys())
assert td.shape == torch.Size([10])
env = multi_env_constructor
print(env)
td = env.reset()
assert {"done", "observation_vector"} == set(td.keys())
td = env.rollout(10)
assert {
"action",
"done",
(
"next",
"observation_vector",
),
"next",
"observation_vector",
"reward",
} == set(td.keys())
assert td.shape == torch.Size([2, 10])
env.close()
if __name__ == "__main__":
args, unknown = argparse.ArgumentParser().parse_known_args()
pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown)
|
agenthive-dev
|
test/test_helpers.py
|
''' Use this script to compare multiple results \n
Usage: python viz_results.py -j expdir1_group0 expdir2_group0 -j expdir3_group1 expdir4_group1 -k "key1" "key2"...
'''
from vtils.plotting import simple_plot
import argparse
from scipy import signal
import pandas
import glob
def get_files(search_path, file_name):
search_path = search_path[:-1] if search_path.endswith('/') else search_path
search_path = search_path+"*/**/"+file_name
filenames = glob.glob(search_path, recursive=True)
assert (len(filenames) > 0), "No file found at: {}".format(search_path)
return filenames
# Another example, Python 3.5+
def get_files_p35(search_path, file_name):
from pathlib import Path
filenames = []
for path in Path(search_path).rglob(file_name):
filenames.append(path)
return filenames
def get_log(filename, format="csv"):
try:
if format=="csv":
data = pandas.read_csv(filename)
elif format=="json":
data = pandas.read_json(filename)
except Exception as e:
print("WARNING: Can't read %s." % filename)
quit()
return data
def smooth_data(y, window_length=101, polyorder=3):
window_length = min(int(len(y) / 2),
window_length) # set maximum valid window length
# make the window length odd, as required by savgol_filter
if window_length % 2 == 0:
    window_length = window_length + 1
try:
return signal.savgol_filter(y, window_length, polyorder)
except Exception as e:
return y # nans
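# Hedged usage sketch (illustrative, not part of the original file): smooth_data
# clamps the window to half the series length and forces it odd, as
# savgol_filter requires. With a hypothetical noisy series:
# import numpy as np
# noisy = np.sin(np.linspace(0, 4, 200)) + np.random.normal(0, 0.1, 200)
# smoothed = smooth_data(noisy, window_length=21)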
# MAIN =========================================================
def main():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'-j', '--job', required=True, action='append', nargs='?', help='job group')
parser.add_argument(
'-lf', '--log_file', type=str, default="log.csv", help='name of log file (with extension)')
parser.add_argument(
'-cf', '--config_file', type=str, default="job_config.json", help='name of config file (with extension)')
parser.add_argument(
'-t', '--title', type=str, default=None, help='Title of the plot')
parser.add_argument(
'-l', '--label', action='append', nargs='?', help='job group label')
parser.add_argument(
'-s', '--smooth', type=int, default=21, help='window for smoothing')
parser.add_argument(
'-y', '--ykeys', nargs='+', default=["eval_score", 'norm_score'], help='yKeys to plot')
parser.add_argument(
'-x', '--xkey', default="total_num_samples", help='xKey to plot')
parser.add_argument(
'-i', '--index', type=int, default=-4, help='index in log filename to use as labels')
args = parser.parse_args()
# scan labels
if args.label is not None:
assert (len(args.job) == len(args.label)), "The number of labels has to be same as the number of jobs"
else:
args.label = [''] * len(args.job)
# for all the algo jobs
for ialgo, algo_dir in enumerate(args.job):
print("algo> "+algo_dir)
envs_dirs = glob.glob(algo_dir+"/*/")
# for all envs inside the algo
nenv = len(envs_dirs)
for ienv, env_dir in enumerate(sorted(envs_dirs)):
print("env>> "+env_dir)
run_dirs = glob.glob(env_dir+"/*/")
# all the seeds/ variations within the env
for irun, run_dir in enumerate(sorted(run_dirs)):
print("run> "+run_dir)
title = run_dir.split('/')[3]
title = title[:title.find('-v')]
# for log_file in get_files(env_dir, args.file):
log_file = get_files(run_dir, args.log_file)
log = get_log(filename=log_file[0], format="csv")
# validate keys
for key in [args.xkey]+args.ykeys:
assert key in log.keys(), "{} not present in available keys {}".format(key, log.keys())
nykeys = len(args.ykeys)
for iykey, ykey in enumerate(sorted(args.ykeys)):
simple_plot.plot(xdata=log[args.xkey]/1e6,
ydata=smooth_data(log[ykey], args.smooth),
legend='job_name',
subplot_id=(nenv, nykeys, nykeys*ienv+iykey+1),
xaxislabel=args.xkey+'(M)',
plot_name=title,
yaxislabel=ykey,
fig_size=(4*nykeys, 4*nenv),
fig_name='SAC performance'
)
# simple_plot.show_plot()
simple_plot.save_plot(args.job[0]+'RS-SAC.pdf')
if __name__ == '__main__':
main()
|
agenthive-dev
|
agents/utils/plot_all_sac.py
|
''' Use this script to compare multiple results \n
Usage: python agents/NPG/plot_all_npg.py -j agents/v0.1/kitchen/NPG/outputs_kitchenJ5c_3.8/ -j agents/v0.1/kitchen/NPG/outputs_kitchenJ5d_3.9/ -j /Users/vikashplus/Projects/mj_envs/kitchen/outputs_kitchenJ8a/ -l 'v0.1(fixed_init)' -l 'v0.1(random_init)' -l 'v0.2(random_init)' -pt True
'''
from vtils.plotting import simple_plot
import argparse
from scipy import signal
import pandas
import glob
import numpy as np
import os
def get_files(search_path, file_name):
search_path = search_path[:-1] if search_path.endswith('/') else search_path
search_path = search_path+"*/**/"+file_name
filenames = glob.glob(search_path, recursive=True)
assert (len(filenames) > 0), "No file found at: {}".format(search_path)
return filenames
# Another example, Python 3.5+
def get_files_p35(search_path, file_name):
from pathlib import Path
filenames = []
for path in Path(search_path).rglob(file_name):
filenames.append(path)
return filenames
def get_log(filename, format="csv"):
try:
if format=="csv":
data = pandas.read_csv(filename)
elif format=="json":
data = pandas.read_json(filename)
except Exception as e:
print("WARNING: Can't read %s." % filename)
quit()
return data
def smooth_data(y, window_length=101, polyorder=3):
window_length = min(int(len(y) / 2),
window_length) # set maximum valid window length
# make the window length odd, as required by savgol_filter
if window_length % 2 == 0:
    window_length = window_length + 1
try:
return signal.savgol_filter(y, window_length, polyorder)
except Exception as e:
return y # nans
# MAIN =========================================================
def main():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'-j', '--job', required=True, action='append', nargs='?', help='job group')
parser.add_argument(
'-lf', '--run_log', type=str, default="log.csv", help='name of log file (with extension)')
parser.add_argument(
'-cf', '--config_file', type=str, default="job_config.json", help='name of config file (with extension)')
parser.add_argument(
'-t', '--title', type=str, default=None, help='Title of the plot')
parser.add_argument(
'-l', '--label', action='append', nargs='?', help='job group label')
parser.add_argument(
'-s', '--smooth', type=int, default=21, help='window for smoothing')
parser.add_argument(
'-y', '--ykeys', nargs='+', default=['success_percentage', 'rwd_sparse', 'rwd_dense'], help='yKeys to plot')
parser.add_argument(
'-x', '--xkey', default="num_samples", help='xKey to plot')
parser.add_argument(
'-ei', '--env_index', type=int, default=-2, help='index in log filename to use as labels')
parser.add_argument(
'-pt', '--plot_train', type=bool, default=False, help='plot train perf')
parser.add_argument(
'-od', '--output_dir', type=str, default=None, help='Save outputs here')
args = parser.parse_args()
# init
nykeys = len(args.ykeys)
njob = len(args.job)
nenv = -1
env_labels = []
# scan labels
if args.label is not None:
assert (njob == len(args.label)), "The number of labels has to be same as the number of jobs"
else:
args.label = [''] * njob
# for all the jobs
for ijob, job_dir in enumerate(args.job):
print("Job> "+job_dir)
envs_dirs = glob.glob(job_dir+"/*/")
if nenv ==-1:
nenv = len(envs_dirs)
else:
assert nenv == len(envs_dirs), f"Number of envs changed {envs_dirs}"
for env_dir in sorted(envs_dirs):
env_labels.append(env_dir.split('/')[args.env_index])
# for all envs inside the exp
env_means = []
env_stds = []
for ienv, env_dir in enumerate(sorted(envs_dirs)):
print(" env> "+env_dir)
# all the seeds/ variations runs within the env
yruns = []
xruns = []  # known bug: logs with different lengths will break this; hacked around via [:len(xdata)]
for irun, run_log in enumerate(sorted(get_files(env_dir, args.run_log))):
print(" run> "+run_log, flush=True)
log = get_log(filename=run_log, format="csv")
# validate keys
for key in [args.xkey]+args.ykeys:
assert key in log.keys(), "{} not present in available keys {}".format(key, log.keys())
if 'sample' in args.xkey: #special keys
xdata = np.cumsum(log[args.xkey])/1e6
plot_xkey = args.xkey+"(M)"
else:
xdata = log[args.xkey]
plot_xkey = args.xkey
yruns.append(log[args.ykeys])
# print(xdata.shape, log[args.ykeys].shape)
del log
# stats over keys
yruns = pandas.concat(yruns)
yruns_stacked = yruns.groupby(yruns.index)
yruns_mean = yruns_stacked.mean()
yruns_min = yruns_stacked.min()
yruns_max = yruns_stacked.max()
yruns_std = yruns_stacked.std()
# stats over jobs
env_means.append(yruns_mean.tail(1))
env_stds.append(yruns_std.tail(1))
if args.plot_train:
for iykey, ykey in enumerate(sorted(args.ykeys)):
h_figp,_,_= simple_plot.plot(xdata=xdata,
ydata=smooth_data(yruns_mean[ykey][:len(xdata)], args.smooth),
errmin=yruns_min[ykey][:len(xdata)],
errmax=yruns_max[ykey][:len(xdata)],
legend=args.label[ijob],
subplot_id=(nenv, nykeys, nykeys*ienv+iykey+1),
xaxislabel=plot_xkey,
plot_name=env_labels[ienv],
yaxislabel=ykey,
fig_size=(4*nykeys, 3*nenv),
fig_name='NPG performance',
)
env_means = pandas.concat(env_means)
env_stds = pandas.concat(env_stds)
width = 1/(njob+1)
for iykey, ykey in enumerate(sorted(args.ykeys)):
h_figb, h_axisb, h_bar = simple_plot.bar(
xdata=np.arange(nenv)+width*ijob,
ydata=env_means[ykey],
errdata=env_stds[ykey],
width=width,
subplot_id=(nykeys, 1, iykey+1),
fig_size=(2+0.2*nenv, 4*nykeys),
fig_name="Env perfs",
yaxislabel=ykey,
legend=args.label[ijob],
xticklabels=env_labels[:nenv],
# plot_name="Performance using 5M samples"
)
args.output_dir = args.job[-1] if args.output_dir is None else args.output_dir
if args.plot_train:
simple_plot.save_plot(os.path.join(args.output_dir, 'TrainPerf-NPG.pdf'), h_figp)
simple_plot.save_plot(os.path.join(args.output_dir,'FinalPerf-NPG.pdf'), h_figb)
if __name__ == '__main__':
main()
|
agenthive-dev
|
agents/utils/plot_all_npg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from tensordict.tensordict import make_tensordict, TensorDictBase
from torchrl.data import BoundedTensorSpec, CompositeSpec, UnboundedContinuousTensorSpec
from torchrl.envs.libs.gym import _gym_to_torchrl_spec_transform, _has_gym, GymEnv
from torchrl.envs.transforms import CatTensors, Compose, R3MTransform, TransformedEnv
from torchrl.envs.utils import make_composite_from_td
from torchrl.trainers.helpers.envs import LIBS
if _has_gym:
import gym
class RoboHiveEnv(GymEnv):
# info_keys = ["time", "rwd_dense", "rwd_sparse", "solved"]
def _build_env(
self,
env_name: str,
from_pixels: bool = False,
pixels_only: bool = False,
**kwargs,
) -> "gym.core.Env":
self.pixels_only = pixels_only
try:
render_device = int(str(self.device)[-1])
except ValueError:
render_device = 0
print(f"rendering device: {render_device}, device is {self.device}")
if not _has_gym:
raise RuntimeError(
f"gym not found, unable to create {env_name}. "
f"Consider downloading and installing dm_control from"
f" {self.git_url}"
)
try:
env = self.lib.make(
env_name,
frameskip=self.frame_skip,
device_id=render_device,
return_dict=True,
**kwargs,
)
self.wrapper_frame_skip = 1
from_pixels = bool(len(env.visual_keys))
except TypeError as err:
if "unexpected keyword argument 'frameskip" not in str(err):
raise TypeError(err)
kwargs.pop("framek_skip")
env = self.lib.make(
env_name, return_dict=True, device_id=render_device, **kwargs
)
self.wrapper_frame_skip = self.frame_skip
self.from_pixels = from_pixels
self.render_device = render_device
self.info_dict_reader = self.read_info
return env
def _make_specs(self, env: "gym.Env") -> None:
if self.from_pixels:
num_cams = len(env.visual_keys)
# n_pix = 224 * 224 * 3 * num_cams
# env.observation_space = gym.spaces.Box(
# -8 * np.ones(env.obs_dim - n_pix),
# 8 * np.ones(env.obs_dim - n_pix),
# dtype=np.float32,
# )
self.action_spec = _gym_to_torchrl_spec_transform(
env.action_space, device=self.device
)
observation_spec = _gym_to_torchrl_spec_transform(
env.observation_space,
device=self.device,
)
if not isinstance(observation_spec, CompositeSpec):
observation_spec = CompositeSpec(observation=observation_spec)
self.observation_spec = observation_spec
if self.from_pixels:
self.observation_spec["pixels"] = BoundedTensorSpec(
torch.zeros(
num_cams,
224, # working with 640
224, # working with 480
3,
device=self.device,
dtype=torch.uint8,
),
255
* torch.ones(
num_cams,
224,
224,
3,
device=self.device,
dtype=torch.uint8,
),
torch.Size([num_cams, 224, 224, 3]),
dtype=torch.uint8,
device=self.device,
)
self.reward_spec = UnboundedContinuousTensorSpec(
device=self.device,
) # default
rollout = self.rollout(2).get("next").exclude("done", "reward")[0]
self.observation_spec.update(make_composite_from_td(rollout))
def set_from_pixels(self, from_pixels: bool) -> None:
"""Sets the from_pixels attribute to an existing environment.
Args:
from_pixels (bool): new value for the from_pixels attribute
"""
if from_pixels is self.from_pixels:
return
self.from_pixels = from_pixels
self._make_specs(self.env)
def read_obs(self, observation):
# the info is missing from the reset
observations = self.env.obs_dict
visual = self.env.get_exteroception()
try:
del observations["t"]
except KeyError:
pass
# recover vec
obsvec = []
pixel_list = []
observations.update(visual)
for key in observations:
if key.startswith("rgb"):
pix = observations[key]
if not pix.shape[0] == 1:
pix = pix[None]
pixel_list.append(pix)
elif key in self._env.obs_keys:
value = observations[key]
if not value.shape:
value = value[None]
obsvec.append(value) # ravel helps with images
if obsvec:
obsvec = np.concatenate(obsvec, 0)
if self.from_pixels:
out = {"observation": obsvec, "pixels": np.concatenate(pixel_list, 0)}
else:
out = {"observation": obsvec}
return super().read_obs(out)
def read_info(self, info, tensordict_out):
out = {}
for key, value in info.items():
if key in ("obs_dict", "done", "reward"):
continue
if isinstance(value, dict):
value = {key: _val for key, _val in value.items() if _val is not None}
value = make_tensordict(value, batch_size=[])
out[key] = value
tensordict_out.update(out)
return tensordict_out
def to(self, *args, **kwargs):
out = super().to(*args, **kwargs)
try:
render_device = int(str(out.device)[-1])
except ValueError:
render_device = 0
if render_device != self.render_device:
out._build_env(**self._constructor_kwargs)
return out
def make_r3m_env(env_name, model_name="resnet50", download=True, **kwargs):
base_env = RoboHiveEnv(env_name, from_pixels=True, pixels_only=False)
vec_keys = [k for k in base_env.observation_spec.keys() if k != "pixels"]
env = TransformedEnv(
base_env,
Compose(
R3MTransform(
model_name,
keys_in=["pixels"],
keys_out=["pixel_r3m"],
download=download,
**kwargs,
),
CatTensors(keys_in=["pixel_r3m", *vec_keys], out_key="observation_vector"),
),
)
return env
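# Hedged usage sketch (illustrative, not part of the original file): the env
# name below is one of the visual variants registered in rlhive.envs, and R3M
# weights are fetched through torch.hub when download=True.
# env = make_r3m_env("visual_franka_slide_close-v3", model_name="resnet50", download=True)
# td = env.reset()  # tensordict containing an "observation_vector" entry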
LIBS["robohive"] = RoboHiveEnv
|
agenthive-dev
|
rlhive/rl_envs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Custom env reg for RoboHive usage in TorchRL
# Pixel rendering will be queried by torchrl, so we don't include those keys in visual_obs_keys_wt
import os
import warnings
from pathlib import Path
import robohive.envs.multi_task.substeps1
from robohive.envs.env_variants import register_env_variant
visual_obs_keys_wt = robohive.envs.multi_task.substeps1.visual_obs_keys_wt
class set_directory(object):
"""Sets the cwd within the context
Args:
path (Path): The path to the cwd
"""
def __init__(self, path: Path):
self.path = path
self.origin = Path().absolute()
def __enter__(self):
os.chdir(self.path)
def __exit__(self, *args, **kwargs):
os.chdir(self.origin)
def __call__(self, fun):
def new_fun(*args, **kwargs):
with set_directory(Path(self.path)):
return fun(*args, **kwargs)
return new_fun
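# Hedged usage sketch (illustrative, not part of the original file):
# set_directory works both as a context manager and as a decorator; the path
# below is hypothetical.
# with set_directory(Path("/tmp")):
#     ...  # cwd is /tmp inside the block
# @set_directory(Path("/tmp"))
# def do_work():
#     ...  # cwd is /tmp for the duration of each call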
CURR_DIR = robohive.envs.multi_task.substeps1.CURR_DIR
MODEL_PATH = robohive.envs.multi_task.substeps1.MODEL_PATH
CONFIG_PATH = robohive.envs.multi_task.substeps1.CONFIG_PATH
RANDOM_ENTRY_POINT = robohive.envs.multi_task.substeps1.RANDOM_ENTRY_POINT
FIXED_ENTRY_POINT = robohive.envs.multi_task.substeps1.FIXED_ENTRY_POINT
ENTRY_POINT = RANDOM_ENTRY_POINT
override_keys = [
"objs_jnt",
"end_effector",
"knob1_site_err",
"knob2_site_err",
"knob3_site_err",
"knob4_site_err",
"light_site_err",
"slide_site_err",
"leftdoor_site_err",
"rightdoor_site_err",
"microhandle_site_err",
"kettle_site0_err",
"rgb:right_cam:224x224:2d",
"rgb:left_cam:224x224:2d",
]
@set_directory(CURR_DIR)
def register_kitchen_envs():
print("RLHive:> Registering Kitchen Envs")
env_list = [
"kitchen_knob1_off-v3",
"kitchen_knob1_on-v3",
"kitchen_knob2_off-v3",
"kitchen_knob2_on-v3",
"kitchen_knob3_off-v3",
"kitchen_knob3_on-v3",
"kitchen_knob4_off-v3",
"kitchen_knob4_on-v3",
"kitchen_light_off-v3",
"kitchen_light_on-v3",
"kitchen_sdoor_close-v3",
"kitchen_sdoor_open-v3",
"kitchen_ldoor_close-v3",
"kitchen_ldoor_open-v3",
"kitchen_rdoor_close-v3",
"kitchen_rdoor_open-v3",
"kitchen_micro_close-v3",
"kitchen_micro_open-v3",
"FK1_RelaxFixed-v4",
# "kitchen_close-v3",
]
obs_keys_wt = {
"robot_jnt": 1.0,
"end_effector": 1.0,
}
visual_obs_keys = {
"rgb:right_cam:224x224:2d": 1.0,
"rgb:left_cam:224x224:2d": 1.0,
}
for env in env_list:
try:
new_env_name = "visual_" + env
register_env_variant(
env,
variants={"obs_keys_wt": obs_keys_wt, "visual_keys": list(visual_obs_keys.keys())},
variant_id=new_env_name,
override_keys=override_keys,
)
except AssertionError as err:
warnings.warn(
f"Could not register {new_env_name}, the following error was raised: {err}"
)
@set_directory(CURR_DIR)
def register_franka_envs():
print("RLHive:> Registering Franka Envs")
env_list = [
"franka_slide_random-v3",
"franka_slide_close-v3",
"franka_slide_open-v3",
"franka_micro_random-v3",
"franka_micro_close-v3",
"franka_micro_open-v3",
]
# Franka Appliance ======================================================================
obs_keys_wt = {
"robot_jnt": 1.0,
"end_effector": 1.0,
}
visual_obs_keys = {
"rgb:right_cam:224x224:2d": 1.0,
"rgb:left_cam:224x224:2d": 1.0,
}
for env in env_list:
try:
new_env_name = "visual_" + env
register_env_variant(
env,
variants={"obs_keys_wt": obs_keys_wt, "visual_keys": visual_obs_keys},
variant_id=new_env_name,
override_keys=override_keys,
)
except AssertionError as err:
warnings.warn(
f"Could not register {new_env_name}, the following error was raised: {err}"
)
@set_directory(CURR_DIR)
def register_hand_envs():
print("RLHive:> Registering Arm Envs")
env_list = ["door-v1", "hammer-v1", "pen-v1", "relocate-v1"]
visual_obs_keys = [
"rgb:vil_camera:224x224:2d",
"rgb:fixed:224x224:2d",
]
# Hand Manipulation Suite ======================================================================
for env in env_list:
try:
new_env_name = "visual_" + env
register_env_variant(
env,
variants={
"obs_keys": [
"hand_jnt",
],
"visual_keys": visual_obs_keys,
},
variant_id=new_env_name,
)
except AssertionError as err:
warnings.warn(
f"Could not register {new_env_name}, the following error was raised: {err}"
)
@set_directory(CURR_DIR)
def register_myo_envs():
print("RLHive:> Registering Myo Envs")
env_list = ["motorFingerReachFixed-v0"]
visual_keys = [
"rgb:vil_camera:224x224:2d",
"rgb:fixed:224x224:2d",
]
# Hand Manipulation Suite ======================================================================
for env in env_list:
try:
new_env_name = "visual_" + env
register_env_variant(
env,
variants={
"obs_keys": [
"hand_jnt",
],
"visual_keys": visual_keys,
},
variant_id=new_env_name,
)
except AssertionError as err:
warnings.warn(
f"Could not register {new_env_name}, the following error was raised: {err}"
)
|
agenthive-dev
|
rlhive/envs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .envs import (
register_franka_envs,
register_hand_envs,
register_kitchen_envs,
register_myo_envs,
)
register_franka_envs()
register_kitchen_envs()
register_hand_envs()
register_myo_envs()
from .rl_envs import RoboHiveEnv
|
agenthive-dev
|
rlhive/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Union
import torch
from torch.nn import Identity
from torchrl.data.tensor_specs import (
CompositeSpec,
TensorSpec,
UnboundedContinuousTensorSpec,
)
from torchrl.data.utils import DEVICE_TYPING
from torchrl.envs.transforms.transforms import (
CatTensors,
Compose,
FlattenObservation,
ObservationNorm,
Resize,
ToTensorImage,
Transform,
UnsqueezeTransform,
)
try:
from torchvision import models
_has_tv = True
except ImportError:
_has_tv = False
class _RRLNet(Transform):
inplace = False
def __init__(self, in_keys, out_keys, model_name, del_keys: bool = True):
if not _has_tv:
raise ImportError(
"Tried to instantiate RRL without torchvision. Make sure you have "
"torchvision installed in your environment."
)
if model_name == "resnet18":
self.model_name = "rrl_18"
self.outdim = 512
convnet = models.resnet18(pretrained=True)
elif model_name == "resnet34":
self.model_name = "rrl_34"
self.outdim = 512
convnet = models.resnet34(pretrained=True)
elif model_name == "resnet50":
self.model_name = "rrl_50"
self.outdim = 2048
convnet = models.resnet50(pretrained=True)
else:
raise NotImplementedError(
f"model {model_name} is currently not supported by RRL"
)
convnet.fc = Identity()
super().__init__(in_keys=in_keys, out_keys=out_keys)
self.convnet = convnet
self.del_keys = del_keys
def _call(self, tensordict):
tensordict_view = tensordict.view(-1)
super()._call(tensordict_view)
if self.del_keys:
tensordict.exclude(*self.in_keys, inplace=True)
return tensordict
@torch.no_grad()
def _apply_transform(self, obs: torch.Tensor) -> torch.Tensor:
shape = None
if obs.ndimension() > 4:
shape = obs.shape[:-3]
obs = obs.flatten(0, -4)
out = self.convnet(obs)
if shape is not None:
out = out.view(*shape, *out.shape[1:])
return out
def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec:
if not isinstance(observation_spec, CompositeSpec):
raise ValueError("_RRLNet can only infer CompositeSpec")
keys = [key for key in observation_spec._specs.keys() if key in self.in_keys]
device = observation_spec[keys[0]].device
dim = observation_spec[keys[0]].shape[:-3]
observation_spec = CompositeSpec(observation_spec)
if self.del_keys:
for in_key in keys:
del observation_spec[in_key]
for out_key in self.out_keys:
observation_spec[out_key] = UnboundedContinuousTensorSpec(
shape=torch.Size([*dim, self.outdim]), device=device
)
return observation_spec
# @staticmethod
# def _load_weights(model_name, r3m_instance, dir_prefix):
# if model_name not in ("r3m_50", "r3m_34", "r3m_18"):
# raise ValueError(
# "model_name should be one of 'r3m_50', 'r3m_34' or 'r3m_18'"
# )
# # url = "https://download.pytorch.org/models/rl/r3m/" + model_name
# url = "https://pytorch.s3.amazonaws.com/models/rl/r3m/" + model_name + ".pt"
# d = load_state_dict_from_url(
# url,
# progress=True,
# map_location=next(r3m_instance.parameters()).device,
# model_dir=dir_prefix,
# )
# td = TensorDict(d["r3m"], []).unflatten_keys(".")
# td_flatten = td["module"]["convnet"].flatten_keys(".")
# state_dict = td_flatten.to_dict()
# r3m_instance.convnet.load_state_dict(state_dict)
# def load_weights(self, dir_prefix=None):
# self._load_weights(self.model_name, self, dir_prefix)
def _init_first(fun):
def new_fun(self, *args, **kwargs):
if not self.initialized:
self._init()
return fun(self, *args, **kwargs)
return new_fun
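# (added comment) Helper decorator for the lazy-initialization pattern described
# in RRLTransform below: it runs the transform's _init() before the wrapped
# method executes, so specs and forward calls always see a fully built object.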
class RRLTransform(Compose):
"""RRL Transform class.
    RRL provides pre-trained ResNet weights aimed at facilitating visual
    embedding for robotic tasks. The models are pre-trained on ImageNet.
See the paper:
        Shah, Rutav, and Vikash Kumar. "RRL: Resnet as representation for reinforcement learning."
arXiv preprint arXiv:2107.03380 (2021).
    The RRLTransform is created in a lazy manner: the object will be initialized
    only when an attribute (a spec or the forward method) is queried.
The reason for this is that the :obj:`_init()` method requires some attributes of
the parent environment (if any) to be accessed: by making the class lazy we
can ensure that the following code snippet works as expected:
Examples:
>>> transform = RRLTransform("resnet50", in_keys=["pixels"])
>>> env.append_transform(transform)
>>> # the forward method will first call _init which will look at env.observation_spec
>>> env.reset()
Args:
model_name (str): one of resnet50, resnet34 or resnet18
in_keys (list of str): list of input keys. If left empty, the
"pixels" key is assumed.
out_keys (list of str, optional): list of output keys. If left empty,
"rrl_vec" is assumed.
size (int, optional): Size of the image to feed to resnet.
Defaults to 244.
stack_images (bool, optional): if False, the images given in the :obj:`in_keys`
            argument will be treated separately and each will be given a single,
separated entry in the output tensordict. Defaults to :obj:`True`.
download (bool, optional): if True, the weights will be downloaded using
the torch.hub download API (i.e. weights will be cached for future use).
Defaults to False.
download_path (str, optional): path where to download the models.
Default is None (cache path determined by torch.hub utils).
tensor_pixels_keys (list of str, optional): Optionally, one can keep the
original images (as collected from the env) in the output tensordict.
If no value is provided, this won't be collected.
"""
@classmethod
def __new__(cls, *args, **kwargs):
cls.initialized = False
cls._device = None
cls._dtype = None
return super().__new__(cls)
def __init__(
self,
model_name: str,
in_keys: List[str],
        out_keys: Optional[List[str]] = None,
size: int = 244,
stack_images: bool = True,
download: bool = False,
download_path: Optional[str] = None,
        tensor_pixels_keys: Optional[List[str]] = None,
):
super().__init__()
self.in_keys = in_keys if in_keys is not None else ["pixels"]
self.download = download
self.download_path = download_path
self.model_name = model_name
self.out_keys = out_keys
self.size = size
self.stack_images = stack_images
self.tensor_pixels_keys = tensor_pixels_keys
self._init()
def _init(self):
"""Initializer for RRL."""
self.initialized = True
in_keys = self.in_keys
model_name = self.model_name
out_keys = self.out_keys
size = self.size
stack_images = self.stack_images
tensor_pixels_keys = self.tensor_pixels_keys
# ToTensor
transforms = []
if tensor_pixels_keys:
for i in range(len(in_keys)):
transforms.append(
CatTensors(
in_keys=[in_keys[i]],
out_key=tensor_pixels_keys[i],
del_keys=False,
)
)
totensor = ToTensorImage(
unsqueeze=False,
in_keys=in_keys,
)
transforms.append(totensor)
# Normalize
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize = ObservationNorm(
in_keys=in_keys,
loc=torch.tensor(mean).view(3, 1, 1),
scale=torch.tensor(std).view(3, 1, 1),
standard_normal=True,
)
transforms.append(normalize)
# Resize: note that resize is a no-op if the tensor has the desired size already
resize = Resize(size, size, in_keys=in_keys)
transforms.append(resize)
# RRL
if out_keys is None:
if stack_images:
out_keys = ["rrl_vec"]
else:
out_keys = [f"rrl_vec_{i}" for i in range(len(in_keys))]
self.out_keys = out_keys
        elif stack_images and len(out_keys) != 1:
            raise ValueError(
                f"out_keys must be of length 1 if stack_images is True. Got out_keys={out_keys}"
            )
        elif not stack_images and len(out_keys) != len(in_keys):
            raise ValueError(
                "out_keys must have the same length as in_keys if stack_images is False."
            )
if stack_images and len(in_keys) > 1:
unsqueeze = UnsqueezeTransform(
in_keys=in_keys,
out_keys=in_keys,
unsqueeze_dim=-4,
)
transforms.append(unsqueeze)
cattensors = CatTensors(
in_keys,
out_keys[0],
dim=-4,
)
network = _RRLNet(
in_keys=out_keys,
out_keys=out_keys,
model_name=model_name,
del_keys=False,
)
flatten = FlattenObservation(-2, -1, out_keys)
transforms = [*transforms, cattensors, network, flatten]
else:
network = _RRLNet(
in_keys=in_keys,
out_keys=out_keys,
model_name=model_name,
del_keys=True,
)
transforms = [*transforms, network]
for transform in transforms:
self.append(transform)
# if self.download:
# self[-1].load_weights(dir_prefix=self.download_path)
if self._device is not None:
self.to(self._device)
if self._dtype is not None:
self.to(self._dtype)
def to(self, dest: Union[DEVICE_TYPING, torch.dtype]):
if isinstance(dest, torch.dtype):
self._dtype = dest
else:
self._device = dest
return super().to(dest)
@property
def device(self):
return self._device
@property
def dtype(self):
return self._dtype
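
# Usage sketch (illustrative, not part of the original file): composing
# RRLTransform with an environment that exposes a "pixels" key. RoboHiveEnv and
# TransformedEnv come from this repo / torchrl; the task name is a placeholder.
#
# >>> from rlhive.rl_envs import RoboHiveEnv
# >>> from torchrl.envs import TransformedEnv
# >>> base_env = RoboHiveEnv("franka_micro_random-v3")
# >>> env = TransformedEnv(base_env, RRLTransform("resnet50", in_keys=["pixels"]))
# >>> td = env.reset()  # the "rrl_vec" entry holds the ResNet embedding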
|
agenthive-dev
|
rlhive/sim_algos/helpers/rrl_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Multi-node distributed data collection with submitit in contexts where jobs can't launch other jobs.
The default configuration will ask for 8 nodes with 1 GPU each and 32 procs / node.
It should reach a collection speed of roughly 15-25K fps, or better depending
on the cluster specs.
The logic of the script is the following: we create a `main()` function that
executes our code (in this case just a data collection, but in practice a
training loop should be present).
Since this `main()` function cannot launch sub-jobs by design, we launch the script
from the jump host and pass the slurm specs to submitit.
*Note*:
  Although we do not go into much detail here, the specs of the training
  node and the specs of the inference nodes can differ (look at the DEFAULT_SLURM_CONF
  and DEFAULT_SLURM_CONF_MAIN dictionaries below).
"""
import time
from argparse import ArgumentParser
import torch
from torchrl.collectors.distributed import submitit_delayed_launcher
from torchrl.collectors.distributed.default_configs import (
DEFAULT_SLURM_CONF,
DEFAULT_SLURM_CONF_MAIN,
)
parser = ArgumentParser()
parser.add_argument("--partition", "-p", help="slurm partition to use")
parser.add_argument("--num_jobs", type=int, default=8, help="Number of jobs")
parser.add_argument("--tcp_port", type=int, default=1234, help="TCP port")
parser.add_argument(
"--num_workers", type=int, default=8, help="Number of workers per node"
)
parser.add_argument(
"--gpus_per_node",
"--gpus-per-node",
"-G",
type=int,
default=1,
help="Number of GPUs per node. If greater than 0, the backend used will be NCCL.",
)
parser.add_argument(
"--cpus_per_task",
"--cpus-per-task",
"-c",
type=int,
default=32,
help="Number of CPUs per node.",
)
parser.add_argument(
"--sync", action="store_true", help="Use --sync to collect data synchronously."
)
parser.add_argument(
"--frames_per_batch",
"--frames-per-batch",
default=4000,
type=int,
help="Number of frames in each batch of data. Must be "
"divisible by the product of nodes and workers if sync, by the number of "
"workers otherwise.",
)
parser.add_argument(
"--total_frames",
"--total-frames",
default=10_000_000,
type=int,
help="Total number of frames collected by the collector.",
)
parser.add_argument(
"--time",
"-t",
default="1:00:00",
help="Timeout for the nodes",
)
parser.add_argument(
"--backend",
"-b",
default="gloo",
help="Backend for the collector",
)
parser.add_argument("--env_name", default="franka_micro_random-v3")
parser.add_argument("--r3m", action="store_true")
args = parser.parse_args()
slurm_gpus_per_node = args.gpus_per_node
slurm_time = args.time
backend = args.backend
DEFAULT_SLURM_CONF["slurm_gpus_per_node"] = slurm_gpus_per_node
DEFAULT_SLURM_CONF["slurm_time"] = slurm_time
DEFAULT_SLURM_CONF["slurm_cpus_per_task"] = args.cpus_per_task
DEFAULT_SLURM_CONF["slurm_partition"] = args.partition
DEFAULT_SLURM_CONF_MAIN["slurm_partition"] = args.partition
DEFAULT_SLURM_CONF_MAIN["slurm_time"] = slurm_time
num_jobs = args.num_jobs
tcp_port = args.tcp_port
num_workers = args.num_workers
sync = args.sync
total_frames = args.total_frames
frames_per_batch = args.frames_per_batch
device = "cpu" if backend == "gloo" else "cuda:0"
def make_env(args):
def constructor():
from rlhive import RoboHiveEnv
from torchrl.envs import EnvCreator, ParallelEnv, R3MTransform, TransformedEnv
from torchrl.envs.libs.gym import GymEnv
if args.num_workers > 1:
penv = ParallelEnv(
args.num_workers,
# EnvCreator(lambda: RoboHiveEnv(args.env_name, device="cuda:0")),
EnvCreator(lambda: GymEnv("Pendulum-v0", device="cuda:0")),
)
else:
# penv = RoboHiveEnv(args.env_name, device="cuda:0")
penv = GymEnv("Pendulum-v0", device="cuda:0")
if "visual" in args.env_name:
if args.r3m:
tenv = TransformedEnv(
penv,
R3MTransform(
in_keys=["pixels"], download=True, model_name="resnet50"
),
)
else:
tenv = penv
else:
tenv = penv
return tenv
return constructor
@submitit_delayed_launcher(
num_jobs=num_jobs,
backend=backend,
tcpport=tcp_port,
)
def main():
assert torch.cuda.device_count()
import tqdm
from torchrl.collectors import SyncDataCollector
from torchrl.collectors.collectors import RandomPolicy
from torchrl.collectors.distributed.generic import DistributedDataCollector
from torchrl.envs import EnvCreator
collector_class = SyncDataCollector
collector = DistributedDataCollector(
[EnvCreator(make_env(args))] * num_jobs,
policy=RandomPolicy(make_env(args)().action_spec),
launcher="submitit_delayed",
frames_per_batch=frames_per_batch,
total_frames=total_frames,
tcp_port=tcp_port,
collector_class=collector_class,
num_workers_per_collector=args.num_workers,
collector_kwargs={
"device": "cuda:0" if slurm_gpus_per_node else "cpu",
"storing_device": device,
},
storing_device="cpu",
backend=backend,
sync=sync,
)
counter = 0
pbar = tqdm.tqdm(total=collector.total_frames)
for i, data in enumerate(collector):
pbar.update(data.numel())
pbar.set_description(f"data shape: {data.shape}, data device: {data.device}")
if i >= 10:
counter += data.numel()
if i == 10:
t0 = time.time()
t1 = time.time()
print(f"time elapsed: {t1-t0}s, rate: {counter/(t1-t0)} fps")
collector.shutdown()
exit()
if __name__ == "__main__":
main()
|
agenthive-dev
|
examples/collection_speed_delayed.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from omegaconf import DictConfig
os.environ["sim_backend"] = "MUJOCO"
def main(args: DictConfig):
import numpy as np
import torch.cuda
import tqdm
from rlhive.rl_envs import RoboHiveEnv
from tensordict import TensorDict
from torch import nn, optim
from torchrl.collectors import MultiaSyncDataCollector
from torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer
from torchrl.data.replay_buffers.storages import LazyMemmapStorage
# from torchrl.envs import SerialEnv as ParallelEnv, R3MTransform, SelectTransform, TransformedEnv
from torchrl.envs import (
CatTensors,
EnvCreator,
ParallelEnv,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode, step_mdp
from torchrl.modules import MLP, NormalParamWrapper, SafeModule
from torchrl.modules.distributions import TanhNormal
from torchrl.modules.tensordict_module.actors import (
ProbabilisticActor,
ValueOperator,
)
from torchrl.objectives import SoftUpdate
from torchrl.objectives.deprecated import REDQLoss_deprecated as REDQLoss
from torchrl.record import VideoRecorder
from torchrl.record.loggers.wandb import WandbLogger
from torchrl.trainers import Recorder
# ===========================================================================================
# Env constructor
# ---------------
# - Use the RoboHiveEnv class to wrap robohive envs in torchrl's GymWrapper
# - Add transforms immediately after that:
    # - SelectTransform: selects the relevant keys from our output
    # - R3MTransform
    # - FlattenObservation: the images delivered by robohive have a singleton leading dim; we need to flatten it
# - RewardScaling
#
# One can also possibly use ObservationNorm.
#
# TIPS:
    # - For faster execution, follow this abstract scheme: reduce the data passed
    #   from worker to worker to a minimum, apply R3M to a whole batch, and append
    #   the rest of the transforms afterward:
#
# >>> env = TransformedEnv(
# ... ParallelEnv(N, lambda: TransformedEnv(RoboHiveEnv(...), SelectTransform(...))),
# ... Compose(
# ... R3MTransform(...),
# ... FlattenObservation(...),
# ... *other_transforms,
# ... ))
#
def traj_is_solved(done, solved):
solved = solved.view_as(done)
done_cumsum = done.cumsum(-2)
count = 0
_i = 0
for _i, u in enumerate(done_cumsum.unique()):
is_solved = solved[done_cumsum == u].any()
count += is_solved
return count / (_i + 1)
def traj_total_reward(done, reward):
reward = reward.view_as(done)
done_cumsum = done.cumsum(-2)
count = 0
_i = 0
for _i, u in enumerate(done_cumsum.unique()):
count += reward[done_cumsum == u].sum()
return count / (_i + 1)
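    # (added comment) Both helpers above segment a flattened batch by the
    # running count of "done" flags and average a per-trajectory statistic over
    # the segments. Since cumsum increments on the done step itself, that step
    # is grouped with the following segment, so the estimates are approximate
    # at trajectory boundaries.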
def make_env(num_envs, task, visual_transform, reward_scaling, device):
if num_envs > 1:
base_env = ParallelEnv(
num_envs, EnvCreator(lambda: RoboHiveEnv(task, device=device))
)
else:
base_env = RoboHiveEnv(task, device=device)
env = make_transformed_env(
env=base_env,
reward_scaling=reward_scaling,
visual_transform=visual_transform,
)
return env
def make_transformed_env(
env,
reward_scaling=5.0,
visual_transform="r3m",
):
"""
Apply transforms to the env (such as reward scaling and state normalization)
"""
env = TransformedEnv(
env,
SelectTransform(
"solved", "pixels", "observation", "rwd_dense", "rwd_sparse"
),
)
if visual_transform == "r3m":
vec_keys = ["r3m_vec"]
selected_keys = ["observation", "r3m_vec"]
env.append_transform(
Compose(
R3MTransform("resnet50", in_keys=["pixels"], download=True).eval(),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
elif visual_transform == "rrl":
vec_keys = ["r3m_vec"]
selected_keys = ["observation", "r3m_vec"]
env.append_transform(
Compose(
R3MTransform(
"resnet50", in_keys=["pixels"], download="IMAGENET1K_V2"
).eval(),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
elif not visual_transform:
selected_keys = ["observation"]
else:
raise NotImplementedError(visual_transform)
env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
out_key = "observation_vector"
env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
return env
# ===========================================================================================
# Making a recorder
# -----------------
#
    # A `Recorder` is a dedicated torchrl class that will run the policy in the test env
    # once every X steps (e.g., X=1M).
#
def make_recorder(
task: str,
frame_skip: int,
record_interval: int,
actor_model_explore: object,
eval_traj: int,
env_configs: dict,
wandb_logger: WandbLogger,
num_envs: int,
):
test_env = make_env(num_envs=num_envs, task=task, **env_configs)
if "visual" in task:
test_env.insert_transform(
0, VideoRecorder(wandb_logger, "test", in_keys=["pixels"])
)
test_env.reset()
recorder_obj = Recorder(
record_frames=eval_traj * test_env.horizon,
frame_skip=frame_skip,
policy_exploration=actor_model_explore,
recorder=test_env,
exploration_mode="mean",
record_interval=record_interval,
log_keys=["reward", "solved", "done", "rwd_sparse"],
out_keys={
"reward": "r_evaluation",
"solved": "success",
"done": "done",
"rwd_sparse": "rwd_sparse",
},
)
return recorder_obj
# ===========================================================================================
    # Replay buffers
# ---------------
#
# TorchRL also provides prioritized RBs if needed.
#
def make_replay_buffer(
prb: bool,
buffer_size: int,
buffer_scratch_dir: str,
device: torch.device,
prefetch: int = 10,
):
if prb:
replay_buffer = TensorDictPrioritizedReplayBuffer(
alpha=0.7,
beta=0.5,
pin_memory=False,
prefetch=prefetch,
storage=LazyMemmapStorage(
buffer_size,
scratch_dir=buffer_scratch_dir,
device=device,
),
)
else:
replay_buffer = TensorDictReplayBuffer(
pin_memory=False,
prefetch=prefetch,
storage=LazyMemmapStorage(
buffer_size,
scratch_dir=buffer_scratch_dir,
device=device,
),
)
return replay_buffer
# ===========================================================================================
# Dataloader
# ----------
#
    # This is a simplified version of the dataloader
#
@torch.no_grad()
@set_exploration_mode("random")
def dataloader(
total_frames, fpb, train_env, actor, actor_collection, device_collection
):
params = TensorDict(
{k: v for k, v in actor.named_parameters()}, batch_size=[]
).unflatten_keys(".")
params_collection = TensorDict(
{k: v for k, v in actor_collection.named_parameters()}, batch_size=[]
).unflatten_keys(".")
_prev = None
collected_frames = 0
while collected_frames < total_frames:
params_collection.update_(params)
batch = TensorDict(
{}, batch_size=[fpb, *train_env.batch_size], device=device_collection
)
for t in range(fpb):
if _prev is None:
_prev = train_env.reset()
_reset = _prev["_reset"] = _prev["done"].clone().squeeze(-1)
if _reset.any():
_prev = train_env.reset(_prev)
_new = train_env.step(actor_collection(_prev))
batch[t] = _new
_prev = step_mdp(_new, exclude_done=False)
collected_frames += batch.numel()
yield batch
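    # (added comment) The generator above is illustrative only: it spells out
    # the manual reset/step collection loop that the MultiaSyncDataCollector
    # instantiated below replaces.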
# customize device at will
device = args.device
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Create Environment
env_configs = {
"reward_scaling": args.reward_scaling,
"visual_transform": args.visual_transform,
"device": "cpu",
}
train_env = make_env(num_envs=args.env_per_collector, task=args.task, **env_configs)
# add forward pass for initialization with proof env
proof_env = make_env(num_envs=1, task=args.task, **env_configs)
# Create Agent
# Define Actor Network
in_keys = ["observation_vector"]
action_spec = proof_env.action_spec
actor_net_kwargs = {
"num_cells": [256, 256],
"out_features": 2 * action_spec.shape[-1],
"activation_class": nn.ReLU,
}
actor_net = MLP(**actor_net_kwargs)
dist_class = TanhNormal
dist_kwargs = {
"min": action_spec.space.minimum,
"max": action_spec.space.maximum,
"tanh_loc": True,
}
actor_net = NormalParamWrapper(
actor_net,
scale_mapping=f"biased_softplus_{1.0}",
scale_lb=0.1,
)
in_keys_actor = in_keys
actor_module = SafeModule(
actor_net,
in_keys=in_keys_actor,
out_keys=[
"loc",
"scale",
],
)
actor = ProbabilisticActor(
spec=action_spec,
in_keys=["loc", "scale"],
module=actor_module,
distribution_class=dist_class,
distribution_kwargs=dist_kwargs,
default_interaction_mode="random",
return_log_prob=True,
)
# Define Critic Network
qvalue_net_kwargs = {
"num_cells": [256, 256],
"out_features": 1,
"activation_class": nn.ReLU,
}
qvalue_net = MLP(
**qvalue_net_kwargs,
)
qvalue = ValueOperator(
in_keys=["action"] + in_keys,
module=qvalue_net,
)
model = actor, qvalue = nn.ModuleList([actor, qvalue]).to(device)
# init nets
with torch.no_grad(), set_exploration_mode("random"):
td = proof_env.reset()
td = td.to(device)
for net in model:
net(td)
del td
proof_env.close()
actor_model_explore = model[0]
# Create REDQ loss
loss_module = REDQLoss(
actor_network=model[0],
qvalue_network=model[1],
gamma=args.gamma,
loss_function="smooth_l1",
)
# Define Target Network Updater
target_net_updater = SoftUpdate(loss_module, args.target_update_polyak)
# Make Replay Buffer
replay_buffer = make_replay_buffer(
prb=args.prb,
buffer_size=args.buffer_size,
buffer_scratch_dir=args.buffer_scratch_dir,
device="cpu",
)
# Optimizers
params = list(loss_module.parameters())
optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
rewards = []
rewards_eval = []
# Main loop
target_net_updater.init_()
collected_frames = 0
episodes = 0
optim_steps = 0
pbar = tqdm.tqdm(total=args.total_frames)
r0 = None
loss = None
logger = WandbLogger(
exp_name=args.task,
project=args.wandb_project,
name=args.exp_name,
config=args,
entity=args.wandb_entity,
mode=args.wandb_mode,
)
# Trajectory recorder for evaluation
recorder = make_recorder(
task=args.task,
frame_skip=args.frame_skip,
record_interval=args.record_interval,
actor_model_explore=actor_model_explore,
eval_traj=args.eval_traj,
env_configs=env_configs,
wandb_logger=logger,
num_envs=args.num_record_envs,
)
collector_device = args.device_collection
if isinstance(collector_device, str):
collector_device = [collector_device]
collector = MultiaSyncDataCollector(
create_env_fn=[train_env for _ in collector_device],
policy=actor_model_explore,
total_frames=args.total_frames,
max_frames_per_traj=args.frames_per_batch,
frames_per_batch=args.frames_per_batch,
init_random_frames=args.init_random_frames,
reset_at_each_iter=False,
postproc=None,
split_trajs=False,
devices=collector_device, # device for execution
passing_devices=collector_device, # device where data will be stored and passed
seed=args.seed,
pin_memory=False,
update_at_each_batch=False,
exploration_mode="random",
)
for i, batch in enumerate(collector):
collector.update_policy_weights_()
if r0 is None:
r0 = batch["reward"].sum(-1).mean().item()
pbar.update(batch.numel())
# extend the replay buffer with the new data
batch = batch.cpu().view(-1)
current_frames = batch.numel()
collected_frames += current_frames
episodes += batch["done"].sum()
replay_buffer.extend(batch)
# optimization steps
if collected_frames >= args.init_random_frames:
(
total_losses,
actor_losses,
q_losses,
alpha_losses,
alphas,
entropies,
) = ([], [], [], [], [], [])
for _ in range(
max(1, args.frames_per_batch * args.utd_ratio // args.batch_size)
):
optim_steps += 1
# sample from replay buffer
sampled_tensordict = (
replay_buffer.sample(args.batch_size).clone().to(device)
)
loss_td = loss_module(sampled_tensordict)
actor_loss = loss_td["loss_actor"]
q_loss = loss_td["loss_qvalue"]
alpha_loss = loss_td["loss_alpha"]
loss = actor_loss + q_loss + alpha_loss
optimizer.zero_grad()
loss.backward()
gn = torch.nn.utils.clip_grad_norm_(params, args.clip_norm)
optimizer.step()
# update qnet_target params
target_net_updater.step()
# update priority
if args.prb:
replay_buffer.update_tensordict_priority(sampled_tensordict)
total_losses.append(loss.item())
actor_losses.append(actor_loss.item())
q_losses.append(q_loss.item())
alpha_losses.append(alpha_loss.item())
alphas.append(loss_td["alpha"].item())
entropies.append(loss_td["entropy"].item())
rewards.append((i, batch["reward"].mean().item()))
logger.log_scalar("train_reward", rewards[-1][1], step=collected_frames)
logger.log_scalar("optim_steps", optim_steps, step=collected_frames)
logger.log_scalar("episodes", episodes, step=collected_frames)
if loss is not None:
logger.log_scalar(
"total_loss", np.mean(total_losses), step=collected_frames
)
logger.log_scalar(
"actor_loss", np.mean(actor_losses), step=collected_frames
)
logger.log_scalar("q_loss", np.mean(q_losses), step=collected_frames)
logger.log_scalar(
"alpha_loss", np.mean(alpha_losses), step=collected_frames
)
logger.log_scalar("alpha", np.mean(alphas), step=collected_frames)
logger.log_scalar("entropy", np.mean(entropies), step=collected_frames)
logger.log_scalar("grad_norm", gn, step=collected_frames)
td_record = recorder(None)
if td_record is not None:
rewards_eval.append(
(
i,
td_record["r_evaluation"]
                    / recorder.recorder.batch_size.numel(),  # divide by the number of eval workers
)
)
logger.log_scalar("test_reward", rewards_eval[-1][1], step=collected_frames)
solved = traj_is_solved(td_record["done"], td_record["success"])
logger.log_scalar("success", solved, step=collected_frames)
rwd_sparse = traj_total_reward(td_record["done"], td_record["rwd_sparse"])
logger.log_scalar("rwd_sparse", rwd_sparse, step=collected_frames)
if len(rewards_eval):
pbar.set_description(
f"reward: {rewards[-1][1]: 4.4f} (r0 = {r0: 4.4f}), test reward: {rewards_eval[-1][1]: 4.4f}, solved: {solved}"
)
del batch
# gc.collect()
if __name__ == "__main__":
main()
|
agenthive-dev
|
examples/redq.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from omegaconf import DictConfig
os.environ["sim_backend"] = "MUJOCO"
os.environ["MUJOCO_GL"] = "egl"
def main(args: DictConfig):
import numpy as np
import torch.cuda
import tqdm
from rlhive.rl_envs import RoboHiveEnv
from sac_loss import SACLoss
from tensordict import TensorDict
from torch import nn, optim
from torchrl.collectors import MultiaSyncDataCollector
from torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer
from torchrl.data.replay_buffers.storages import LazyMemmapStorage
# from torchrl.envs import SerialEnv as ParallelEnv, R3MTransform, SelectTransform, TransformedEnv
from torchrl.envs import (
CatTensors,
EnvCreator,
ParallelEnv,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode, step_mdp
from torchrl.modules import MLP, NormalParamWrapper, SafeModule
from torchrl.modules.distributions import TanhNormal
from torchrl.modules.tensordict_module.actors import (
ProbabilisticActor,
ValueOperator,
)
from torchrl.objectives import SoftUpdate
from torchrl.record import VideoRecorder
from torchrl.record.loggers.wandb import WandbLogger
from torchrl.trainers import Recorder
# ===========================================================================================
# Env constructor
# ---------------
# - Use the RoboHiveEnv class to wrap robohive envs in torchrl's GymWrapper
# - Add transforms immediately after that:
    # - SelectTransform: selects the relevant keys from our output
    # - R3MTransform
    # - FlattenObservation: the images delivered by robohive have a singleton leading dim; we need to flatten it
# - RewardScaling
#
# One can also possibly use ObservationNorm.
#
# TIPS:
    # - For faster execution, follow this abstract scheme: reduce the data passed
    #   from worker to worker to a minimum, apply R3M to a whole batch, and append
    #   the rest of the transforms afterward:
#
# >>> env = TransformedEnv(
# ... ParallelEnv(N, lambda: TransformedEnv(RoboHiveEnv(...), SelectTransform(...))),
# ... Compose(
# ... R3MTransform(...),
# ... FlattenObservation(...),
# ... *other_transforms,
# ... ))
#
def traj_is_solved(done, solved):
solved = solved.view_as(done)
done_cumsum = done.cumsum(-2)
count = 0
_i = 0
for _i, u in enumerate(done_cumsum.unique()):
is_solved = solved[done_cumsum == u].any()
count += is_solved
return count / (_i + 1)
def traj_total_reward(done, reward):
reward = reward.view_as(done)
done_cumsum = done.cumsum(-2)
count = 0
_i = 0
for _i, u in enumerate(done_cumsum.unique()):
count += reward[done_cumsum == u].sum()
return count / (_i + 1)
def make_env(num_envs, task, visual_transform, reward_scaling, device):
if num_envs > 1:
base_env = ParallelEnv(
num_envs, EnvCreator(lambda: RoboHiveEnv(task, device=device))
)
else:
base_env = RoboHiveEnv(task, device=device)
env = make_transformed_env(
env=base_env,
reward_scaling=reward_scaling,
visual_transform=visual_transform,
)
return env
def make_transformed_env(
env,
reward_scaling=5.0,
visual_transform="r3m",
):
"""
Apply transforms to the env (such as reward scaling and state normalization)
"""
env = TransformedEnv(
env,
SelectTransform(
"solved", "pixels", "observation", "rwd_dense", "rwd_sparse"
),
)
if visual_transform == "r3m":
vec_keys = ["r3m_vec"]
selected_keys = ["observation", "r3m_vec"]
env.append_transform(
Compose(
R3MTransform("resnet50", in_keys=["pixels"], download=True).eval(),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
elif visual_transform == "rrl":
vec_keys = ["r3m_vec"]
selected_keys = ["observation", "r3m_vec"]
env.append_transform(
Compose(
R3MTransform(
"resnet50", in_keys=["pixels"], download="IMAGENET1K_V2"
).eval(),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
elif not visual_transform:
selected_keys = ["observation"]
else:
raise NotImplementedError(visual_transform)
env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
out_key = "observation_vector"
env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
return env
# ===========================================================================================
# Making a recorder
# -----------------
#
    # A `Recorder` is a dedicated torchrl class that will run the policy in the test env
    # once every X steps (e.g., X=1M).
#
def make_recorder(
task: str,
frame_skip: int,
record_interval: int,
actor_model_explore: object,
eval_traj: int,
env_configs: dict,
wandb_logger: WandbLogger,
num_envs: int,
):
test_env = make_env(num_envs=num_envs, task=task, **env_configs)
if "visual" in task:
test_env.insert_transform(
0, VideoRecorder(wandb_logger, "test", in_keys=["pixels"])
)
test_env.reset()
recorder_obj = Recorder(
record_frames=eval_traj * test_env.horizon,
frame_skip=frame_skip,
policy_exploration=actor_model_explore,
recorder=test_env,
exploration_mode="mean",
record_interval=record_interval,
log_keys=["reward", "solved", "done", "rwd_sparse"],
out_keys={
"reward": "r_evaluation",
"solved": "success",
"done": "done",
"rwd_sparse": "rwd_sparse",
},
)
return recorder_obj
# ===========================================================================================
    # Replay buffers
# ---------------
#
# TorchRL also provides prioritized RBs if needed.
#
def make_replay_buffer(
prb: bool,
buffer_size: int,
buffer_scratch_dir: str,
device: torch.device,
prefetch: int = 10,
):
if prb:
replay_buffer = TensorDictPrioritizedReplayBuffer(
alpha=0.7,
beta=0.5,
pin_memory=False,
prefetch=prefetch,
storage=LazyMemmapStorage(
buffer_size,
scratch_dir=buffer_scratch_dir,
device=device,
),
)
else:
replay_buffer = TensorDictReplayBuffer(
pin_memory=False,
prefetch=prefetch,
storage=LazyMemmapStorage(
buffer_size,
scratch_dir=buffer_scratch_dir,
device=device,
),
)
return replay_buffer
# ===========================================================================================
# Dataloader
# ----------
#
    # This is a simplified version of the dataloader
#
@torch.no_grad()
@set_exploration_mode("random")
def dataloader(
total_frames, fpb, train_env, actor, actor_collection, device_collection
):
params = TensorDict(
{k: v for k, v in actor.named_parameters()}, batch_size=[]
).unflatten_keys(".")
params_collection = TensorDict(
{k: v for k, v in actor_collection.named_parameters()}, batch_size=[]
).unflatten_keys(".")
_prev = None
collected_frames = 0
while collected_frames < total_frames:
params_collection.update_(params)
batch = TensorDict(
{}, batch_size=[fpb, *train_env.batch_size], device=device_collection
)
for t in range(fpb):
if _prev is None:
_prev = train_env.reset()
_reset = _prev["_reset"] = _prev["done"].clone().squeeze(-1)
if _reset.any():
_prev = train_env.reset(_prev)
_new = train_env.step(actor_collection(_prev))
batch[t] = _new
_prev = step_mdp(_new, exclude_done=False)
collected_frames += batch.numel()
yield batch
# customize device at will
device = args.device
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Create Environment
env_configs = {
"reward_scaling": args.reward_scaling,
"visual_transform": args.visual_transform,
"device": device,
}
train_env = make_env(num_envs=args.env_per_collector, task=args.task, **env_configs)
# add forward pass for initialization with proof env
proof_env = make_env(num_envs=1, task=args.task, **env_configs)
# Create Agent
# Define Actor Network
in_keys = ["observation_vector"]
action_spec = proof_env.action_spec
actor_net_kwargs = {
"num_cells": [256, 256],
"out_features": 2 * action_spec.shape[-1],
"activation_class": nn.ReLU,
}
actor_net = MLP(**actor_net_kwargs)
dist_class = TanhNormal
dist_kwargs = {
"min": action_spec.space.minimum,
"max": action_spec.space.maximum,
"tanh_loc": True,
}
actor_net = NormalParamWrapper(
actor_net,
scale_mapping=f"biased_softplus_{1.0}",
scale_lb=0.1,
)
in_keys_actor = in_keys
actor_module = SafeModule(
actor_net,
in_keys=in_keys_actor,
out_keys=[
"loc",
"scale",
],
)
actor = ProbabilisticActor(
spec=action_spec,
in_keys=["loc", "scale"],
module=actor_module,
distribution_class=dist_class,
distribution_kwargs=dist_kwargs,
default_interaction_mode="random",
return_log_prob=True,
)
# Define Critic Network
qvalue_net_kwargs = {
"num_cells": [256, 256],
"out_features": 1,
"activation_class": nn.ReLU,
}
qvalue_net = MLP(
**qvalue_net_kwargs,
)
qvalue = ValueOperator(
in_keys=["action"] + in_keys,
module=qvalue_net,
)
model = actor, qvalue = nn.ModuleList([actor, qvalue]).to(device)
# init nets
with torch.no_grad(), set_exploration_mode("random"):
td = proof_env.reset()
td = td.to(device)
for net in model:
net(td)
del td
proof_env.close()
actor_model_explore = model[0]
# Create SAC loss
loss_module = SACLoss(
actor_network=model[0],
qvalue_network=model[1],
num_qvalue_nets=2,
gamma=args.gamma,
loss_function="smooth_l1",
)
# Define Target Network Updater
target_net_updater = SoftUpdate(loss_module, args.target_update_polyak)
# Make Replay Buffer
replay_buffer = make_replay_buffer(
prb=args.prb,
buffer_size=args.buffer_size,
buffer_scratch_dir=args.buffer_scratch_dir,
device=args.device,
)
# Optimizers
params = list(loss_module.parameters())
optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
rewards = []
rewards_eval = []
# Main loop
target_net_updater.init_()
collected_frames = 0
episodes = 0
optim_steps = 0
pbar = tqdm.tqdm(total=args.total_frames)
r0 = None
loss = None
logger = WandbLogger(
exp_name=args.task,
project=args.wandb_project,
name=args.exp_name,
config=args,
entity=args.wandb_entity,
mode=args.wandb_mode,
)
# Trajectory recorder for evaluation
recorder = make_recorder(
task=args.task,
frame_skip=args.frame_skip,
record_interval=args.record_interval,
actor_model_explore=actor_model_explore,
eval_traj=args.eval_traj,
env_configs=env_configs,
wandb_logger=logger,
num_envs=args.num_record_envs,
)
collector_device = args.device_collection
if isinstance(collector_device, str):
collector_device = [collector_device]
collector = MultiaSyncDataCollector(
create_env_fn=[train_env for _ in collector_device],
policy=actor_model_explore,
total_frames=args.total_frames,
max_frames_per_traj=args.frames_per_batch,
frames_per_batch=args.frames_per_batch,
init_random_frames=args.init_random_frames,
reset_at_each_iter=False,
postproc=None,
split_trajs=False,
devices=collector_device,
# device for execution
storing_devices=collector_device,
# device where data will be stored and passed
seed=args.seed,
pin_memory=False,
update_at_each_batch=False,
exploration_mode="random",
)
for i, batch in enumerate(collector):
collector.update_policy_weights_()
if r0 is None:
r0 = batch["reward"].sum(-1).mean().item()
pbar.update(batch.numel())
# extend the replay buffer with the new data
batch = batch.cpu().view(-1)
current_frames = batch.numel()
collected_frames += current_frames
episodes += batch["done"].sum()
replay_buffer.extend(batch)
# optimization steps
if collected_frames >= args.init_random_frames:
(
total_losses,
actor_losses,
q_losses,
alpha_losses,
alphas,
entropies,
) = ([], [], [], [], [], [])
for _ in range(
max(1, args.frames_per_batch * args.utd_ratio // args.batch_size)
):
optim_steps += 1
# sample from replay buffer
sampled_tensordict = (
replay_buffer.sample(args.batch_size).clone().to(device)
)
loss_td = loss_module(sampled_tensordict)
actor_loss = loss_td["loss_actor"]
q_loss = loss_td["loss_qvalue"]
alpha_loss = loss_td["loss_alpha"]
loss = actor_loss + q_loss + alpha_loss
optimizer.zero_grad()
loss.backward()
gn = torch.nn.utils.clip_grad_norm_(params, args.clip_norm)
optimizer.step()
# update qnet_target params
target_net_updater.step()
# update priority
if args.prb:
replay_buffer.update_tensordict_priority(sampled_tensordict)
total_losses.append(loss.item())
actor_losses.append(actor_loss.item())
q_losses.append(q_loss.item())
alpha_losses.append(alpha_loss.item())
alphas.append(loss_td["alpha"].item())
entropies.append(loss_td["entropy"].item())
rewards.append((i, batch["reward"].mean().item()))
logger.log_scalar("train_reward", rewards[-1][1], step=collected_frames)
logger.log_scalar("optim_steps", optim_steps, step=collected_frames)
logger.log_scalar("episodes", episodes, step=collected_frames)
if loss is not None:
logger.log_scalar(
"total_loss", np.mean(total_losses), step=collected_frames
)
logger.log_scalar(
"actor_loss", np.mean(actor_losses), step=collected_frames
)
logger.log_scalar("q_loss", np.mean(q_losses), step=collected_frames)
logger.log_scalar(
"alpha_loss", np.mean(alpha_losses), step=collected_frames
)
logger.log_scalar("alpha", np.mean(alphas), step=collected_frames)
logger.log_scalar("entropy", np.mean(entropies), step=collected_frames)
logger.log_scalar("grad_norm", gn, step=collected_frames)
td_record = recorder(None)
if td_record is not None:
rewards_eval.append(
(
i,
td_record["r_evaluation"] / recorder.recorder.batch_size.numel(),
                    # divide by the number of eval workers
)
)
logger.log_scalar("test_reward", rewards_eval[-1][1], step=collected_frames)
solved = traj_is_solved(td_record["done"], td_record["success"])
logger.log_scalar("success", solved, step=collected_frames)
rwd_sparse = traj_total_reward(td_record["done"], td_record["rwd_sparse"])
logger.log_scalar("rwd_sparse", rwd_sparse, step=collected_frames)
if len(rewards_eval):
pbar.set_description(
f"reward: {rewards[-1][1]: 4.4f} (r0 = {r0: 4.4f}), test reward: {rewards_eval[-1][1]: 4.4f}, solved: {solved}"
)
del batch
# gc.collect()
|
agenthive-dev
|
examples/sac.py
|
"""Entry point for RLHive"""
import hydra
from omegaconf import DictConfig
from redq import main as train_redq
from sac import main as train_sac
@hydra.main(config_name="sac_mixed.yaml", config_path="config")
def main(args: DictConfig):
    if args.algo == "sac":
        train_sac(args)
    elif args.algo == "redq":
        train_redq(args)
    else:
        raise NotImplementedError
if __name__ == "__main__":
main()
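
# Example invocations (illustrative, using hydra's command-line override syntax
# against the config selected above):
#   python train.py algo=sac
#   python train.py algo=redq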
|
agenthive-dev
|
examples/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["sim_backend"] = "MUJOCO"
import argparse
import time
import tqdm
from rlhive.rl_envs import RoboHiveEnv
from torchrl.collectors.collectors import MultiaSyncDataCollector, RandomPolicy
from torchrl.collectors.distributed import DistributedDataCollector, RPCDataCollector
from torchrl.envs import EnvCreator, ParallelEnv, R3MTransform, TransformedEnv
parser = argparse.ArgumentParser()
parser.add_argument("--num_workers", default=2, type=int)
parser.add_argument("--num_collectors", default=4, type=int)
parser.add_argument("--frames_per_batch", default=200, type=int)
parser.add_argument("--total_frames", default=20_000, type=int)
parser.add_argument("--r3m", action="store_true")
parser.add_argument("--env_name", default="franka_micro_random-v3")
if __name__ == "__main__":
args = parser.parse_args()
if args.num_workers > 1:
penv = ParallelEnv(
args.num_workers,
EnvCreator(lambda: RoboHiveEnv(args.env_name, device="cpu")),
)
else:
penv = RoboHiveEnv(args.env_name, device="cpu")
if "visual" in args.env_name:
if args.r3m:
tenv = TransformedEnv(
penv,
R3MTransform(in_keys=["pixels"], download=True, model_name="resnet50"),
)
else:
tenv = penv
else:
tenv = penv
# tenv.transform[-1].init_stats(reduce_dim=(0, 1), cat_dim=1,
# num_iter=1000)
policy = RandomPolicy(tenv.action_spec) # some random policy
device = "cpu"
slurm_conf = {
"timeout_min": 100,
"slurm_partition": "train",
"slurm_cpus_per_gpu": 12,
"slurm_gpus_per_task": 1,
}
collector = DistributedDataCollector(
[tenv] * args.num_collectors,
policy=policy,
frames_per_batch=args.frames_per_batch,
total_frames=args.total_frames,
storing_device=device,
split_trajs=False,
sync=True,
launcher="mp",
slurm_kwargs=slurm_conf,
backend="gloo",
)
pbar = tqdm.tqdm(total=args.total_frames)
for i, data in enumerate(collector):
if i == 3:
t0 = time.time()
total = 0
if i >= 3:
total += data.numel()
pbar.update(data.numel())
t = time.time() - t0
print(f"{args.env_name}, Time: {t:4.4f}, Rate: {args.total_frames / t: 4.4f} fps")
del collector
del tenv
|
agenthive-dev
|
examples/collection_speed.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from numbers import Number
from typing import Union
import numpy as np
import torch
from tensordict.nn import TensorDictSequential
from tensordict.tensordict import TensorDict, TensorDictBase
from torch import Tensor
from torchrl.envs.utils import set_exploration_mode, step_mdp
from torchrl.modules import SafeModule
from torchrl.objectives.common import LossModule
from torchrl.objectives.utils import (
distance_loss,
next_state_value as get_next_state_value,
)
try:
from functorch import vmap
FUNCTORCH_ERR = ""
_has_functorch = True
except ImportError as err:
FUNCTORCH_ERR = str(err)
_has_functorch = False
class SACLoss(LossModule):
"""SAC Loss module.
Args:
actor_network (SafeModule): the actor to be trained
        qvalue_network (SafeModule): a single Q-value network that will be replicated as many times as needed.
        num_qvalue_nets (int, optional): number of Q-value networks to be trained. Default is 2.
gamma (Number, optional): gamma decay factor. Default is 0.99.
        priority_key (str, optional): key where to write the priority value for prioritized replay buffers. Default is
            `"td_error"`.
        loss_function (str, optional): loss function to be used for the Q-value. Can be one of `"smooth_l1"`, `"l2"`
            or `"l1"`. Default is `"smooth_l1"`.
alpha_init (float, optional): initial entropy multiplier.
Default is 1.0.
min_alpha (float, optional): min value of alpha.
Default is 0.1.
max_alpha (float, optional): max value of alpha.
Default is 10.0.
fixed_alpha (bool, optional): whether alpha should be trained to match a target entropy. Default is :obj:`False`.
target_entropy (Union[str, Number], optional): Target entropy for the stochastic policy. Default is "auto".
        delay_qvalue (bool, optional): whether to separate the target Q-value networks from the Q-value networks used
            for data collection. Default is :obj:`True`.
        gSDE (bool, optional): whether gSDE is used; this is necessary to create the random noise variables.
            Default is :obj:`False`.
"""
delay_actor: bool = False
_explicit: bool = False
def __init__(
self,
actor_network: SafeModule,
qvalue_network: SafeModule,
num_qvalue_nets: int = 2,
gamma: Number = 0.99,
        priority_key: str = "td_error",
loss_function: str = "smooth_l1",
alpha_init: float = 1.0,
min_alpha: float = 0.1,
max_alpha: float = 10.0,
fixed_alpha: bool = False,
target_entropy: Union[str, Number] = "auto",
delay_qvalue: bool = True,
gSDE: bool = False,
):
if not _has_functorch:
raise ImportError(
f"Failed to import functorch with error message:\n{FUNCTORCH_ERR}"
)
super().__init__()
self.convert_to_functional(
actor_network,
"actor_network",
create_target_params=self.delay_actor,
funs_to_decorate=["forward", "get_dist_params"],
)
# let's make sure that actor_network has `return_log_prob` set to True
self.actor_network.return_log_prob = True
self.delay_qvalue = delay_qvalue
self.convert_to_functional(
qvalue_network,
"qvalue_network",
num_qvalue_nets,
create_target_params=self.delay_qvalue,
compare_against=list(actor_network.parameters()),
)
self.num_qvalue_nets = num_qvalue_nets
self.register_buffer("gamma", torch.tensor(gamma))
        self.priority_key = priority_key
self.loss_function = loss_function
try:
device = next(self.parameters()).device
except AttributeError:
device = torch.device("cpu")
self.register_buffer("alpha_init", torch.tensor(alpha_init, device=device))
self.register_buffer(
"min_log_alpha", torch.tensor(min_alpha, device=device).log()
)
self.register_buffer(
"max_log_alpha", torch.tensor(max_alpha, device=device).log()
)
self.fixed_alpha = fixed_alpha
if fixed_alpha:
self.register_buffer(
"log_alpha", torch.tensor(math.log(alpha_init), device=device)
)
else:
self.register_parameter(
"log_alpha",
torch.nn.Parameter(torch.tensor(math.log(alpha_init), device=device)),
)
if target_entropy == "auto":
if actor_network.spec["action"] is None:
raise RuntimeError(
"Cannot infer the dimensionality of the action. Consider providing "
"the target entropy explicitely or provide the spec of the "
"action tensor in the actor network."
)
target_entropy = -float(np.prod(actor_network.spec["action"].shape))
self.register_buffer(
"target_entropy", torch.tensor(target_entropy, device=device)
)
self.gSDE = gSDE
@property
def alpha(self):
self.log_alpha.data.clamp_(self.min_log_alpha, self.max_log_alpha)
with torch.no_grad():
alpha = self.log_alpha.exp()
return alpha
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
if self._explicit:
# slow but explicit version
return self._forward_explicit(tensordict)
else:
return self._forward_vectorized(tensordict)
def _loss_alpha(self, log_pi: Tensor) -> Tensor:
if torch.is_grad_enabled() and not log_pi.requires_grad:
raise RuntimeError(
"expected log_pi to require gradient for the alpha loss)"
)
if self.target_entropy is not None:
# we can compute this loss even if log_alpha is not a parameter
alpha_loss = -self.log_alpha.exp() * (log_pi.detach() + self.target_entropy)
else:
# placeholder
alpha_loss = torch.zeros_like(log_pi)
return alpha_loss
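    # (added comment) The alpha loss above is the SAC-v2 temperature objective
    # J(alpha) = E_{a ~ pi}[-alpha * (log pi(a|s) + H_target)]: alpha grows when
    # the policy entropy drops below the target and shrinks otherwise, while the
    # log-parameterization keeps alpha positive.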
def _forward_vectorized(self, tensordict: TensorDictBase) -> TensorDictBase:
obs_keys = self.actor_network.in_keys
tensordict_select = tensordict.select(
"reward", "done", "next", *obs_keys, "action"
)
actor_params = torch.stack(
[self.actor_network_params, self.target_actor_network_params], 0
)
tensordict_actor_grad = tensordict_select.select(
*obs_keys
) # to avoid overwriting keys
next_td_actor = step_mdp(tensordict_select).select(
*self.actor_network.in_keys
) # next_observation ->
tensordict_actor = torch.stack([tensordict_actor_grad, next_td_actor], 0)
tensordict_actor = tensordict_actor.contiguous()
with set_exploration_mode("random"):
if self.gSDE:
tensordict_actor.set(
"_eps_gSDE",
torch.zeros(tensordict_actor.shape, device=tensordict_actor.device),
)
            # vmap doesn't support sampling, so we take it out of the vmap
td_params = vmap(self.actor_network.get_dist_params)(
tensordict_actor,
actor_params,
)
if isinstance(self.actor_network, TensorDictSequential):
sample_key = self.actor_network[-1].out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
else:
sample_key = self.actor_network.out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
tensordict_actor[sample_key] = self._rsample(tensordict_actor_dist)
tensordict_actor["sample_log_prob"] = tensordict_actor_dist.log_prob(
tensordict_actor[sample_key]
)
# repeat tensordict_actor to match the qvalue size
_actor_loss_td = (
tensordict_actor[0]
.select(*self.qvalue_network.in_keys)
.expand(self.num_qvalue_nets, *tensordict_actor[0].batch_size)
) # for actor loss
_qval_td = tensordict_select.select(*self.qvalue_network.in_keys).expand(
self.num_qvalue_nets,
*tensordict_select.select(*self.qvalue_network.in_keys).batch_size,
) # for qvalue loss
_next_val_td = (
tensordict_actor[1]
.select(*self.qvalue_network.in_keys)
.expand(self.num_qvalue_nets, *tensordict_actor[1].batch_size)
) # for next value estimation
tensordict_qval = torch.cat(
[
_actor_loss_td,
_next_val_td,
_qval_td,
],
0,
)
# cat params
q_params_detach = self.qvalue_network_params.detach()
qvalue_params = torch.cat(
[
q_params_detach,
self.target_qvalue_network_params,
self.qvalue_network_params,
],
0,
)
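        # (added comment) The three stacked parameter groups line up with the
        # inputs concatenated above and with the split below: detached Q-params
        # score the reparameterized actor action (actor loss), target Q-params
        # evaluate the next state (TD target), and trainable Q-params produce
        # the predictions for the TD loss.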
tensordict_qval = vmap(self.qvalue_network)(
tensordict_qval,
qvalue_params,
)
state_action_value = tensordict_qval.get("state_action_value").squeeze(-1)
(
state_action_value_actor,
next_state_action_value_qvalue,
state_action_value_qvalue,
) = state_action_value.split(
[self.num_qvalue_nets, self.num_qvalue_nets, self.num_qvalue_nets],
dim=0,
)
sample_log_prob = tensordict_actor.get("sample_log_prob").squeeze(-1)
(
action_log_prob_actor,
next_action_log_prob_qvalue,
) = sample_log_prob.unbind(0)
# E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
loss_actor = -(
state_action_value_actor.min(0)[0] - self.alpha * action_log_prob_actor
).mean()
next_state_value = (
next_state_action_value_qvalue.min(0)[0]
- self.alpha * next_action_log_prob_qvalue
)
target_value = get_next_state_value(
tensordict,
gamma=self.gamma,
pred_next_val=next_state_value,
)
pred_val = state_action_value_qvalue
td_error = (pred_val - target_value).pow(2)
loss_qval = (
distance_loss(
pred_val,
target_value.expand_as(pred_val),
loss_function=self.loss_function,
)
.mean(-1)
.sum()
* 0.5
)
tensordict.set("td_error", td_error.detach().max(0)[0])
loss_alpha = self._loss_alpha(sample_log_prob)
        if loss_qval.shape != loss_actor.shape:
raise RuntimeError(
f"QVal and actor loss have different shape: {loss_qval.shape} and {loss_actor.shape}"
)
td_out = TensorDict(
{
"loss_actor": loss_actor.mean(),
"loss_qvalue": loss_qval.mean(),
"loss_alpha": loss_alpha.mean(),
"alpha": self.alpha.detach(),
"entropy": -sample_log_prob.mean().detach(),
"state_action_value_actor": state_action_value_actor.mean().detach(),
"action_log_prob_actor": action_log_prob_actor.mean().detach(),
"next.state_value": next_state_value.mean().detach(),
"target_value": target_value.mean().detach(),
},
[],
)
return td_out
def _forward_explicit(self, tensordict: TensorDictBase) -> TensorDictBase:
loss_actor, sample_log_prob = self._loss_actor_explicit(tensordict.clone(False))
loss_qval, td_error = self._loss_qval_explicit(tensordict.clone(False))
tensordict.set("td_error", td_error.detach().max(0)[0])
loss_alpha = self._loss_alpha(sample_log_prob)
td_out = TensorDict(
{
"loss_actor": loss_actor.mean(),
"loss_qvalue": loss_qval.mean(),
"loss_alpha": loss_alpha.mean(),
"alpha": self.alpha.detach(),
"entropy": -sample_log_prob.mean().detach(),
# "state_action_value_actor": state_action_value_actor.mean().detach(),
# "action_log_prob_actor": action_log_prob_actor.mean().detach(),
# "next.state_value": next_state_value.mean().detach(),
# "target_value": target_value.mean().detach(),
},
[],
)
return td_out
def _rsample(
self,
dist,
):
# separated only for the purpose of making the sampling
# deterministic to compare methods
return dist.rsample()
def _sample_reparam(self, tensordict, params):
"""Given a policy param batch and input data in a tensordict, writes a reparam sample and log-prob key."""
with set_exploration_mode("random"):
if self.gSDE:
raise NotImplementedError
            # vmap doesn't support sampling, so we take it out of the vmap
td_params = self.actor_network.get_dist_params(
tensordict,
params,
)
if isinstance(self.actor_network, TensorDictSequential):
sample_key = self.actor_network[-1].out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
else:
sample_key = self.actor_network.out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
tensordict[sample_key] = self._rsample(tensordict_actor_dist)
tensordict["sample_log_prob"] = tensordict_actor_dist.log_prob(
tensordict[sample_key]
)
return tensordict
def _loss_actor_explicit(self, tensordict):
tensordict_actor = tensordict.clone(False)
actor_params = self.actor_network_params
tensordict_actor = self._sample_reparam(tensordict_actor, actor_params)
action_log_prob_actor = tensordict_actor["sample_log_prob"]
tensordict_qval = tensordict_actor.select(*self.qvalue_network.in_keys).expand(
self.num_qvalue_nets, *tensordict_actor.batch_size
) # for actor loss
qvalue_params = self.qvalue_network_params.detach()
tensordict_qval = vmap(self.qvalue_network)(
tensordict_qval,
qvalue_params,
)
state_action_value_actor = tensordict_qval.get("state_action_value").squeeze(-1)
state_action_value_actor = state_action_value_actor.min(0)[0]
# E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
loss_actor = (
self.alpha * action_log_prob_actor - state_action_value_actor
).mean()
return loss_actor, action_log_prob_actor
def _loss_qval_explicit(self, tensordict):
next_tensordict = step_mdp(tensordict)
next_tensordict = self._sample_reparam(
next_tensordict, self.target_actor_network_params
)
next_action_log_prob_qvalue = next_tensordict["sample_log_prob"]
next_state_action_value_qvalue = vmap(self.qvalue_network, (None, 0))(
next_tensordict,
self.target_qvalue_network_params,
)["state_action_value"].squeeze(-1)
next_state_value = (
next_state_action_value_qvalue.min(0)[0]
- self.alpha * next_action_log_prob_qvalue
)
pred_val = vmap(self.qvalue_network, (None, 0))(
tensordict,
self.qvalue_network_params,
)["state_action_value"].squeeze(-1)
target_value = get_next_state_value(
tensordict,
gamma=self.gamma,
pred_next_val=next_state_value,
)
        # 1/2 * E[(Q(s, a) - (r + gamma * (Q(s', a') - alpha * log pi(a'|s'))))^2]
loss_qval = (
distance_loss(
pred_val,
target_value.expand_as(pred_val),
loss_function=self.loss_function,
)
.mean(-1)
.sum()
* 0.5
)
td_error = (pred_val - target_value).pow(2)
return loss_qval, td_error
if __name__ == "__main__":
from tensordict.nn import TensorDictModule
from torch import nn
from torchrl.data import BoundedTensorSpec
    # Tests the vectorized version of SAC-v2 against the plain implementation
from torchrl.modules import ProbabilisticActor, ValueOperator
from torchrl.modules.distributions import TanhNormal
torch.manual_seed(0)
action_spec = BoundedTensorSpec(-1, 1, shape=(3,))
class Splitter(nn.Linear):
def forward(self, x):
loc, scale = super().forward(x).chunk(2, -1)
return loc, scale.exp()
actor_module = TensorDictModule(
Splitter(6, 6), in_keys=["obs"], out_keys=["loc", "scale"]
)
actor = ProbabilisticActor(
spec=action_spec,
in_keys=["loc", "scale"],
module=actor_module,
distribution_class=TanhNormal,
default_interaction_mode="random",
return_log_prob=False,
)
class QVal(nn.Linear):
def forward(self, s: Tensor, a: Tensor) -> Tensor:
return super().forward(torch.cat([s, a], -1))
qvalue = ValueOperator(QVal(9, 1), in_keys=["obs", "action"])
_rsample_old = SACLoss._rsample
def _rsample_new(self, dist):
return torch.ones_like(_rsample_old(self, dist))
SACLoss._rsample = _rsample_new
loss = SACLoss(actor, qvalue)
for batch in ((), (2, 3)):
td_input = TensorDict(
{
"obs": torch.rand(*batch, 6),
"action": torch.rand(*batch, 3).clamp(-1, 1),
"next": {"obs": torch.rand(*batch, 6)},
"reward": torch.rand(*batch, 1),
"done": torch.zeros(*batch, 1, dtype=torch.bool),
},
batch,
)
loss._explicit = True
loss0 = loss(td_input)
loss._explicit = False
loss1 = loss(td_input)
print("a", loss0["loss_actor"] - loss1["loss_actor"])
print("q", loss0["loss_qvalue"] - loss1["loss_qvalue"])
|
agenthive-dev
|
examples/sac_loss.py
|
import json
import random
import torch
import numpy as np
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
            return super().default(obj)
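# Usage sketch (illustrative): NpEncoder lets json serialize numpy scalars and
# arrays that the stock encoder rejects, e.g.
#   json.dumps({"x": np.float32(1.5), "y": np.arange(3)}, cls=NpEncoder)
#   -> '{"x": 1.5, "y": [0, 1, 2]}'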
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def control_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def stack_tensor_list(tensor_list):
return np.array(tensor_list)
def stack_tensor_dict_list(tensor_dict_list):
"""
Stack a list of dictionaries of {tensors or dictionary of tensors}.
:param tensor_dict_list: a list of dictionaries of {tensors or dictionary of tensors}.
:return: a dictionary of {stacked tensors or dictionary of stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = stack_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = stack_tensor_list([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
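# Example (illustrative):
#   stack_tensor_dict_list([{"a": 1, "b": {"c": 2}}, {"a": 3, "b": {"c": 4}}])
#   -> {"a": array([1, 3]), "b": {"c": array([2, 4])}}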
def tensorize(var, device='cpu'):
"""
Convert input to torch.Tensor on desired device
:param var: type either torch.Tensor or np.ndarray
:param device: desired device for output (e.g. cpu, cuda)
:return: torch.Tensor mapped to the device
"""
if type(var) == torch.Tensor:
return var.to(device)
elif type(var) == np.ndarray:
return torch.from_numpy(var).float().to(device)
    elif type(var) == float:
        return torch.tensor(var).float().to(device)
    else:
        raise TypeError(f"Variable of type {type(var)} is not compatible with tensorize.")
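# Hedged usage sketch (assumes a CPU-only run; values are illustrative):
#   tensorize(np.zeros((2, 3)))             # float32 tensor of shape (2, 3) on cpu
#   tensorize(torch.ones(4), device='cpu')  # returned as-is, moved to cpu
#   tensorize(0.5)                          # 0-dim float32 tensor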
|
agenthive-dev
|
scripts/bc/misc.py
|
"""
Minimize bc loss (MLE, MSE, RWR etc.) with pytorch optimizers
"""
import logging
logging.disable(logging.CRITICAL)
import numpy as np
import torch
import time as timer
from tqdm import tqdm
from misc import tensorize
class BC:
def __init__(self, expert_paths,
policy,
epochs = 5,
batch_size = 64,
lr = 1e-3,
optimizer = None,
loss_type = 'MSE', # can be 'MLE' or 'MSE'
save_logs = True,
logger = None,
set_transforms = False,
*args, **kwargs,
):
self.policy = policy
self.expert_paths = expert_paths
self.epochs = epochs
self.mb_size = batch_size
self.logger = logger
self.loss_type = loss_type
self.save_logs = save_logs
self.device = self.policy.device
assert (self.loss_type == 'MSE' or self.loss_type == 'MLE')
        if self.save_logs: assert self.logger is not None
if set_transforms:
in_shift, in_scale, out_shift, out_scale = self.compute_transformations()
self.set_transformations(in_shift, in_scale, out_shift, out_scale)
#self.set_variance_with_data(out_scale)
# construct optimizer
self.optimizer = torch.optim.Adam(self.policy.trainable_params, lr=lr) if optimizer is None else optimizer
# Loss criterion if required
if loss_type == 'MSE':
self.loss_criterion = torch.nn.MSELoss()
def compute_transformations(self):
# get transformations
if self.expert_paths == [] or self.expert_paths is None:
in_shift, in_scale, out_shift, out_scale = None, None, None, None
else:
print(type(self.expert_paths))
if type(self.expert_paths) is list:
observations = np.concatenate([path["observations"] for path in self.expert_paths])
actions = np.concatenate([path["actions"] for path in self.expert_paths])
else: # 'h5py._hl.files.File'
observations = np.concatenate([self.expert_paths[k]['observations'] for k in self.expert_paths.keys()])
actions = np.concatenate([self.expert_paths[k]['actions'] for k in self.expert_paths.keys()])
in_shift, in_scale = np.mean(observations, axis=0), np.std(observations, axis=0)
out_shift, out_scale = np.mean(actions, axis=0), np.std(actions, axis=0)
return in_shift, in_scale, out_shift, out_scale
def set_transformations(self, in_shift=None, in_scale=None, out_shift=None, out_scale=None):
# set scalings in the target policy
self.policy.set_transformations(in_shift, in_scale, out_shift, out_scale)
def set_variance_with_data(self, out_scale):
# set the variance of gaussian policy based on out_scale
out_scale = tensorize(out_scale, device=self.policy.device)
data_log_std = torch.log(out_scale + 1e-3)
self.policy.set_log_std(data_log_std)
def loss(self, data, idx=None):
if self.loss_type == 'MLE':
return self.mle_loss(data, idx)
elif self.loss_type == 'MSE':
return self.mse_loss(data, idx)
        else:
            raise ValueError(f"Invalid loss type: {self.loss_type}")
def mle_loss(self, data, idx):
# use indices if provided (e.g. for mini-batching)
# otherwise, use all the data
idx = range(data['observations'].shape[0]) if idx is None else idx
if type(data['observations']) == torch.Tensor:
idx = torch.LongTensor(idx)
obs = data['observations'][idx]
act = data['expert_actions'][idx]
mu, LL = self.policy.mean_LL(obs, act)
# minimize negative log likelihood
return -torch.mean(LL)
def mse_loss(self, data, idx=None):
idx = range(data['observations'].shape[0]) if idx is None else idx
if type(data['observations']) is torch.Tensor:
idx = torch.LongTensor(idx)
obs = data['observations'][idx]
act_expert = data['expert_actions'][idx]
act_expert = tensorize(act_expert, device=self.policy.device)
act_pi = self.policy.forward(obs)
return self.loss_criterion(act_pi, act_expert.detach())
def fit(self, data, suppress_fit_tqdm=False, **kwargs):
# data is a dict
# keys should have "observations" and "expert_actions"
        assert all(k in data for k in ["observations", "expert_actions"])
ts = timer.time()
num_samples = data["observations"].shape[0]
# log stats before
if self.save_logs:
loss_val = self.loss(data, idx=range(num_samples)).to('cpu').data.numpy().ravel()[0]
self.logger.log_scalar("train/loss_before", loss_val, step=0)
print('BC loss before', loss_val)
# train loop
for ep in config_tqdm(range(self.epochs), suppress_fit_tqdm):
avg_loss = 0.0
step = 0
for mb in range(int(num_samples / self.mb_size)):
rand_idx = np.random.choice(num_samples, size=self.mb_size)
self.optimizer.zero_grad()
loss = self.loss(data, idx=rand_idx)
loss.backward()
self.optimizer.step()
avg_loss = (avg_loss*step + loss.item())/(step+1)
step += 1
if self.save_logs:
self.logger.log_scalar("train/bc_loss", avg_loss, step=ep+1)
# log stats after
if self.save_logs:
loss_val = self.loss(data, idx=range(num_samples)).to('cpu').data.numpy().ravel()[0]
self.logger.log_scalar("train/loss_after", loss_val, step=self.epochs)
            print('BC loss after', loss_val)
def train(self, **kwargs):
if not hasattr(self, 'data'):
observations = np.concatenate([path["observations"] for path in self.expert_paths])
expert_actions = np.concatenate([path["actions"] for path in self.expert_paths])
observations = tensorize(observations, device=self.policy.device)
expert_actions = tensorize(expert_actions, self.policy.device)
self.data = dict(observations=observations, expert_actions=expert_actions)
self.fit(self.data, **kwargs)
def train_h5(self, **kwargs):
if not hasattr(self, 'data'):
observations = np.concatenate([self.expert_paths[k]['observations'] for k in self.expert_paths.keys()])
expert_actions = np.concatenate([self.expert_paths[k]['actions'] for k in self.expert_paths.keys()])
observations = tensorize(observations, device=self.policy.device)
expert_actions = tensorize(expert_actions, self.policy.device)
self.data = dict(observations=observations, expert_actions=expert_actions)
self.fit(self.data, **kwargs)
def config_tqdm(range_inp, suppress_tqdm=False):
if suppress_tqdm:
return range_inp
else:
return tqdm(range_inp)
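# Hedged usage sketch (DummyPolicy and the random paths below are assumptions
# for illustration, not part of the repo). BC expects paths with
# "observations"/"actions" arrays and a policy exposing .device,
# .trainable_params and .forward:
#   import torch.nn as nn
#   class DummyPolicy(nn.Module):
#       def __init__(self):
#           super().__init__()
#           self.net = nn.Linear(3, 2)
#           self.device = 'cpu'
#           self.trainable_params = list(self.net.parameters())
#       def forward(self, obs):
#           return self.net(obs)
#   paths = [{"observations": np.random.randn(10, 3),
#             "actions": np.random.randn(10, 2)}]
#   agent = BC(paths, DummyPolicy(), epochs=1, batch_size=4,
#              loss_type='MSE', save_logs=False)
#   agent.train()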
|
agenthive-dev
|
scripts/bc/behavior_cloning.py
|
"""
Job script to learn policy using BC
"""
import os
import time
from os import environ
environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
environ['MKL_THREADING_LAYER']='GNU'
import pickle
import yaml
import hydra
import gym
import wandb
import numpy as np
from omegaconf import DictConfig, OmegaConf, ListConfig
from batch_norm_mlp import BatchNormMLP
from gmm_policy import GMMPolicy
from behavior_cloning import BC
from misc import control_seed, \
bcolors, stack_tensor_dict_list
from torchrl.record.loggers.wandb import WandbLogger
from robohive.logger.grouped_datasets import Trace as Trace
def evaluate_policy(
policy,
env,
num_episodes,
epoch,
horizon=None,
gamma=1,
percentile=[],
get_full_dist=False,
eval_logger=None,
device='cpu',
seed=123,
verbose=True,
):
env.seed(seed)
horizon = env.horizon if horizon is None else horizon
mean_eval, std, min_eval, max_eval = 0.0, 0.0, -1e8, -1e8
ep_returns = np.zeros(num_episodes)
policy.eval()
paths = []
for ep in range(num_episodes):
observations=[]
actions=[]
rewards=[]
agent_infos = []
env_infos = []
o = env.reset()
t, done = 0, False
while t < horizon and (done == False):
a = policy.get_action(o)[1]['evaluation']
next_o, r, done, env_info = env.step(a)
ep_returns[ep] += (gamma ** t) * r
observations.append(o)
actions.append(a)
rewards.append(r)
agent_infos.append(None)
env_infos.append(env_info)
o = next_o
t += 1
if verbose:
print("Episode: {}; Reward: {}".format(ep, ep_returns[ep]))
path = dict(
observations=np.array(observations),
actions=np.array(actions),
rewards=np.array(rewards),
#agent_infos=stack_tensor_dict_list(agent_infos),
env_infos=stack_tensor_dict_list(env_infos),
terminated=done
)
paths.append(path)
mean_eval, std = np.mean(ep_returns), np.std(ep_returns)
min_eval, max_eval = np.amin(ep_returns), np.amax(ep_returns)
base_stats = [mean_eval, std, min_eval, max_eval]
percentile_stats = []
for p in percentile:
percentile_stats.append(np.percentile(ep_returns, p))
full_dist = ep_returns if get_full_dist is True else None
success = env.evaluate_success(paths, logger=None) ## Don't use the mj_envs logging function
    if eval_logger is not None:
rwd_sparse = np.mean([np.mean(p['env_infos']['rwd_sparse']) for p in paths]) # return rwd/step
rwd_dense = np.mean([np.sum(p['env_infos']['rwd_dense'])/env.horizon for p in paths]) # return rwd/step
eval_logger.log_scalar('eval/rwd_sparse', rwd_sparse, step=epoch)
eval_logger.log_scalar('eval/rwd_dense', rwd_dense, step=epoch)
eval_logger.log_scalar('eval/success', success, step=epoch)
return [base_stats, percentile_stats, full_dist], success
class ObservationWrapper:
def __init__(self, env_name, visual_keys, encoder):
self.env = gym.make(env_name, visual_keys=visual_keys)
self.horizon = self.env.horizon
self.encoder = encoder
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
return self.get_obs(obs)
def step(self, action):
observation, reward, terminated, info = self.env.step(action)
return self.get_obs(observation), reward, terminated, info
def get_obs(self, observation=None):
if self.encoder == 'proprio':
proprio_vec = self.env.get_proprioception()[1]
return proprio_vec
if len(self.env.visual_keys) > 0:
visual_obs = self.env.get_exteroception()
final_visual_obs = None
for key in self.env.visual_keys:
if final_visual_obs is None:
final_visual_obs = visual_obs[key]
else:
final_visual_obs = np.concatenate((final_visual_obs, visual_obs[key]), axis=-1)
_, proprio_vec, _ = self.env.get_proprioception()
observation = np.concatenate((final_visual_obs, proprio_vec))
else:
observation = self.env.get_obs() if observation is None else observation
return observation
def seed(self, seed):
return self.env.seed(seed)
def set_env_state(self, state_dict):
return self.env.set_env_state(state_dict)
def evaluate_success(self, paths, logger=None):
return self.env.evaluate_success(paths, logger=logger)
def make_env(env_name, cam_name, encoder, from_pixels):
if from_pixels:
visual_keys = []
assert encoder in ["vc1s", "vc1l", "r3m18", "rrl18", "rrl50", "r3m50", "2d", "1d", "proprio"]
if encoder == "1d" or encoder == "2d":
visual_keys = [f'rgb:{cam_name}:84x84:{encoder}']
elif encoder == 'proprio':
visual_keys = []
else:
# cam_name is a list of cameras
if type(cam_name) == ListConfig:
visual_keys = []
for cam in cam_name:
visual_keys.append(f'rgb:{cam}:224x224:{encoder}')
else:
visual_keys = [f'rgb:{cam_name}:224x224:{encoder}']
print(f"Using visual keys {visual_keys}")
env = ObservationWrapper(env_name, visual_keys=visual_keys, encoder=encoder)
else:
env = gym.make(env_name)
return env
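# Hedged usage sketch (camera/encoder names are illustrative assumptions):
#   env = make_env("visual_franka_slide_random-v3", cam_name="top_cam",
#                  encoder="r3m18", from_pixels=True)
#   obs = env.reset()  # visual features concatenated with proprioception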
@hydra.main(config_name="bc.yaml", config_path="config")
def main(job_data: DictConfig):
OmegaConf.resolve(job_data)
job_data['policy_size'] = tuple(job_data['policy_size'])
exp_start = time.time()
OUT_DIR = os.getcwd()
if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR)
if not os.path.exists(OUT_DIR+'/iterations'): os.mkdir(OUT_DIR+'/iterations')
if not os.path.exists(OUT_DIR+'/logs'): os.mkdir(OUT_DIR+'/logs')
    if not job_data['from_pixels']:
job_data['env_name'] = job_data['env_name'].replace('_v2d', '')
    #exp_name = OUT_DIR.split('/')[-1]  ## TODO: Customize for logging
# Unpack args and make files for easy access
#logger = DataLog()
exp_name = job_data['env_name'] + '_pixels' + str(job_data['from_pixels']) + '_' + job_data['encoder']
logger = WandbLogger(
exp_name=exp_name,
config=job_data,
name=exp_name,
project=job_data['wandb_project'],
entity=job_data['wandb_entity'],
mode=job_data['wandb_mode'],
)
ENV_NAME = job_data['env_name']
EXP_FILE = OUT_DIR + '/job_data.yaml'
SEED = job_data['seed']
# base cases
if 'device' not in job_data.keys(): job_data['device'] = 'cpu'
assert 'data_file' in job_data.keys()
yaml_config = OmegaConf.to_yaml(job_data)
with open(EXP_FILE, 'w') as file: yaml.dump(yaml_config, file)
env = make_env(
env_name=job_data["env_name"],
cam_name=job_data["cam_name"],
encoder=job_data["encoder"],
from_pixels=job_data["from_pixels"]
)
# ===============================================================================
# Setup functions and environment
# ===============================================================================
control_seed(SEED)
env.seed(SEED)
paths_trace = Trace.load(job_data['data_file'])
bc_paths = []
for key, path in paths_trace.items():
path_dict = {}
traj_len = path['observations'].shape[0]
obs_list = []
ep_reward = 0.0
env.reset()
init_state_dict = {}
t0 = time.time()
        # use a distinct name so the outer trajectory `key` is not shadowed
        for state_key, value in path['env_infos']['state'].items():
            init_state_dict[state_key] = value[0]
env.set_env_state(init_state_dict)
obs = env.get_obs()
for step in range(traj_len-1):
next_obs, reward, done, env_info = env.step(path["actions"][step])
ep_reward += reward
obs_list.append(obs)
obs = next_obs
t1 = time.time()
obs_np = np.stack(obs_list, axis=0)
path_dict['observations'] = obs_np # [:-1]
path_dict['actions'] = path['actions'][()][:-1]
path_dict['env_infos'] = {'solved': path['env_infos']['solved'][()]}
print(f"Time to convert one trajectory: {(t1-t0)/60:4.2f}")
print("Converted episode reward:", ep_reward)
print("Original episode reward:", np.sum(path["rewards"]))
print(key, path_dict['observations'].shape, path_dict['actions'].shape)
bc_paths.append(path_dict)
expert_success = env.evaluate_success(bc_paths)
print(f"{bcolors.BOLD}{bcolors.OKGREEN}{exp_name} {bcolors.ENDC}")
print(f"{bcolors.BOLD}{bcolors.OKGREEN}Expert Success Rate: {expert_success}. {bcolors.ENDC}")
observation_dim = bc_paths[0]['observations'].shape[-1]
action_dim = bc_paths[0]['actions'].shape[-1]
print(f'Policy obs dim {observation_dim} act dim {action_dim}')
policy = GMMPolicy(
# network_kwargs
input_size=observation_dim,
output_size=action_dim,
hidden_size=job_data['policy_size'][0],
num_layers=len(job_data['policy_size']),
min_std=0.0001,
num_modes=5,
activation="softplus",
low_eval_noise=False,
# loss_kwargs
)
set_transforms = False
# ===============================================================================
# Model training
# ===============================================================================
print(f"{bcolors.OKBLUE}Training BC{bcolors.ENDC}")
policy.to(job_data['device'])
bc_agent = BC(
bc_paths,
policy,
epochs=job_data['eval_every_n'],
batch_size=job_data['bc_batch_size'],
lr=job_data['bc_lr'],
loss_type='MLE',
save_logs=True,
logger=logger,
set_transforms=set_transforms,
)
for ind in range(0, job_data['bc_epochs'], job_data['eval_every_n']):
policy.train()
bc_agent.train()
# bc_agent.train_h5()
policy.eval()
_, success_rate = evaluate_policy(
env=env,
policy=policy,
eval_logger=logger,
epoch=ind+job_data['eval_every_n'],
num_episodes=job_data['eval_traj'],
seed=job_data['seed'] + 123,
verbose=True,
device='cpu',
)
policy.to(job_data['device'])
exp_end = time.time()
print(f"{bcolors.BOLD}{bcolors.OKGREEN}Success Rate: {success_rate}. Time: {(exp_end - exp_start)/60:4.2f} minutes.{bcolors.ENDC}")
exp_end = time.time()
print(f"{bcolors.BOLD}{bcolors.OKGREEN}Success Rate: {success_rate}. Time: {(exp_end - exp_start)/60:4.2f} minutes.{bcolors.ENDC}")
# pickle.dump(bc_agent, open(OUT_DIR + '/iterations/agent_final.pickle', 'wb'))
pickle.dump(policy, open(OUT_DIR + '/iterations/policy_final.pickle', 'wb'))
wandb.finish()
if __name__ == '__main__':
main()
|
agenthive-dev
|
scripts/bc/run_bc_h5.py
|
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
class FCNetworkWithBatchNorm(nn.Module):
def __init__(self, obs_dim, act_dim,
hidden_sizes=(64,64),
nonlinearity='relu', # either 'tanh' or 'relu'
dropout=0, # probability to dropout activations (0 means no dropout)
*args, **kwargs,
):
super(FCNetworkWithBatchNorm, self).__init__()
self.obs_dim = obs_dim
self.act_dim = act_dim
assert type(hidden_sizes) == tuple
self.layer_sizes = (obs_dim, ) + hidden_sizes + (act_dim, )
self.device = 'cpu'
# hidden layers
self.fc_layers = nn.ModuleList([nn.Linear(self.layer_sizes[i], self.layer_sizes[i+1]) \
for i in range(len(self.layer_sizes) -1)])
self.nonlinearity = torch.relu if nonlinearity == 'relu' else torch.tanh
self.input_batchnorm = nn.BatchNorm1d(num_features=obs_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
out = x.to(self.device)
out = self.input_batchnorm(out)
for i in range(len(self.fc_layers)-1):
out = self.fc_layers[i](out)
out = self.dropout(out)
out = self.nonlinearity(out)
out = self.fc_layers[-1](out)
return out
def to(self, device):
self.device = device
return super().to(device)
def set_transformations(self, *args, **kwargs):
pass
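# Hedged usage sketch (dimensions are illustrative):
#   net = FCNetworkWithBatchNorm(obs_dim=6, act_dim=2, hidden_sizes=(64, 64))
#   net.eval()  # use running BatchNorm stats so a single input works
#   out = net(torch.zeros(1, 6))  # -> shape (1, 2)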
class BatchNormMLP(nn.Module):
def __init__(self, env_spec=None,
action_dim=None,
observation_dim=None,
hidden_sizes=(64,64),
min_log_std=-3,
init_log_std=0,
seed=None,
nonlinearity='relu',
dropout=0,
device='cpu',
*args, **kwargs,
):
"""
:param env_spec: specifications of the env (see utils/gym_env.py)
:param hidden_sizes: network hidden layer sizes (currently 2 layers only)
:param min_log_std: log_std is clamped at this value and can't go below
:param init_log_std: initial log standard deviation
:param seed: random seed
"""
super(BatchNormMLP, self).__init__()
self.device = device
self.n = env_spec.observation_dim if observation_dim is None else observation_dim # number of states
self.m = env_spec.action_dim if action_dim is None else action_dim # number of actions
self.min_log_std = min_log_std
# Set seed
# ------------------------
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
# Policy network
# ------------------------
self.model = FCNetworkWithBatchNorm(self.n, self.m, hidden_sizes, nonlinearity, dropout)
# make weights small
for param in list(self.model.parameters())[-2:]: # only last layer
param.data = 1e-2 * param.data
self.log_std = Variable(torch.ones(self.m) * init_log_std, requires_grad=True)
self.trainable_params = list(self.model.parameters()) + [self.log_std]
self.model.eval()
# Easy access variables
# -------------------------
self.log_std_val = np.float64(self.log_std.data.numpy().ravel())
self.param_shapes = [p.data.numpy().shape for p in self.trainable_params]
self.param_sizes = [p.data.numpy().size for p in self.trainable_params]
self.d = np.sum(self.param_sizes) # total number of params
# Placeholders
# ------------------------
self.obs_var = Variable(torch.randn(self.n), requires_grad=False)
# Utility functions
# ============================================
def to(self, device):
super().to(device)
self.model = self.model.to(device)
print(self.model)
self.device = device
return self
# Main functions
# ============================================
def get_action(self, observation):
o = np.float32(observation.reshape(1, -1))
self.obs_var.data = torch.from_numpy(o)
mean = self.model(self.obs_var).to('cpu').data.numpy().ravel()
noise = np.exp(self.log_std_val) * np.random.randn(self.m)
action = mean + noise
return [action, {'mean': mean, 'log_std': self.log_std_val, 'evaluation': mean}]
# ============================================
def forward(self, observations):
if type(observations) == np.ndarray: observations = torch.from_numpy(observations).float()
assert type(observations) == torch.Tensor
observations = observations.to(self.device)
out = self.model(observations)
return out
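# Hedged usage sketch (dimensions are illustrative):
#   policy = BatchNormMLP(observation_dim=6, action_dim=2)
#   action, info = policy.get_action(np.zeros(6))
#   info['evaluation']  # deterministic mean action, np.ndarray of shape (2,)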
|
agenthive-dev
|
scripts/bc/batch_norm_mlp.py
|
import torch
import numpy as np
import torch.nn as nn
import torch.distributions as D
import torch.nn.functional as F
class GMMPolicy(nn.Module):
def __init__(self,
# network_kwargs
input_size,
output_size,
hidden_size=1024,
num_layers=2,
min_std=0.0001,
num_modes=5,
activation="softplus",
low_eval_noise=False,
# loss_kwargs
loss_coef=1.0):
super().__init__()
self.num_modes = num_modes
self.output_size = output_size
self.min_std = min_std
if num_layers > 0:
sizes = [input_size] + [hidden_size] * num_layers
layers = [nn.BatchNorm1d(num_features=input_size)]
for i in range(num_layers):
layers += [nn.Linear(sizes[i], sizes[i+1]), nn.ReLU()]
layers += [nn.Linear(sizes[-2], sizes[-1])]
self.share = nn.Sequential(*layers)
else:
self.share = nn.Identity()
self.mean_layer = nn.Linear(hidden_size, output_size * num_modes)
self.logstd_layer = nn.Linear(hidden_size, output_size * num_modes)
self.logits_layer = nn.Linear(hidden_size, num_modes)
self.low_eval_noise = low_eval_noise
self.loss_coef = loss_coef
if activation == "softplus":
self.actv = F.softplus
else:
self.actv = torch.exp
self.trainable_params = list(self.share.parameters()) + \
list(self.mean_layer.parameters()) + \
list(self.logstd_layer.parameters()) + \
list(self.logits_layer.parameters())
def to(self, device):
super().to(device)
self.device = device
return self
def forward_fn(self, x):
# x: (B, input_size)
share = self.share(x)
means = self.mean_layer(share).view(-1, self.num_modes, self.output_size)
means = torch.tanh(means)
logits = self.logits_layer(share)
if self.training or not self.low_eval_noise:
logstds = self.logstd_layer(share).view(-1, self.num_modes, self.output_size)
stds = self.actv(logstds) + self.min_std
else:
stds = torch.ones_like(means) * 1e-4
return means, stds, logits
def get_action(self, observation):
o = np.float32(observation.reshape(1, -1))
o = torch.from_numpy(o).to(self.device)
means, stds, logits = self.forward_fn(o)
compo = D.Normal(loc=means, scale=stds)
compo = D.Independent(compo, 1)
mix = D.Categorical(logits=logits)
gmm = D.MixtureSameFamily(mixture_distribution=mix,
component_distribution=compo)
action = gmm.sample()
mean = gmm.mean
mean = mean.detach().cpu().numpy().ravel()
return [action, {'mean': mean, 'std': stds, 'evaluation': mean}]
def forward(self, x):
means, scales, logits = self.forward_fn(x)
compo = D.Normal(loc=means, scale=scales)
compo = D.Independent(compo, 1)
mix = D.Categorical(logits=logits)
gmm = D.MixtureSameFamily(mixture_distribution=mix,
component_distribution=compo)
return gmm
def mean_LL(self, x, target):
gmm_dist = self.forward(x)
# return mean, log_prob of the gmm
return gmm_dist.mean, gmm_dist.log_prob(target)
def loss_fn(self, gmm, target, reduction='mean'):
log_probs = gmm.log_prob(target)
loss = -log_probs
if reduction == 'mean':
return loss.mean() * self.loss_coef
elif reduction == 'none':
return loss * self.loss_coef
elif reduction == 'sum':
return loss.sum() * self.loss_coef
else:
raise NotImplementedError
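# Hedged usage sketch (batch/action sizes are illustrative):
#   policy = GMMPolicy(input_size=6, output_size=2)
#   policy.eval()  # freeze BatchNorm statistics in the shared trunk
#   x, target = torch.zeros(4, 6), torch.zeros(4, 2)
#   gmm = policy(x)                    # MixtureSameFamily over 5 tanh-squashed modes
#   nll = policy.loss_fn(gmm, target)  # scalar negative log-likelihood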
|
agenthive-dev
|
scripts/bc/gmm_policy.py
|
import torch
from rlhive.rl_envs import RoboHiveEnv
from rlhive.sim_algos.helpers.rrl_transform import RRLTransform
from torchrl.envs import (
CatTensors,
DoubleToFloat,
ObservationNorm,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode
def make_env(task, visual_transform, reward_scaling, device):
assert visual_transform in ("rrl", "r3m")
base_env = RoboHiveEnv(task, device=device)
env = make_transformed_env(
env=base_env, reward_scaling=reward_scaling, visual_transform=visual_transform
)
print(env)
# exit()
return env
def make_transformed_env(
env,
reward_scaling=5.0,
visual_transform="r3m",
stats=None,
):
"""
Apply transforms to the env (such as reward scaling and state normalization)
"""
env = TransformedEnv(env, SelectTransform("solved", "pixels", "observation"))
if visual_transform == "rrl":
vec_keys = ["rrl_vec"]
selected_keys = ["observation", "rrl_vec"]
env.append_transform(
Compose(
RRLTransform("resnet50", in_keys=["pixels"], download=True),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
        )  # Necessary to Compose RRLTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
elif visual_transform == "r3m":
vec_keys = ["r3m_vec"]
selected_keys = ["observation", "r3m_vec"]
env.append_transform(
Compose(
R3MTransform("resnet50", in_keys=["pixels"], download=True),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
else:
raise NotImplementedError
env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
out_key = "observation_vector"
env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
# we normalize the states
if stats is None:
_stats = {"loc": 0.0, "scale": 1.0}
else:
_stats = stats
env.append_transform(
ObservationNorm(**_stats, in_keys=[out_key], standard_normal=True)
)
env.append_transform(DoubleToFloat(in_keys=[out_key], in_keys_inv=[]))
return env
env = make_env(
task="visual_franka_slide_random-v3",
reward_scaling=5.0,
device=torch.device("cuda:0"),
visual_transform="rrl",
)
with torch.no_grad(), set_exploration_mode("random"):
td = env.reset()
td = env.rand_step()
print(td)
|
agenthive-dev
|
scripts/sac_mujoco/test.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import os
import hydra
import numpy as np
import torch
import torch.cuda
import tqdm
import wandb
from omegaconf import DictConfig
from rlhive.rl_envs import RoboHiveEnv
from rlhive.sim_algos.helpers.rrl_transform import RRLTransform
# from torchrl.objectives import SACLoss
from sac_loss import SACLoss
from torch import nn, optim
from torchrl.collectors import MultiaSyncDataCollector
from torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer
from torchrl.data.replay_buffers.storages import LazyMemmapStorage
from torchrl.envs import (
CatTensors,
DoubleToFloat,
ObservationNorm,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode
from torchrl.modules import MLP, NormalParamWrapper, SafeModule
from torchrl.modules.distributions import TanhNormal
from torchrl.modules.tensordict_module.actors import ProbabilisticActor, ValueOperator
from torchrl.objectives import SoftUpdate
from torchrl.trainers import Recorder
os.environ["WANDB_MODE"] = "offline" # offline sync. TODO: Remove this behavior
def make_env(task, visual_transform, reward_scaling, device, from_pixels):
assert visual_transform in ("rrl", "r3m")
base_env = RoboHiveEnv(task, device=device)
env = make_transformed_env(
env=base_env,
reward_scaling=reward_scaling,
visual_transform=visual_transform,
from_pixels=from_pixels,
)
print(env)
return env
def make_transformed_env(
env,
from_pixels,
reward_scaling=5.0,
visual_transform="r3m",
stats=None,
):
"""
Apply transforms to the env (such as reward scaling and state normalization)
"""
if from_pixels:
env = TransformedEnv(env, SelectTransform("solved", "pixels", "observation"))
if visual_transform == "rrl":
vec_keys = ["rrl_vec"]
selected_keys = ["observation", "rrl_vec"]
env.append_transform(
Compose(
RRLTransform("resnet50", in_keys=["pixels"], download=True),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
            )  # Necessary to Compose RRLTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
elif visual_transform == "r3m":
vec_keys = ["r3m_vec"]
selected_keys = ["observation", "r3m_vec"]
env.append_transform(
Compose(
R3MTransform("resnet50", in_keys=["pixels"], download=True),
FlattenObservation(-2, -1, in_keys=vec_keys),
)
) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
else:
raise NotImplementedError
else:
env = TransformedEnv(env, SelectTransform("solved", "observation"))
selected_keys = ["observation"]
env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
out_key = "observation_vector"
env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
# we normalize the states
if stats is None:
_stats = {"loc": 0.0, "scale": 1.0}
else:
_stats = stats
env.append_transform(
ObservationNorm(**_stats, in_keys=[out_key], standard_normal=True)
)
env.append_transform(DoubleToFloat(in_keys=[out_key], in_keys_inv=[]))
return env
def make_recorder(
task: str,
frame_skip: int,
record_interval: int,
actor_model_explore: object,
eval_traj: int,
env_configs: dict,
):
test_env = make_env(task=task, **env_configs)
recorder_obj = Recorder(
record_frames=eval_traj * test_env.horizon,
frame_skip=frame_skip,
policy_exploration=actor_model_explore,
recorder=test_env,
exploration_mode="mean",
record_interval=record_interval,
log_keys=["reward", "solved"],
out_keys={"reward": "r_evaluation", "solved": "success"},
)
return recorder_obj
def make_replay_buffer(
prb: bool,
buffer_size: int,
buffer_scratch_dir: str,
device: torch.device,
make_replay_buffer: int = 3,
):
if prb:
replay_buffer = TensorDictPrioritizedReplayBuffer(
alpha=0.7,
beta=0.5,
pin_memory=False,
prefetch=make_replay_buffer,
storage=LazyMemmapStorage(
buffer_size,
scratch_dir=buffer_scratch_dir,
device=device,
),
)
else:
replay_buffer = TensorDictReplayBuffer(
pin_memory=False,
prefetch=make_replay_buffer,
storage=LazyMemmapStorage(
buffer_size,
scratch_dir=buffer_scratch_dir,
device=device,
),
)
return replay_buffer
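# Hedged usage sketch (sizes and scratch dir are illustrative assumptions):
#   rb = make_replay_buffer(prb=False, buffer_size=100_000,
#                           buffer_scratch_dir="/tmp/rb_scratch",
#                           device=torch.device("cpu"))
#   rb.extend(tensordict.cpu())  # as in the training loop below
#   batch = rb.sample(256)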
def evaluate_success(env_success_fn, td_record: dict, eval_traj: int):
td_record["success"] = td_record["success"].reshape((eval_traj, -1))
paths = []
for solved_traj in td_record["success"]:
path = {"env_infos": {"solved": solved_traj.data.cpu().numpy()}}
paths.append(path)
success_percentage = env_success_fn(paths)
return success_percentage
@hydra.main(config_name="sac.yaml", config_path="config")
def main(args: DictConfig):
device = (
torch.device("cuda:0")
if torch.cuda.is_available()
and torch.cuda.device_count() > 0
and args.device == "cuda:0"
else torch.device("cpu")
)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Create Environment
env_configs = {
"reward_scaling": args.reward_scaling,
"visual_transform": args.visual_transform,
"device": args.device,
"from_pixels": args.from_pixels,
}
train_env = make_env(task=args.task, **env_configs)
# Create Agent
# Define Actor Network
in_keys = ["observation_vector"]
action_spec = train_env.action_spec
actor_net_kwargs = {
"num_cells": [256, 256],
"out_features": 2 * action_spec.shape[-1],
"activation_class": nn.ReLU,
}
actor_net = MLP(**actor_net_kwargs)
dist_class = TanhNormal
dist_kwargs = {
"min": action_spec.space.minimum,
"max": action_spec.space.maximum,
"tanh_loc": False,
}
actor_net = NormalParamWrapper(
actor_net,
scale_mapping=f"biased_softplus_{1.0}",
scale_lb=0.1,
)
in_keys_actor = in_keys
actor_module = SafeModule(
actor_net,
in_keys=in_keys_actor,
out_keys=[
"loc",
"scale",
],
)
actor = ProbabilisticActor(
spec=action_spec,
in_keys=["loc", "scale"],
module=actor_module,
distribution_class=dist_class,
distribution_kwargs=dist_kwargs,
default_interaction_mode="random",
return_log_prob=False,
)
# Define Critic Network
qvalue_net_kwargs = {
"num_cells": [256, 256],
"out_features": 1,
"activation_class": nn.ReLU,
}
qvalue_net = MLP(
**qvalue_net_kwargs,
)
qvalue = ValueOperator(
in_keys=["action"] + in_keys,
module=qvalue_net,
)
model = nn.ModuleList([actor, qvalue]).to(device)
# add forward pass for initialization with proof env
proof_env = make_env(task=args.task, **env_configs)
# init nets
with torch.no_grad(), set_exploration_mode("random"):
td = proof_env.reset()
td = td.to(device)
for net in model:
net(td)
del td
proof_env.close()
actor_model_explore = model[0]
# Create SAC loss
loss_module = SACLoss(
actor_network=model[0],
qvalue_network=model[1],
num_qvalue_nets=2,
gamma=args.gamma,
loss_function="smooth_l1",
)
# Define Target Network Updater
target_net_updater = SoftUpdate(loss_module, args.target_update_polyak)
# Make Off-Policy Collector
collector = MultiaSyncDataCollector(
create_env_fn=[train_env],
policy=actor_model_explore,
total_frames=args.total_frames,
max_frames_per_traj=args.frames_per_batch,
frames_per_batch=args.env_per_collector * args.frames_per_batch,
init_random_frames=args.init_random_frames,
reset_at_each_iter=False,
postproc=None,
split_trajs=True,
devices=[device], # device for execution
passing_devices=[device], # device where data will be stored and passed
seed=None,
pin_memory=False,
update_at_each_batch=False,
exploration_mode="random",
)
collector.set_seed(args.seed)
# Make Replay Buffer
replay_buffer = make_replay_buffer(
prb=args.prb,
buffer_size=args.buffer_size,
buffer_scratch_dir=args.buffer_scratch_dir,
device=device,
)
# Trajectory recorder for evaluation
recorder = make_recorder(
task=args.task,
frame_skip=args.frame_skip,
record_interval=args.record_interval,
actor_model_explore=actor_model_explore,
eval_traj=args.eval_traj,
env_configs=env_configs,
)
# Optimizers
params = list(loss_module.parameters()) + [loss_module.log_alpha]
optimizer_actor = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
rewards = []
rewards_eval = []
# Main loop
target_net_updater.init_()
collected_frames = 0
episodes = 0
pbar = tqdm.tqdm(total=args.total_frames)
r0 = None
loss = None
with wandb.init(project="SAC_TorchRL", name=args.exp_name, config=args):
for i, tensordict in enumerate(collector):
# update weights of the inference policy
collector.update_policy_weights_()
if r0 is None:
r0 = tensordict["reward"].sum(-1).mean().item()
pbar.update(tensordict.numel())
# extend the replay buffer with the new data
if "mask" in tensordict.keys():
# if multi-step, a mask is present to help filter padded values
current_frames = tensordict["mask"].sum()
tensordict = tensordict[tensordict.get("mask").squeeze(-1)]
else:
tensordict = tensordict.view(-1)
current_frames = tensordict.numel()
collected_frames += current_frames
episodes += args.env_per_collector
replay_buffer.extend(tensordict.cpu())
# optimization steps
if collected_frames >= args.init_random_frames:
(
total_losses,
actor_losses,
q_losses,
alpha_losses,
alphas,
entropies,
) = ([], [], [], [], [], [])
for _ in range(
args.env_per_collector * args.frames_per_batch * args.utd_ratio
):
# sample from replay buffer
sampled_tensordict = replay_buffer.sample(args.batch_size).clone()
loss_td = loss_module(sampled_tensordict)
actor_loss = loss_td["loss_actor"]
q_loss = loss_td["loss_qvalue"]
alpha_loss = loss_td["loss_alpha"]
loss = actor_loss + q_loss + alpha_loss
optimizer_actor.zero_grad()
loss.backward()
optimizer_actor.step()
# update qnet_target params
target_net_updater.step()
# update priority
if args.prb:
replay_buffer.update_priority(sampled_tensordict)
total_losses.append(loss.item())
actor_losses.append(actor_loss.item())
q_losses.append(q_loss.item())
alpha_losses.append(alpha_loss.item())
alphas.append(loss_td["alpha"].item())
entropies.append(loss_td["entropy"].item())
rewards.append(
(i, tensordict["reward"].sum().item() / args.env_per_collector)
)
wandb.log(
{
"train_reward": rewards[-1][1],
"collected_frames": collected_frames,
"episodes": episodes,
}
)
if loss is not None:
wandb.log(
{
"total_loss": np.mean(total_losses),
"actor_loss": np.mean(actor_losses),
"q_loss": np.mean(q_losses),
"alpha_loss": np.mean(alpha_losses),
"alpha": np.mean(alphas),
"entropy": np.mean(entropies),
}
)
td_record = recorder(None)
success_percentage = evaluate_success(
env_success_fn=train_env.evaluate_success,
td_record=td_record,
eval_traj=args.eval_traj,
)
if td_record is not None:
rewards_eval.append(
(
i,
td_record["total_r_evaluation"]
                        / 1,  # divide by the number of eval workers
)
)
wandb.log({"test_reward": rewards_eval[-1][1]})
wandb.log({"success": success_percentage})
if len(rewards_eval):
pbar.set_description(
f"reward: {rewards[-1][1]: 4.4f} (r0 = {r0: 4.4f}), test reward: {rewards_eval[-1][1]: 4.4f}"
)
del tensordict
gc.collect()
collector.shutdown()
if __name__ == "__main__":
main()
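# Hedged usage note (the override keys below mirror the fields read from
# `args` above; the exact names live in config/sac.yaml). Launched via hydra:
#   python sac.py task=visual_franka_slide_random-v3 device=cuda:0 seed=42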
|
agenthive-dev
|
scripts/sac_mujoco/sac.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from numbers import Number
from typing import Union
import numpy as np
import torch
from tensordict.nn import TensorDictSequential
from tensordict.tensordict import TensorDict, TensorDictBase
from torch import Tensor
from torchrl.envs.utils import set_exploration_mode, step_mdp
from torchrl.modules import SafeModule
from torchrl.objectives.common import LossModule
from torchrl.objectives.utils import (
distance_loss,
next_state_value as get_next_state_value,
)
try:
from functorch import vmap
FUNCTORCH_ERR = ""
_has_functorch = True
except ImportError as err:
FUNCTORCH_ERR = str(err)
_has_functorch = False
class SACLoss(LossModule):
"""SAC Loss module.
Args:
actor_network (SafeModule): the actor to be trained
        qvalue_network (SafeModule): a single Q-value network that will be replicated as many times as needed.
        num_qvalue_nets (int, optional): Number of Q-value networks to be trained. Default is 2.
gamma (Number, optional): gamma decay factor. Default is 0.99.
        priority_key (str, optional): Key where to write the priority value for prioritized replay buffers. Default is
`"td_error"`.
loss_function (str, optional): loss function to be used for the Q-value. Can be one of `"smooth_l1"`, "l2",
"l1", Default is "smooth_l1".
alpha_init (float, optional): initial entropy multiplier.
Default is 1.0.
min_alpha (float, optional): min value of alpha.
Default is 0.1.
max_alpha (float, optional): max value of alpha.
Default is 10.0.
        fixed_alpha (bool, optional): if ``True``, alpha is kept fixed at its initial value instead of being trained to match a target entropy. Default is :obj:`False`.
target_entropy (Union[str, Number], optional): Target entropy for the stochastic policy. Default is "auto".
delay_qvalue (bool, optional): Whether to separate the target Q value networks from the Q value networks used
for data collection. Default is :obj:`False`.
gSDE (bool, optional): Knowing if gSDE is used is necessary to create random noise variables.
Default is False
"""
delay_actor: bool = False
_explicit: bool = True
def __init__(
self,
actor_network: SafeModule,
qvalue_network: SafeModule,
num_qvalue_nets: int = 2,
gamma: Number = 0.99,
priotity_key: str = "td_error",
loss_function: str = "smooth_l1",
alpha_init: float = 1.0,
min_alpha: float = 0.1,
max_alpha: float = 10.0,
fixed_alpha: bool = False,
target_entropy: Union[str, Number] = "auto",
delay_qvalue: bool = True,
gSDE: bool = False,
):
if not _has_functorch:
raise ImportError(
f"Failed to import functorch with error message:\n{FUNCTORCH_ERR}"
)
super().__init__()
self.convert_to_functional(
actor_network,
"actor_network",
create_target_params=self.delay_actor,
funs_to_decorate=["forward", "get_dist_params"],
)
# let's make sure that actor_network has `return_log_prob` set to True
self.actor_network.return_log_prob = True
self.delay_qvalue = delay_qvalue
self.convert_to_functional(
qvalue_network,
"qvalue_network",
num_qvalue_nets,
create_target_params=self.delay_qvalue,
compare_against=list(actor_network.parameters()),
)
self.num_qvalue_nets = num_qvalue_nets
self.register_buffer("gamma", torch.tensor(gamma))
        self.priority_key = priority_key
self.loss_function = loss_function
try:
device = next(self.parameters()).device
except AttributeError:
device = torch.device("cpu")
self.register_buffer("alpha_init", torch.tensor(alpha_init, device=device))
self.register_buffer(
"min_log_alpha", torch.tensor(min_alpha, device=device).log()
)
self.register_buffer(
"max_log_alpha", torch.tensor(max_alpha, device=device).log()
)
self.fixed_alpha = fixed_alpha
if fixed_alpha:
self.register_buffer(
"log_alpha", torch.tensor(math.log(alpha_init), device=device)
)
else:
self.register_parameter(
"log_alpha",
torch.nn.Parameter(torch.tensor(math.log(alpha_init), device=device)),
)
if target_entropy == "auto":
if actor_network.spec["action"] is None:
raise RuntimeError(
"Cannot infer the dimensionality of the action. Consider providing "
"the target entropy explicitely or provide the spec of the "
"action tensor in the actor network."
)
target_entropy = -float(np.prod(actor_network.spec["action"].shape))
self.register_buffer(
"target_entropy", torch.tensor(target_entropy, device=device)
)
self.gSDE = gSDE
@property
def alpha(self):
self.log_alpha.data.clamp_(self.min_log_alpha, self.max_log_alpha)
with torch.no_grad():
alpha = self.log_alpha.exp()
return alpha
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
if self._explicit:
# slow but explicit version
return self._forward_explicit(tensordict)
else:
return self._forward_vectorized(tensordict)
def _loss_alpha(self, log_pi: Tensor) -> Tensor:
if torch.is_grad_enabled() and not log_pi.requires_grad:
raise RuntimeError(
"expected log_pi to require gradient for the alpha loss)"
)
if self.target_entropy is not None:
# we can compute this loss even if log_alpha is not a parameter
alpha_loss = -self.log_alpha.exp() * (log_pi.detach() + self.target_entropy)
else:
# placeholder
alpha_loss = torch.zeros_like(log_pi)
return alpha_loss
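    # Worked example (hedged, illustrative numbers): with target_entropy = -3
    # and log_pi = -2, the policy entropy estimate (-log_pi = 2) is above the
    # target, alpha_loss = -alpha * (-2 + (-3)) = 5 * alpha, and gradient
    # descent shrinks alpha; once entropy drops below the target the sign
    # flips and alpha grows, restoring exploration.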
def _forward_vectorized(self, tensordict: TensorDictBase) -> TensorDictBase:
obs_keys = self.actor_network.in_keys
tensordict_select = tensordict.select(
"reward", "done", "next", *obs_keys, "action"
)
actor_params = torch.stack(
[self.actor_network_params, self.target_actor_network_params], 0
)
tensordict_actor_grad = tensordict_select.select(
*obs_keys
) # to avoid overwriting keys
next_td_actor = step_mdp(tensordict_select).select(
*self.actor_network.in_keys
) # next_observation ->
tensordict_actor = torch.stack([tensordict_actor_grad, next_td_actor], 0)
tensordict_actor = tensordict_actor.contiguous()
with set_exploration_mode("random"):
if self.gSDE:
tensordict_actor.set(
"_eps_gSDE",
torch.zeros(tensordict_actor.shape, device=tensordict_actor.device),
)
# vmap doesn't support sampling, so we take it out from the vmap
td_params = vmap(self.actor_network.get_dist_params)(
tensordict_actor,
actor_params,
)
if isinstance(self.actor_network, TensorDictSequential):
sample_key = self.actor_network[-1].out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
else:
sample_key = self.actor_network.out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
tensordict_actor[sample_key] = self._rsample(tensordict_actor_dist)
tensordict_actor["sample_log_prob"] = tensordict_actor_dist.log_prob(
tensordict_actor[sample_key]
)
# repeat tensordict_actor to match the qvalue size
_actor_loss_td = (
tensordict_actor[0]
.select(*self.qvalue_network.in_keys)
.expand(self.num_qvalue_nets, *tensordict_actor[0].batch_size)
) # for actor loss
_qval_td = tensordict_select.select(*self.qvalue_network.in_keys).expand(
self.num_qvalue_nets,
*tensordict_select.select(*self.qvalue_network.in_keys).batch_size,
) # for qvalue loss
_next_val_td = (
tensordict_actor[1]
.select(*self.qvalue_network.in_keys)
.expand(self.num_qvalue_nets, *tensordict_actor[1].batch_size)
) # for next value estimation
tensordict_qval = torch.cat(
[
_actor_loss_td,
_next_val_td,
_qval_td,
],
0,
)
# cat params
q_params_detach = self.qvalue_network_params.detach()
qvalue_params = torch.cat(
[
q_params_detach,
self.target_qvalue_network_params,
self.qvalue_network_params,
],
0,
)
tensordict_qval = vmap(self.qvalue_network)(
tensordict_qval,
qvalue_params,
)
state_action_value = tensordict_qval.get("state_action_value").squeeze(-1)
(
state_action_value_actor,
next_state_action_value_qvalue,
state_action_value_qvalue,
) = state_action_value.split(
[self.num_qvalue_nets, self.num_qvalue_nets, self.num_qvalue_nets],
dim=0,
)
sample_log_prob = tensordict_actor.get("sample_log_prob").squeeze(-1)
(
action_log_prob_actor,
next_action_log_prob_qvalue,
) = sample_log_prob.unbind(0)
# E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
loss_actor = -(
state_action_value_actor.min(0)[0] - self.alpha * action_log_prob_actor
).mean()
next_state_value = (
next_state_action_value_qvalue.min(0)[0]
- self.alpha * next_action_log_prob_qvalue
)
target_value = get_next_state_value(
tensordict,
gamma=self.gamma,
pred_next_val=next_state_value,
)
pred_val = state_action_value_qvalue
td_error = (pred_val - target_value).pow(2)
loss_qval = (
distance_loss(
pred_val,
target_value.expand_as(pred_val),
loss_function=self.loss_function,
)
.mean(-1)
.sum()
* 0.5
)
tensordict.set("td_error", td_error.detach().max(0)[0])
loss_alpha = self._loss_alpha(sample_log_prob)
        if loss_qval.shape != loss_actor.shape:
raise RuntimeError(
f"QVal and actor loss have different shape: {loss_qval.shape} and {loss_actor.shape}"
)
td_out = TensorDict(
{
"loss_actor": loss_actor.mean(),
"loss_qvalue": loss_qval.mean(),
"loss_alpha": loss_alpha.mean(),
"alpha": self.alpha.detach(),
"entropy": -sample_log_prob.mean().detach(),
"state_action_value_actor": state_action_value_actor.mean().detach(),
"action_log_prob_actor": action_log_prob_actor.mean().detach(),
"next.state_value": next_state_value.mean().detach(),
"target_value": target_value.mean().detach(),
},
[],
)
return td_out
def _forward_explicit(self, tensordict: TensorDictBase) -> TensorDictBase:
loss_actor, sample_log_prob = self._loss_actor_explicit(tensordict.clone(False))
loss_qval, td_error = self._loss_qval_explicit(tensordict.clone(False))
tensordict.set("td_error", td_error.detach().max(0)[0])
loss_alpha = self._loss_alpha(sample_log_prob)
td_out = TensorDict(
{
"loss_actor": loss_actor.mean(),
"loss_qvalue": loss_qval.mean(),
"loss_alpha": loss_alpha.mean(),
"alpha": self.alpha.detach(),
"entropy": -sample_log_prob.mean().detach(),
# "state_action_value_actor": state_action_value_actor.mean().detach(),
# "action_log_prob_actor": action_log_prob_actor.mean().detach(),
# "next.state_value": next_state_value.mean().detach(),
# "target_value": target_value.mean().detach(),
},
[],
)
return td_out
def _rsample(
self,
dist,
):
# separated only for the purpose of making the sampling
# deterministic to compare methods
return dist.rsample()
def _sample_reparam(self, tensordict, params):
"""Given a policy param batch and input data in a tensordict, writes a reparam sample and log-prob key."""
with set_exploration_mode("random"):
if self.gSDE:
raise NotImplementedError
# vmap doesn't support sampling, so we take it out from the vmap
td_params = self.actor_network.get_dist_params(
tensordict,
params,
)
if isinstance(self.actor_network, TensorDictSequential):
sample_key = self.actor_network[-1].out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
else:
sample_key = self.actor_network.out_keys[0]
tensordict_actor_dist = self.actor_network.build_dist_from_params(
td_params
)
tensordict[sample_key] = self._rsample(tensordict_actor_dist)
tensordict["sample_log_prob"] = tensordict_actor_dist.log_prob(
tensordict[sample_key]
)
return tensordict
def _loss_actor_explicit(self, tensordict):
tensordict_actor = tensordict.clone(False)
actor_params = self.actor_network_params
tensordict_actor = self._sample_reparam(tensordict_actor, actor_params)
action_log_prob_actor = tensordict_actor["sample_log_prob"]
tensordict_qval = tensordict_actor.select(*self.qvalue_network.in_keys).expand(
self.num_qvalue_nets, *tensordict_actor.batch_size
) # for actor loss
qvalue_params = self.qvalue_network_params.detach()
tensordict_qval = vmap(self.qvalue_network)(
tensordict_qval,
qvalue_params,
)
state_action_value_actor = tensordict_qval.get("state_action_value").squeeze(-1)
state_action_value_actor = state_action_value_actor.min(0)[0]
# E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
loss_actor = (
self.alpha * action_log_prob_actor - state_action_value_actor
).mean()
return loss_actor, action_log_prob_actor
def _loss_qval_explicit(self, tensordict):
next_tensordict = step_mdp(tensordict)
next_tensordict = self._sample_reparam(
next_tensordict, self.target_actor_network_params
)
next_action_log_prob_qvalue = next_tensordict["sample_log_prob"]
next_state_action_value_qvalue = vmap(self.qvalue_network, (None, 0))(
next_tensordict,
self.target_qvalue_network_params,
)["state_action_value"].squeeze(-1)
next_state_value = (
next_state_action_value_qvalue.min(0)[0]
- self.alpha * next_action_log_prob_qvalue
)
pred_val = vmap(self.qvalue_network, (None, 0))(
tensordict,
self.qvalue_network_params,
)["state_action_value"].squeeze(-1)
target_value = get_next_state_value(
tensordict,
gamma=self.gamma,
pred_next_val=next_state_value,
)
        # 1/2 * E[(Q(s,a) - (r + gamma * (Q(s',a') - alpha * log pi(a'|s'))))^2]
loss_qval = (
distance_loss(
pred_val,
target_value.expand_as(pred_val),
loss_function=self.loss_function,
)
.mean(-1)
.sum()
* 0.5
)
td_error = (pred_val - target_value).pow(2)
return loss_qval, td_error
if __name__ == "__main__":
from tensordict.nn import TensorDictModule
from torch import nn
from torchrl.data import BoundedTensorSpec
# Tests the vectorized version of SAC-v2 against plain implementation
from torchrl.modules import ProbabilisticActor, ValueOperator
from torchrl.modules.distributions import TanhNormal
torch.manual_seed(0)
action_spec = BoundedTensorSpec(-1, 1, shape=(3,))
class Splitter(nn.Linear):
def forward(self, x):
loc, scale = super().forward(x).chunk(2, -1)
return loc, scale.exp()
actor_module = TensorDictModule(
Splitter(6, 6), in_keys=["obs"], out_keys=["loc", "scale"]
)
actor = ProbabilisticActor(
spec=action_spec,
in_keys=["loc", "scale"],
module=actor_module,
distribution_class=TanhNormal,
default_interaction_mode="random",
return_log_prob=False,
)
class QVal(nn.Linear):
def forward(self, s: Tensor, a: Tensor) -> Tensor:
return super().forward(torch.cat([s, a], -1))
qvalue = ValueOperator(QVal(9, 1), in_keys=["obs", "action"])
_rsample_old = SACLoss._rsample
def _rsample_new(self, dist):
return torch.ones_like(_rsample_old(self, dist))
SACLoss._rsample = _rsample_new
loss = SACLoss(actor, qvalue)
for batch in ((), (2, 3)):
td_input = TensorDict(
{
"obs": torch.rand(*batch, 6),
"action": torch.rand(*batch, 3).clamp(-1, 1),
"next": {"obs": torch.rand(*batch, 6)},
"reward": torch.rand(*batch, 1),
"done": torch.zeros(*batch, 1, dtype=torch.bool),
},
batch,
)
loss._explicit = True
loss0 = loss(td_input)
loss._explicit = False
loss1 = loss(td_input)
print("a", loss0["loss_actor"] - loss1["loss_actor"])
print("q", loss0["loss_qvalue"] - loss1["loss_qvalue"])
|
agenthive-dev
|
scripts/sac_mujoco/sac_loss.py
|