python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
from functools import partial
from einops.layers.torch import Rearrange, Reduce
from crlapi.sl.architectures.mixture_model import MixtureLayer,SoftMaxGateModule,HardSoftMaxGateModule,Gate,MoE,MoE_RandomGrow,MoE_UsageGrow
class LinearSoftGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=SoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
print(x.size())
return self.module(x)
class LinearHardGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=HardSoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
return self.module(x)
class PreNormResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
return self.fn(self.norm(x)) + x
def FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):
return nn.Sequential(
dense(dim, dim * expansion_factor),
PrintModule("After dense"),
nn.GELU(),
nn.Dropout(dropout),
dense(dim * expansion_factor, dim),
nn.Dropout(dropout)
)
class PrintModule(nn.Module):
def __init__(self,msg=""):
super().__init__()
self.msg=msg
def forward(self,x):
print(self.msg," : ",x.size())
return x
def MLPMixer(task, patch_size, dim, depth, expansion_factor = 4, dropout = 0.):
image_size= task.input_shape[1]
assert image_size==task.input_shape[2]
channels=task.input_shape[0]
num_classes=task.n_classes
assert (image_size % patch_size) == 0, 'image must be divisible by patch size'
num_patches = (image_size // patch_size) ** 2
chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear
return nn.Sequential(
PrintModule("L1"),
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
PrintModule("L2"),
nn.Linear((patch_size ** 2) * channels, dim),
PrintModule("L3"),
*[nn.Sequential(
PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, chan_first)),
PreNormResidual(dim, FeedForward(dim, expansion_factor, dropout, chan_last)),
PrintModule("L."),
) for _ in range(depth)],
PrintModule("L4"),
nn.LayerNorm(dim),
Reduce('b n c -> b c', 'mean'),
nn.Linear(dim, num_classes)
)
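# --- Hypothetical usage sketch (not part of the original file). It shows the
# --- interface MLPMixer expects: a task object exposing `input_shape` (C, H, W)
# --- and `n_classes`. The SimpleNamespace stub and all sizes below are
# --- assumptions for illustration; the PrintModule layers above will log
# --- intermediate shapes on every forward pass.
def _mlpmixer_usage_sketch():
    import torch
    from types import SimpleNamespace
    task = SimpleNamespace(input_shape=(3, 32, 32), n_classes=10)   # CIFAR-like stub
    mixer = MLPMixer(task, patch_size=4, dim=128, depth=2)
    logits = mixer(torch.randn(2, 3, 32, 32))
    return logits.shape   # torch.Size([2, 10])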
|
alma-main
|
crlapi/sl/architectures/mlpmixer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
import time
import itertools
from copy import deepcopy
from pydoc import locate
from fvcore.nn import FlopCountAnalysis as FCA
from torchvision.models import *
from . import sp
# ----------------------------------------------------------------
# Models
# ----------------------------------------------------------------
class module_list_wrapper(nn.Module):
def __init__(self, layers):
super().__init__()
self.layer = nn.Sequential(*layers)
def forward(self, x):
out = self.layer(x)
return out
def __getitem__(self, i):
return self.layer[i]
def __len__(self):
return len(self.layer)
def sp_vgg(model, n_classes=10, dimh=16, method='none'):
cfgs = {
'vgg11': [1, 'M', 2, 'M', 4, 4, 'M', 8, 8, 'M', 8, 8, 'M'],
'vgg14': [1, 1, 'M', 2, 2, 'M', 4, 4, 'M', 8, 8, 'M', 8, 8, 'M'],
'vgg16': [1, 1, 'M', 2, 2, 'M', 4, 4, 4, 'M', 8, 8, 8, 'M', 8, 8, 8, 'M'],
'vgg19': [1, 1, 'M', 2, 2, 'M', 4, 4, 4, 4, 'M', 8, 8, 8, 8, 'M', 8, 8, 8, 8, 'M'],
}
cfg = cfgs[model]
next_layers = {}
prev_idx = -1
in_channels = 3
net = []
n = len(cfg)
for i, x in enumerate(cfg):
if x == 'M':
net.append(nn.MaxPool2d(kernel_size=2, stride=2))
elif x == 'A':
net.append(nn.AvgPool2d(kernel_size=2, stride=2))
else:
if method == 'none':
net.append(sp.Conv2d(in_channels, 64*x, kernel_size=3, padding=1, actv_fn='relu', has_bn=True))
in_channels = 64*x
else:
net.append(sp.Conv2d(in_channels, dimh, kernel_size=3, padding=1, actv_fn='relu', has_bn=True))
in_channels = dimh
if prev_idx >= 0: next_layers[prev_idx] = [i]
prev_idx = i
net.append(sp.Conv2d(in_channels, n_classes, kernel_size=1, padding=0, actv_fn='none', can_split=False))
net.append(nn.Flatten())
net = module_list_wrapper(net)
old_fwd = net.forward
next_layers[prev_idx] = [n]
layer2split = list(next_layers.keys())
return net, next_layers, layer2split
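# --- Hypothetical usage sketch (not part of the original file): build the
# --- splittable VGG backbone for 32x32 inputs. `next_layers` maps each conv
# --- index to the layer that must grow passively when it splits, and
# --- `layers_to_split` lists the indices eligible for active splitting.
def _sp_vgg_usage_sketch():
    net, next_layers, layers_to_split = sp_vgg('vgg11', n_classes=10, dimh=16, method='fireflyn')
    logits = net(torch.randn(2, 3, 32, 32))   # torch.Size([2, 10]) after the 1x1 conv head + Flatten
    first = layers_to_split[0]                # index of the first splittable conv in the cfg
    return logits.shape, next_layers[first]   # e.g. [2]: the conv that grows when `first` splits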
|
alma-main
|
crlapi/sl/architectures/firefly_vgg/models.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
from torch.optim import *
from crlapi.sl.architectures.firefly_vgg import sp
###############################################################################
#
# Split Network
#
###############################################################################
class SpNet(nn.Module):
def __init__(self):
super(SpNet, self).__init__()
self.net = None
self.next_layers = {}
self.previous_layers = {}
self.layers_to_split = []
self.verbose = True
self.n_elites = 0
self.num_group = 1
def create_optimizer(self):
pass
def forward(self, x):
pass
def split(self):
pass
def clear(self):
for layer in self.net:
if isinstance(layer, sp.SpModule):
layer.clear()
def get_num_elites(self):
n = 0
for i in self.layers_to_split:
n += self.net[i].module.weight.shape[0]
self.n_elites = int(n * self.grow_ratio)
def get_num_elites_group(self, group_num):
for g in range(group_num):
n = 0
for i in self.layers_to_split_group[g]:
n += self.net[i].module.weight.shape[0]
try:
self.n_elites_group[g] = int(n * self.grow_ratio)
except:
self.n_elites_group = {}
self.n_elites_group[g] = int(n * self.grow_ratio)
def sp_threshold(self):
ws, wi = torch.sort(torch.cat([self.net[i].w for i in self.layers_to_split]).reshape(-1))
total= ws.shape[0]
threshold = ws[self.n_elites]
return threshold
def sp_threshold_group(self, group_num):
ws, wi = torch.sort(torch.cat([self.net[i].w for i in self.layers_to_split_group[group_num]]).reshape(-1))
total= ws.shape[0]
threshold = ws[self.n_elites_group[group_num]]
return threshold
def save(self, path='./tmp.pt'):
torch.save(self.state_dict(), path)
def load(self, path='./tmp.pt'):
self.load_state_dict(torch.load(path))
def get_num_params(self):
model_n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
return model_n_params
def spe(self, dataloader, n_batches):
pass
def spf(self, dataloader, n_batches):
pass
def spff(self, dataloader, n_batches):
pass
def split(self, split_method, dataset, n_batches=-1):
self.num_group = 1 if self.config.model != 'mobile' else 2
if split_method not in ['random', 'exact', 'fast', 'firefly', 'fireflyn']:
raise NotImplementedError
if self.verbose:
print('[INFO] start splitting ...')
start_time = time.time()
self.net.eval()
if self.num_group == 1:
self.get_num_elites()
else:
self.get_num_elites_group(self.num_group)
split_fn = {
'exact': self.spe,
'fast': self.spf,
'firefly': self.spff,
'fireflyn': self.spffn,
}
if split_method != 'random':
split_fn[split_method](dataset, n_batches)
n_neurons_added = {}
if split_method == 'random':
n_layers = len(self.layers_to_split)
n_total_neurons = 0
threshold = 0.
for l in self.layers_to_split:
n_total_neurons += self.net[l].get_n_neurons()
n_grow = int(n_total_neurons * self.grow_ratio)
n_new1 = np.random.choice(n_grow, n_layers, replace=False)
n_new1 = np.sort(n_new1)
n_news = []
for i in range(len(n_new1) - 1):
if i == 0:
n_news.append(n_new1[i])
n_news.append(n_new1[i + 1] - n_new1[i])
else:
n_news.append(n_new1[i + 1] - n_new1[i])
n_news[-1] += 1
for i, n_new_ in zip(reversed(self.layers_to_split), n_news):
if isinstance(self.net[i], sp.SpModule) and self.net[i].can_split:
n_new, idx = self.net[i].random_split(n_new_)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed split this layer
for j in self.next_layers[i]:
self.net[j].passive_split(idx)
elif split_method == 'fireflyn':
if self.num_group == 1:
threshold = self.sp_threshold()
for i in reversed(self.layers_to_split):
if isinstance(self.net[i], sp.SpModule) and self.net[i].can_split:
if self.num_group != 1:
group = self.total_group[i]
threshold = self.sp_threshold_group(group)
n_new, split_idx, new_idx = self.net[i].spffn_active_grow(threshold)
sp_new = split_idx.shape[0] if split_idx is not None else 0
n_neurons_added[i] = (sp_new, n_new-sp_new)
if self.net[i].kh == 1:
isfirst = True
else:
isfirst = False
for j in self.next_layers[i]:
print('passive', self.net[j].module.weight.shape)
self.net[j].spffn_passive_grow(split_idx, new_idx)
else:
threshold= self.sp_threshold()
# actual splitting
for i in reversed(self.layers_to_split):
if isinstance(self.net[i], sp.SpModule) and self.net[i].can_split:
n_new, idx = self.net[i].active_split(threshold)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed split this layer
for j in self.next_layers[i]:
self.net[j].passive_split(idx)
self.net.train()
self.clear() # cleanup auxiliaries
self.create_optimizer() # re-initialize optimizer
end_time = time.time()
if self.verbose:
print('[INFO] splitting takes %10.4f sec. Threshold value is %10.9f' % (
end_time - start_time, threshold))
if split_method == 'fireflyn':
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows (sp %d | new %d)' % (x, y1, y2) for x, (y1, y2) in n_neurons_added.items()]))
else:
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows %d neurons' % (x, y) for x, y in n_neurons_added.items()]))
return n_neurons_added
|
alma-main
|
crlapi/sl/architectures/firefly_vgg/sp/net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
from .conv import Conv2d
from .net import SpNet
from .module import SpModule
__all__ = [
'SpNet', 'SpModule',
'Conv2d',
]
|
alma-main
|
crlapi/sl/architectures/firefly_vgg/sp/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
from torch.optim import *
###############################################################################
#
# Split Block Abstract Class
#
###############################################################################
class SpModule(nn.Module):
def __init__(self,
can_split=True,
actv_fn='relu',
has_bn=False,
has_bias=True,
rescale=1.0):
super(SpModule, self).__init__()
# properties
self.can_split = can_split
self.actv_fn = actv_fn
self.has_bn = has_bn
self.has_bias = has_bias
self.epsilon = 1e-2
self.K = 70
# modules
self.module = None
self.bn = None
# auxiliaries
self.w = None
self.v = None
self.y = None
self.S = []
self.leaky_alpha = 0.2
def clear(self):
del self.w
del self.v
del self.y
del self.S
try:
del self.vni
except:
pass
try:
del self.vno
except:
pass
self.w = self.v = self.y = None
self.S = []
def get_device(self):
try:
return 'cuda' if self.module.weight.data.is_cuda else 'cpu'
except:
raise Exception('[ERROR] no module initialized')
def _d2_actv(self, x, beta=3.):
if self.actv_fn == 'relu':
# use 2nd order derivative of softplus for approximation
s = torch.sigmoid(x*beta)
return beta*s*(1.-s)
elif self.actv_fn == 'softplus':
s = torch.sigmoid(x)
return s*(1.-s)
elif self.actv_fn == 'rbf':
return (x.pow(2)-1)*(-x.pow(2)/2).exp()
elif self.actv_fn == 'leaky_relu':
s = torch.sigmoid(x*beta)
return beta*s*(1.-s)*(1.-self.leaky_alpha)
elif self.actv_fn == 'swish':
s = torch.sigmoid(x)
return s*(1.-s) + s + x*s*(1.-s) - (s.pow(2) + 2.*x*s.pow(2)*(1.-s))
elif self.actv_fn == 'sigmoid':
s = torch.sigmoid(x)
return (s-s.pow(2)) * (1.-s).pow(2)
elif self.actv_fn == 'tanh':
h = torch.tanh(x)
return -2.*h * (1-h.pow(2))
elif self.actv_fn == 'none':
return torch.ones_like(x)
else:
raise NotImplementedError
def _activate(self, x):
if self.actv_fn == 'relu':
return F.relu(x)
elif self.actv_fn == 'leaky_relu':
return F.leaky_relu(x, self.leaky_alpha)
elif self.actv_fn == 'swish':
return x * torch.sigmoid(x)
elif self.actv_fn == 'rbf':
return (-x.pow(2)/2).exp()
elif self.actv_fn == 'sigmoid':
return torch.sigmoid(x)
elif self.actv_fn == 'tanh':
return torch.tanh(x)
elif self.actv_fn == 'softplus':
return F.softplus(x)
elif self.actv_fn == 'none':
return x
else:
raise NotImplementedError
def forward(self, x):
x = self.module(x)
if self.has_bn:
x = self.bn(x)
return self._activate(x)
def active_split(self, threshold):
pass
def passive_split(self, idx):
pass
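# --- Hypothetical sanity check (not part of the original file): for
# --- actv_fn='softplus', _d2_actv returns sigmoid(x) * (1 - sigmoid(x)), the
# --- exact second derivative of softplus; the 'relu' branch reuses the same
# --- expression with a sharpness `beta` as a smooth surrogate.
def _d2_actv_softplus_check():
    x = torch.linspace(-3., 3., 7, requires_grad=True)
    (g,) = torch.autograd.grad(F.softplus(x).sum(), x, create_graph=True)
    (h,) = torch.autograd.grad(g.sum(), x)     # d^2/dx^2 softplus(x) via autograd
    s = torch.sigmoid(x)
    return torch.allclose(h, s * (1. - s))     # True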
|
alma-main
|
crlapi/sl/architectures/firefly_vgg/sp/module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, grad
from torch.distributions import Normal
from torch.optim import *
from .module import SpModule
from crlapi.sl.architectures.firefly_vgg import sp
###############################################################################
#
# Conv2d Split Layer
#
###############################################################################
class Conv2d(SpModule):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=1,
groups = 1,
can_split=True,
bias=True,
actv_fn='relu',
has_bn=False,
rescale=1.0):
super().__init__(can_split=can_split,
actv_fn=actv_fn,
has_bn=has_bn,
has_bias=bias,
rescale=rescale)
if has_bn:
self.bn = nn.BatchNorm2d(out_channels)
self.has_bias = False
if isinstance(kernel_size, int):
self.kh = self.kw = kernel_size
else:
assert len(kernel_size) == 2
self.kh, self.kw = kernel_size
if isinstance(padding, int):
self.ph = self.pw = padding
else:
assert len(padding) == 2
self.ph, self.pw = padding
if isinstance(stride, int):
self.dh = self.dw = stride
else:
assert len(stride) == 2
self.dh, self.dw = stride
self.groups = groups
self.module = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
groups = groups,
stride=stride,
padding=padding,
bias=self.has_bias)
def get_conv_patches(self, x):
x = F.pad(x, (self.pw, self.pw, self.ph, self.ph)) # pad (left, right, top, bottom)
# get all image windows of size (kh, kw) and stride (dh, dw)
patches = x.unfold(2, self.kh, self.dh).unfold(3, self.kw, self.dw)
patches = patches.permute(0, 2, 3, 1, 4, 5).contiguous() # [B, H, W, C_in, kh, kw]
return patches
###########################################################################
# fast split
###########################################################################
def spf_reset(self):
# y is a dummy variable for storing gradients of v
W = self.module.weight.data
self.y = nn.Parameter(torch.zeros_like(W))
self.y.retain_grad()
self.v = nn.Parameter(torch.zeros_like(W))
self.v.data.uniform_(-1e-1, 1e-1)
self.v.retain_grad()
self.w = 0.
def spf_update_v(self):
v = self.v
sv = self.y.grad
vv = v.pow(2).sum([1,2,3], keepdim=True)
vsv = (sv * v).sum([1,2,3], keepdim=True)
v_grad = 2. * (sv * vv - v * vsv) / vv.pow(2)
self.v.grad = v_grad
self.y.grad = None
def spf_update_w(self, n=1.):
v = self.v
sv = self.y.grad
vv = v.pow(2).sum([1,2,3])
vsv = (sv * v).sum([1,2,3])
self.w += (vsv / vv).data.clone() / n
def spf_forward(self, x):
out = self.module(x) # [B, C_out]
bn_coef = 1.
if self.has_bn:
self.bn.eval() # fix running mean/variance
out = self.bn(out)
# calculate bn_coef
bn_coef = 1. / torch.sqrt(self.bn.running_var + 1e-5) * self.bn.weight
bn_coef = bn_coef.view(1, -1, 1, 1) # [1, C_out, 1, 1]
# normalize v
v_norm = self.v.pow(2).sum([1,2,3], keepdim=True).sqrt().data
self.v.data = self.v.data / v_norm
patches = self.get_conv_patches(x)
B, H, W, C_in, kh, kw = patches.size()
x = patches.reshape(B*H*W, -1)
left = x.mm(self.y.view(-1, C_in*kh*kw).t()).view(B, H, W, -1).permute(0,3,1,2)
right = x.mm(self.v.view(-1, C_in*kh*kw).t()).view(B, H, W, -1).permute(0,3,1,2)
aux = self._d2_actv(out) * (bn_coef*left) * (bn_coef*right)
out = self._activate(out) + aux
return out
###########################################################################
# firefly split + new neurons
###########################################################################
def spffn_add_new(self, enlarge_out=True, enlarge_in=True):
self.eout = self.K if enlarge_out else 0
self.ein = self.K if enlarge_in else 0
if self.groups == 1:
C_out, C_in = self.module.weight.data.shape[:2]
else:
C_out, _ = self.module.weight.data.shape[:2]
C_in = C_out
device = self.get_device()
if self.has_bn and self.eout > 0:
new_bn = nn.BatchNorm2d(C_out+self.eout).to(device)
new_bn.weight.data[:C_out] = self.bn.weight.data.clone()
new_bn.bias.data[:C_out] = self.bn.bias.data.clone()
new_bn.running_mean.data[:C_out] = self.bn.running_mean.data.clone()
new_bn.running_var.data[:C_out] = self.bn.running_var.data.clone()
new_bn.weight.data[C_out:] = 1.
new_bn.bias.data[C_out:] = 0.
self.bn = new_bn
self.bn.eval()
if self.groups != 1:
self.groups += self.K
new_layer = nn.Conv2d(in_channels=C_in+self.ein,
out_channels=C_out+self.eout,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias, groups = self.groups).to(device)
new_layer.weight.data[:C_out, :C_in, :, :] = self.module.weight.data.clone()
if self.ein > 0:
new_layer.weight.data[:, C_in:, :, :] = 0.
if self.eout > 0:
new_layer.weight.data[C_out:, :, :, :] = 0.
self.module = new_layer
self.module.eval()
def spffn_penalty(self):
penalty = 0.
if self.can_split: penalty += self.v.pow(2).sum()
if self.eout > 0: penalty += 1e-2 * self.vno.pow(2).sum()
if penalty > 0: (penalty * 1e-2).backward()
def spffn_clip(self):
if self.ein > 0: # since output is just 1
self.vni.data.clamp_(-1e-2, 1e-2)
def spffn_reset(self):
if self.groups == 1:
C_out, C_in, kh, kw = self.module.weight.data.shape
else:
C_out, C_in, kh, kw = self.module.weight.data.shape
C_in = C_out
device = self.get_device()
self.y = nn.Parameter(torch.zeros(1,C_out,1,1)).to(device)
self.y.retain_grad()
self.w = 0.
if self.can_split:
v = torch.zeros(C_out-self.eout, C_in-self.ein,kh,kw).to(device)
v.uniform_(-1e-1, 1e-1)
self.v = nn.Parameter(v)
if self.ein > 0:
vni = torch.zeros(C_out, self.ein, kh, kw).to(device)
vni.uniform_(-1e-2, 1e-2)
self.vni = nn.Parameter(vni)
if self.eout > 0:
vno = torch.zeros(self.eout, C_in-self.ein, kh, kw).to(device)
n = kh * kw * (C_in - self.ein)
stdv = 1. / math.sqrt(n)
#vno.uniform_(-stdv, stdv)
vno.normal_(0, 0.1)
self.vno = nn.Parameter(vno)
def spffn_update_w(self, d, output = False):
if not output:
self.w += (self.y.grad.data / d).view(-1)
self.y.grad = None
else:
y_grad = grad(self.output.mean(), self.y)
self.w += (self.y.grad.data / y_grad[0].data / d).view(-1)
self.y.grad = None
def spffn_forward(self, x, alpha=-1):
out = self.module(x) # [out+eout, in+ein, H, W]
patches = self.get_conv_patches(x)
B, H, W, C_in, kh, kw = patches.size()
C_out = out.shape[1]
cin, cout = C_in - self.ein, C_out - self.eout
x = patches.view(B*H*W, -1, kh*kw)
if self.ein > 0:
x1, x2 = x[:,:cin,:].view(B*H*W, -1), x[:,cin:,:].view(B*H*W,-1)
else:
x1 = x.view(B*H*W, -1)
if self.can_split:
noise_v = x1.mm(self.v.view(-1, cin*kh*kw).t()).view(B,H,W,-1).permute(0,3,1,2) # [B,cout,H,W]
if alpha >= 0.:
noise_v = (noise_v.detach() * self.y[:,:cout,:,:] + noise_v * alpha)
if self.eout > 0:
noise_vo = x1.mm(self.vno.view(-1, cin*kh*kw).t()).view(B,H,W,-1).permute(0,3,1,2)
if alpha >= 0.:
noise_vo = (noise_vo.detach() * self.y[:,cout:,:,:] + noise_vo * alpha)
if self.ein > 0:
noise_vi1 = x2.mm(self.vni.view(-1, self.ein*kh*kw).t())
if self.eout > 0:
noise_vi1, noise_vi2 = noise_vi1[:,:cout], noise_vi1[:,cout:] # [B*H*W, cout/eout]
noise_vi1 = noise_vi1.view(B,H,W,-1).permute(0,3,1,2)
noise_vi2 = noise_vi2.view(B,H,W,-1).permute(0,3,1,2)
else:
noise_vi1 = noise_vi1.view(B,H,W,-1).permute(0,3,1,2)
o1_plus = o1_minus = o2 = 0.
if self.can_split:
o1_plus = out[:,:cout,:,:] + noise_v # [B, cout, H, W]
o1_minus = out[:,:cout,:,:] - noise_v # [B, cout, H, W]
if self.eout > 0:
o2 = out[:,cout:,:,:] + noise_vo
if self.ein > 0:
o1_plus = o1_plus + noise_vi1
o1_minus = o1_minus + noise_vi1
if self.eout > 0:
o2 = o2 + noise_vi2
if self.eout > 0:
o1_plus = torch.cat((o1_plus, o2), 1)
o1_minus = torch.cat((o1_minus, o2), 1)
if self.has_bn:
o1_plus = self.bn(o1_plus)
o1_minus = self.bn(o1_minus)
o1_plus = self._activate(o1_plus)
o1_minus = self._activate(o1_minus)
output = (o1_plus + o1_minus) / 2.
else:
o1 = out[:,:cout,:,:]
if self.eout > 0:
o2 = out[:,cout:,:,:] + noise_vo
if self.ein > 0:
o2 = o2 + noise_vi2
if self.ein > 0:
o1 = o1 + noise_vi1
if self.eout > 0:
o1 = torch.cat((o1, o2), 1)
if self.has_bn:
o1 = self.bn(o1)
output = self._activate(o1)
self.output = output
return output
###########################################################################
# firefly split
###########################################################################
def spff_reset(self):
W = self.module.weight.data
device = self.get_device()
self.y = nn.Parameter(torch.zeros(1, W.shape[0], 1, 1)).to(device)
self.y.retain_grad()
self.v = nn.Parameter(torch.zeros_like(W))
self.v.data.uniform_(-1e-1, 1e-1)
self.w = 0.
def spff_update_w(self, d):
self.w += (self.y.grad.data/d).view(-1)
def spff_scale_v(self):
self.v.data = self.v.data * 1e2
def spff_forward(self, x, alpha=-1):
out = self.module(x)
patches = self.get_conv_patches(x)
B, H, W, C_in, kh, kw = patches.size()
x = patches.view(B*H*W, -1)
if alpha >= 0.:
noise_out = x.mm(self.v.view(-1, C_in*kh*kw).t())
noise_out = noise_out.view(B, H, W, -1).permute(0, 3, 1, 2)
noise_out = (self.y * noise_out.detach() + noise_out * alpha)
else:
noise_out = x.mm(self.v.view(-1, C_in*kh*kw).t())
noise_out = noise_out.view(B, H, W, -1).permute(0, 3, 1, 2)
out_plus = out + noise_out
out_minus = out - noise_out
if self.has_bn:
self.bn.eval()
out_plus = self.bn(out_plus)
out_minus = self.bn(out_minus)
out_plus = self._activate(out_plus)
out_minus = self._activate(out_minus)
return (out_plus + out_minus) / 2.
###########################################################################
# exact split
###########################################################################
def spe_forward(self, x):
out = self.module(x) # [B, C_out, H, W]
if self.has_bn:
self.bn.eval() # fix running mean/variance
out = self.bn(out)
# calculate bn_coff
bn_coff = 1. / torch.sqrt(self.bn.running_var + 1e-5) * self.bn.weight
bn_coff = bn_coff.view(1, -1, 1, 1) # [1, C_out, 1, 1]
first_run = (len(self.S) == 0)
# calculate 2nd order derivative of the activation
nabla2_out = self._d2_actv(out) # [B, C_out, H, W]
patches = self.get_conv_patches(x)
B, H, W, C_in, KH, KW = patches.size()
C_out = out.shape[1]
D = C_in * KH * KW
x = patches.view(B, H, W, D)
device = self.get_device()
auxs = [] # separate calculations for each neuron for space efficiency
for neuron_idx in range(C_out):
c = bn_coff[:, neuron_idx:neuron_idx+1, :, :] if self.has_bn else 1.
l = c * x
if first_run:
S = Variable(torch.zeros(D, D).to(device), requires_grad=True) # [H_in, H_in]
self.S.append(S)
else:
S = self.S[neuron_idx]
aux = l.view(-1, D).mm(S).unsqueeze(1).bmm(l.view(-1, D, 1)).squeeze(-1) # (Bx)S(Bx^T), [B*H*W,1]
aux = aux.view(B, 1, H, W)
auxs.append(aux)
auxs = torch.cat(auxs, 1) # [B, C_out, H, W]
auxs = auxs * nabla2_out # [B, C_out, H, W]
out = self._activate(out) + auxs
return out
def spe_eigen(self, avg_over=1.):
A = np.array([item.grad.data.cpu().numpy() for item in self.S]) # [C_out, D, D]
A /= avg_over
A = (A + np.transpose(A, [0, 2, 1])) / 2
w, v = np.linalg.eig(A) # [C_out, K], [C_out, D, K]
w = np.real(w)
v = np.real(v)
min_idx = np.argmin(w, axis=1)
w_min = np.min(w, axis=1) # [C_out,]
v_min = v[np.arange(w_min.shape[0]), :, min_idx] # [C_out, D]
self.w = w_min
self.v = v_min
device = self.get_device()
self.w = torch.FloatTensor(w_min).to(device)
self.v = torch.FloatTensor(v_min).to(device)
self.v = self.v.view(*self.module.weight.data.shape)
del A
## below are for copying weights and actual splitting
def get_n_neurons(self):
return self.module.weight.data.shape[0]
def random_split(self, C_new):
if C_new == 0:
return 0, None
C_out, C_in, kh, kw = self.module.weight.shape
idx = np.random.choice(C_out, C_new)
device = self.get_device()
delta1 = F.normalize(torch.randn(C_new, C_in, kh, kw).to(device), p=2, dim=-1)
delta2 = F.normalize(torch.randn(C_new, C_in, kh, kw).to(device), p=2, dim=-1)
delta1 = delta1 * 1e-2
delta2 = delta2 * 1e-2
idx = torch.LongTensor(idx).to(device)
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=C_out+C_new,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
# for current layer
new_layer.weight.data[:C_out, ...] = self.module.weight.data.clone()
new_layer.weight.data[C_out:, ...] = self.module.weight.data[idx, ...]
new_layer.weight.data[idx, ...] += delta1
new_layer.weight.data[C_out:, ...] -= delta2
if self.has_bias:
new_layer.bias.data[:C_out, ...] = self.module.bias.data.clone()
new_layer.bias.data[C_out:, ...] = self.module.bias.data[idx]
self.module = new_layer
# for batchnorm layer
if self.has_bn:
new_bn = nn.BatchNorm2d(C_out+C_new).to(device)
new_bn.weight.data[:C_out] = self.bn.weight.data.clone()
new_bn.weight.data[C_out:] = self.bn.weight.data[idx]
new_bn.bias.data[:C_out] = self.bn.bias.data.clone()
new_bn.bias.data[C_out:] = self.bn.bias.data[idx]
new_bn.running_mean.data[:C_out] = self.bn.running_mean.data.clone()
new_bn.running_mean.data[C_out:] = self.bn.running_mean.data[idx]
new_bn.running_var.data[:C_out] = self.bn.running_var.data.clone()
new_bn.running_var.data[C_out:] = self.bn.running_var.data[idx]
self.bn = new_bn
return C_new, idx
def rdinit_grow_output(self):
C_out, C_in, kh, kw = self.module.weight.shape
device = self.get_device()
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=C_out+1,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
new_layer.weight.data[:C_out, ...] = self.module.weight.data.clone()
self.module = new_layer
def rdinit_grow_input(self):
C_out, C_in, kh, kw = self.module.weight.shape
device = self.get_device()
new_layer = nn.Conv2d(in_channels=C_in+1,
out_channels=C_out,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
new_layer.weight.data[:,:C_in, ...] = self.module.weight.data.clone()
self.module = new_layer
def active_split(self, threshold):
idx = torch.nonzero((self.w <= threshold).float()).view(-1)
C_new = idx.shape[0]
if C_new == 0:
return 0, None
C_out, C_in, kh, kw = self.module.weight.shape
device = self.get_device()
delta = self.v[idx, ...] * 1e-2
delta = delta.view(C_new, C_in, kh, kw)
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=C_out+C_new,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
# for current layer
new_layer.weight.data[:C_out, ...] = self.module.weight.data.clone()
new_layer.weight.data[C_out:, ...] = self.module.weight.data[idx, ...]
new_layer.weight.data[idx, ...] += delta
new_layer.weight.data[C_out:, ...] -= delta
if self.has_bias:
new_layer.bias.data[:C_out, ...] = self.module.bias.data.clone()
new_layer.bias.data[C_out:, ...] = self.module.bias.data[idx]
self.module = new_layer
# for batchnorm layer
if self.has_bn:
new_bn = nn.BatchNorm2d(C_out+C_new).to(device)
new_bn.weight.data[:C_out] = self.bn.weight.data.clone()
new_bn.weight.data[C_out:] = self.bn.weight.data[idx]
new_bn.bias.data[:C_out] = self.bn.bias.data.clone()
new_bn.bias.data[C_out:] = self.bn.bias.data[idx]
new_bn.running_mean.data[:C_out] = self.bn.running_mean.data.clone()
new_bn.running_mean.data[C_out:] = self.bn.running_mean.data[idx]
new_bn.running_var.data[:C_out] = self.bn.running_var.data.clone()
new_bn.running_var.data[C_out:] = self.bn.running_var.data[idx]
self.bn = new_bn
return C_new, idx
def passive_split(self, idx):
C_new = idx.shape[0]
C_out, C_in, _, _ = self.module.weight.shape
device = self.get_device()
new_layer = nn.Conv2d(in_channels=C_in+C_new,
out_channels=C_out,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
new_layer.weight.data[:, :C_in, ...] = self.module.weight.data.clone()
new_layer.weight.data[:, C_in:, ...] = self.module.weight.data[:, idx, ...] / 2.
new_layer.weight.data[:, idx, ...] /= 2.
if self.has_bias:
new_layer.bias.data = self.module.bias.data.clone()
self.module = new_layer
def spffn_active_grow(self, threshold):
idx = torch.nonzero((self.w <= threshold).float()).view(-1)
C_out, C_in, kh, kw = self.module.weight.shape
c1 = C_out - self.eout
c3 = C_in - self.ein
split_idx, new_idx = idx[idx < c1], idx[idx >= c1]
n_split = split_idx.shape[0]
n_new = new_idx.shape[0]
c2 = c1 + n_split
device = self.get_device()
delta = self.v[split_idx, ...]
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=c1+n_split+n_new,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
# for current layer [--original--c1--split_new--c2--add new--]
old_W = self.module.weight.data.clone()
try:
old_W[:, C_in - self.ein:, :, :] = self.vni.clone()
except:
pass
try:
old_W[C_out-self.eout:, :C_in-self.ein, :, :] = self.vno.clone()
except:
pass
new_layer.weight.data[:c1, ...] = old_W[:c1,...]
if n_split > 0:
new_layer.weight.data[c1:c2, ...] = old_W[split_idx, ...]
new_layer.weight.data[split_idx,:c3,...] += delta
new_layer.weight.data[c1:c2,:c3,...] -= delta
if n_new > 0:
new_layer.weight.data[c2:, ...] = old_W[new_idx, ...]
if self.has_bias:
old_b = self.module.bias.data.clone()
new_layer.bias.data[:c1, ...] = old_b[:c1,...].clone()
if n_split > 0:
new_layer.bias.data[c1:c2, ...] = old_b[split_idx]
if n_new > 0:
new_layer.bias.data[c2:,...] = 0.
self.module = new_layer
# for batchnorm layer
if self.has_bn:
new_bn = nn.BatchNorm2d(c1+n_split+n_new).to(device)
new_bn.weight.data[:c1] = self.bn.weight.data[:c1].clone()
new_bn.bias.data[:c1] = self.bn.bias.data[:c1].clone()
new_bn.running_mean.data[:c1] = self.bn.running_mean.data[:c1].clone()
new_bn.running_var.data[:c1] = self.bn.running_var.data[:c1].clone()
if n_split > 0:
new_bn.weight.data[c1:c2] = self.bn.weight.data[split_idx]
new_bn.bias.data[c1:c2] = self.bn.bias.data[split_idx]
new_bn.running_mean.data[c1:c2] = self.bn.running_mean.data[split_idx]
new_bn.running_var.data[c1:c2] = self.bn.running_var.data[split_idx]
if n_new > 0:
new_bn.weight.data[c2:] = self.bn.weight.data[new_idx]
new_bn.bias.data[c2:] = self.bn.bias.data[new_idx]
new_bn.running_mean.data[c2:] = self.bn.running_mean.data[new_idx]
new_bn.running_var.data[c2:] = self.bn.running_var.data[new_idx]
self.bn = new_bn
return n_split+n_new, split_idx, new_idx
def spffn_passive_grow(self, split_idx, new_idx):
n_split = split_idx.shape[0] if split_idx is not None else 0
n_new = new_idx.shape[0] if new_idx is not None else 0
C_out, C_in, _, _ = self.module.weight.shape
if self.groups != 1:
C_in = C_out
device = self.get_device()
c1 = C_in-self.ein
if n_split == 0 and n_new == self.ein:
return
if self.groups != 1:
self.groups = c1 + n_split + n_new
C_out = self.groups
new_layer = nn.Conv2d(in_channels=c1+n_split+n_new,
out_channels=C_out,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias, groups = self.groups).to(device)
c2 = c1 + n_split
if self.has_bias:
new_layer.bias.data = self.module.bias.data.clone()
if self.groups != 1:
new_layer.weight.data[:c1,:,...] = self.module.weight.data[:c1,:,...].clone()
else:
new_layer.weight.data[:,:c1,...] = self.module.weight.data[:,:c1,...].clone()
if n_split > 0:
if self.groups == 1:
new_layer.weight.data[:,c1:c2,:,:] = self.module.weight.data[:,split_idx,:,:] / 2.
new_layer.weight.data[:,split_idx,...] /= 2.
else:
new_layer.weight.data[c1:c2, :,...] = self.module.weight.data[split_idx, :,...]
if self.groups != 1:
new_bn = nn.BatchNorm2d(C_out).to(device)
out = C_out - n_new - n_split
out1 = out + n_split
out2 = out1 + n_new
new_bn.weight.data[:out] = self.bn.weight.data.clone()[:out]
new_bn.bias.data[:out] = self.bn.bias.data.clone()[:out]
new_bn.running_mean.data[:out] = self.bn.running_mean.data.clone()[:out]
new_bn.running_var.data[:out] = self.bn.running_var.data.clone()[:out]
if n_split > 0:
out1 = out + n_split
new_bn.weight.data[out:out1] = self.bn.weight.data[split_idx]
new_bn.bias.data[out:out1] = self.bn.bias.data[split_idx]
new_bn.running_mean.data[out:out1] = self.bn.running_mean.data[split_idx]
new_bn.running_var.data[out:out1] = self.bn.running_var.data[split_idx]
if n_new > 0:
new_bn.weight.data[out1:out2] = self.bn.weight.data[new_idx]
new_bn.bias.data[out1:out2] = self.bn.bias.data[new_idx]
new_bn.running_mean.data[out1:out2] = self.bn.running_mean.data[new_idx]
new_bn.running_var.data[out1:out2] = self.bn.running_var.data[new_idx]
self.bn = new_bn
if n_new > 0:
if self.groups != 1:
new_layer.weight.data[c2:,:,...] = self.module.weight.data[new_idx, :,...]
else:
new_layer.weight.data[:,c2:,...] = self.module.weight.data[:,new_idx,...]
self.module = new_layer
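# --- Hypothetical sketch (not part of the original file): how an active split
# --- on one Conv2d layer is paired with a passive split on the layer that
# --- consumes its output. Channel sizes (3 -> 8 -> 16) and the use of
# --- random_split are assumptions for illustration.
def _split_pairing_sketch():
    layer1 = Conv2d(3, 8, kernel_size=3, padding=1, actv_fn='relu', has_bn=True)
    layer2 = Conv2d(8, 16, kernel_size=3, padding=1, actv_fn='relu', has_bn=True)
    h = layer2(layer1(torch.randn(2, 3, 32, 32)))  # conv -> bn -> relu, twice: [2, 16, 32, 32]
    n_new, idx = layer1.random_split(2)            # duplicate 2 output channels with +/- noise
    if n_new > 0:
        layer2.passive_split(idx)                  # grow layer2's input side to match
    # layer1.module.weight: [10, 3, 3, 3], layer2.module.weight: [16, 10, 3, 3]
    return layer1.module.weight.shape, layer2.module.weight.shape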
|
alma-main
|
crlapi/sl/architectures/firefly_vgg/sp/conv.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import copy
import time
from pydoc import locate
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.core import SupervisedCLModel
from crlapi.sl.architectures.firefly_vgg import sp
from crlapi.sl.architectures.firefly_vgg.models import sp_vgg
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
# --- Firefly Implementation. Since we do not plan to extend this method,
# --- everything is self-contained here.
class Firefly(SupervisedCLModel):
def __init__(self, stream, clmodel_args):
super().__init__()
self.models = []
self.config = clmodel_args
self.verbose = True
def build_initial_net(self, task, **model_args):
# only support the custom VGG backbone for now
model, next_layers, layers_to_split = \
sp_vgg('vgg19',
n_classes=task.n_classes,
dimh=model_args['n_channels'],
method='fireflyn')
# Hacky AF
model.next_layers = next_layers
model.layers_to_split = layers_to_split
return model
def get_prediction_net(self,task):
if task.task_descriptor() is None:
model = self.models[-1]
else:
model = self.models[task.task_descriptor()]
model.eval()
return model
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
if len(self.models)==0:
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
elif (task.task_descriptor() % self.config.grow_every) == 0:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
print('growing')
base_gr = self.config.model.grow_ratio
grow_ratio = (base_gr * task.task_descriptor() + 1) / (base_gr * (task.task_descriptor() - 1) + 1) - 1
n_pre = sum(np.prod(x.shape) for x in model.parameters())
added = self.split(model, training_loader, grow_ratio)
n_post = sum(np.prod(x.shape) for x in model.parameters())
assert n_post > n_pre
print(f'from {n_pre} to {n_post}')
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
flops_per_input = self.count_flops(task, model)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config["device"]
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config["max_epochs"])+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config["max_epochs"]):
# Make sure model is ready for train
model.train()
#Training loop
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Loss {validation_loss:.4}")
if patience_count == patience:
break
self.models.append(best_model)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
return self
# --------------------------------------------
# Firefly specific methods (from Classifier)
# --------------------------------------------
def spffn_forward(self, net, x, alpha):
for layer in net:
#if isinstance(layer, sp.SpModule) and layer.can_split:
prev_x = x.cpu().data.numpy()
if isinstance(layer, sp.SpModule):
x = layer.spffn_forward(x, alpha=alpha)
else:
x = layer(x)
return x.view(x.shape[0], -1)
def spffn_loss_fn(self, net, x, y, alpha=-1):
scores = self.spffn_forward(net, x, alpha=alpha)
loss = F.cross_entropy(scores, y)
return loss
## -- firefly new split -- ##
def spffn(self, net, loader, n_batches):
v_params = []
for i, layer in enumerate(net):
if isinstance(layer, sp.SpModule):
enlarge_in = (i > 0)
enlarge_out = (i < len(net)-1)
net[i].spffn_add_new(enlarge_in=enlarge_in, enlarge_out=enlarge_out)
net[i].spffn_reset()
if layer.can_split:
v_params += [net[i].v]
if enlarge_in:
v_params += [net[i].vni]
if enlarge_out:
v_params += [net[i].vno]
opt_v = torch.optim.RMSprop(nn.ParameterList(v_params), lr=1e-3, momentum=0.1, alpha=0.9)
self.device = next(iter(net.parameters())).device
torch.cuda.empty_cache()
n_batches = 0
for i, (x, y) in enumerate(loader):
n_batches += 1
x, y = x.to(self.device), y.to(self.device)
loss = self.spffn_loss_fn(net, x, y)
opt_v.zero_grad()
loss.backward()
for layer in net:
if isinstance(layer, sp.SpModule):
layer.spffn_penalty()
opt_v.step()
self.config.model.granularity = 1
alphas = np.linspace(0, 1, self.config.model.granularity*2+1)
for alpha in alphas[1::2]:
for x, y in loader:
x, y = x.to(self.device), y.to(self.device)
loss = self.spffn_loss_fn(net, x, y, alpha=alpha)
opt_v.zero_grad()
loss.backward()
# for i in self.layers_to_split:
for i in net.layers_to_split:
net[i].spffn_update_w(self.config.model.granularity * n_batches, output = False)
# --------------------------------------------
# Firefly specific methods (from SpNet)
# --------------------------------------------
def clear(self, net):
for layer in net:
if isinstance(layer, sp.SpModule):
layer.clear()
def get_num_elites(self, net, grow_ratio):
n = 0
# for i in self.layers_to_split:
for i in net.layers_to_split:
n += net[i].module.weight.shape[0]
self.n_elites = int(n * grow_ratio)
def get_num_elites_group(self, net, group_num, grow_ratio):
for g in range(group_num):
n = 0
for i in self.layers_to_split_group[g]:
n += net[i].module.weight.shape[0]
try:
self.n_elites_group[g] = int(n * grow_ratio)
except:
self.n_elites_group = {}
self.n_elites_group[g] = int(n * grow_ratio)
def sp_threshold(self, net):
# ws, wi = torch.sort(torch.cat([net[i].w for i in self.layers_to_split]).reshape(-1))
ws, wi = torch.sort(torch.cat([net[i].w for i in net.layers_to_split]).reshape(-1))
total= ws.shape[0]
threshold = ws[self.n_elites]
return threshold
def sp_threshold_group(self, net, group_num):
# ws, wi = torch.sort(torch.cat([net[i].w for i in self.layers_to_split_group[group_num]]).reshape(-1))
ws, wi = torch.sort(torch.cat([net[i].w for i in net.layers_to_split_group[group_num]]).reshape(-1))
total= ws.shape[0]
threshold = ws[self.n_elites_group[group_num]]
return threshold
def split(self, net, loader, grow_ratio, n_batches=-1, split_method='fireflyn'):
self.num_group = 1  # if self.config.backbone != 'mobile' else 2
if split_method not in ['random', 'exact', 'fast', 'firefly', 'fireflyn']:
raise NotImplementedError
if self.verbose:
print('[INFO] start splitting ...')
start_time = time.time()
net.eval()
if self.num_group == 1:
self.get_num_elites(net, grow_ratio)
else:
self.get_num_elites_group(net, self.num_group, grow_ratio)
split_fn = {
#'exact': self.spe,
#'fast': self.spf,
#'firefly': self.spff,
'fireflyn': self.spffn,
}
if split_method != 'random':
split_fn[split_method](net, loader, n_batches)
n_neurons_added = {}
if split_method == 'random':
# n_layers = len(self.layers_to_split)
n_layers = len(net.layers_to_split)
n_total_neurons = 0
threshold = 0.
# for l in self.layers_to_split:
for l in net.layers_to_split:
n_total_neurons += net[l].get_n_neurons()
n_grow = int(n_total_neurons * grow_ratio)
n_new1 = np.random.choice(n_grow, n_layers, replace=False)
n_new1 = np.sort(n_new1)
n_news = []
for i in range(len(n_new1) - 1):
if i == 0:
n_news.append(n_new1[i])
n_news.append(n_new1[i + 1] - n_new1[i])
else:
n_news.append(n_new1[i + 1] - n_new1[i])
n_news[-1] += 1
# for i, n_new_ in zip(reversed(self.layers_to_split), n_news):
for i, n_new_ in zip(reversed(net.layers_to_split), n_news):
if isinstance(net[i], sp.SpModule) and net[i].can_split:
n_new, idx = net[i].random_split(n_new_)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed split this layer
# for j in self.next_layers[i]:
for j in net.next_layers[i]:
net[j].passive_split(idx)
elif split_method == 'fireflyn':
if self.num_group == 1:
threshold = self.sp_threshold(net)
# for i in reversed(self.layers_to_split):
for i in reversed(net.layers_to_split):
if isinstance(net[i], sp.SpModule) and net[i].can_split:
if self.num_group != 1:
group = self.total_group[i]
threshold = self.sp_threshold_group(net, group)
n_new, split_idx, new_idx = net[i].spffn_active_grow(threshold)
sp_new = split_idx.shape[0] if split_idx is not None else 0
n_neurons_added[i] = (sp_new, n_new-sp_new)
if net[i].kh == 1:
isfirst = True
else:
isfirst = False
# for j in self.next_layers[i]:
for j in net.next_layers[i]:
print('passive', net[j].module.weight.shape)
net[j].spffn_passive_grow(split_idx, new_idx)
else:
threshold = self.sp_threshold(net)
# actual splitting
# for i in reversed(self.layers_to_split):
for i in reversed(net.layers_to_split):
if isinstance(net[i], sp.SpModule) and net[i].can_split:
n_new, idx = net[i].active_split(threshold)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed split this layer
# for j in self.next_layers[i]:
for j in net.next_layers[i]:
net[j].passive_split(idx)
net.train()
self.clear(net) # cleanup auxiliaries
end_time = time.time()
if self.verbose:
print('[INFO] splitting takes %10.4f sec. Threshold value is %10.9f' % (
end_time - start_time, threshold))
if split_method == 'fireflyn':
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows (sp %d | new %d)' % (x, y1, y2) for x, (y1, y2) in n_neurons_added.items()]))
else:
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows %d neurons' % (x, y) for x, y in n_neurons_added.items()]))
return n_neurons_added
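# --- Hypothetical numeric sketch (not part of the original file) of the
# --- grow_ratio schedule used in Firefly.update above: the per-task ratio is
# --- chosen so that the growth factors telescope, leaving the network roughly
# --- (base_gr * t + 1) times its initial width after task t, assuming it is
# --- grown at every task.
def _grow_ratio_schedule_sketch(base_gr=0.25, n_tasks=4):
    width = 1.0
    ratios = []
    for t in range(1, n_tasks + 1):
        grow_ratio = (base_gr * t + 1) / (base_gr * (t - 1) + 1) - 1
        width *= 1 + grow_ratio
        ratios.append(round(grow_ratio, 4))
    # ratios: [0.25, 0.2, 0.1667, 0.1429]; width ends at base_gr * n_tasks + 1 = 2.0
    return ratios, round(width, 4)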
|
alma-main
|
crlapi/sl/clmodels/firefly.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
class IndexDataset(torch.utils.data.Dataset):
def __init__(self, og_dataset):
self.og_dataset = og_dataset
def __getitem__(self, index):
data, target = self.og_dataset[index]
return data, target, index
def __len__(self):
return len(self.og_dataset)
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class AdaBoost(Finetune):
def get_prediction_net(self,task):
for i, model in enumerate(self.models):
model.eval()
self.models[i] = model.to(self.config.device)
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs) * self.model_weights.reshape((-1,) + (1,) * outs[0].ndim)
out = out.mean(0)
return out
def compute_errors(self, loader, model):
unshuffled_loader = torch.utils.data.DataLoader(
loader.dataset, batch_size=loader.batch_size, drop_last=False, shuffle=False)
device=self.config.device
model.to(device)
model.eval()
# --- Upweighting
err = []
# eval mode
with torch.no_grad():
for x, y in unshuffled_loader:
x, y = x.to(device), y.to(device)
err += [~model(x).argmax(1).eq(y)]
err = torch.cat(err).float() # (DS, )
return err
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
# Creating datasets and loaders
og_training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
training_loader = torch.utils.data.DataLoader(
IndexDataset(og_training_loader.dataset),
batch_size=og_training_loader.batch_size,
shuffle=True
)
to_print = []
# --- step 1 : Initialize the observation weights uniformly
ds_len = len(og_training_loader.dataset)
sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
for around in range(self.config.n_rounds):
# --- 2. a) Fit new classifier on weighted data
# init model
model_args=self.config.model
model, best_model = [self.build_initial_net(task,**model_args) for _ in range(2)]
flops_per_input = self.count_flops(task, model)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
epoch = 0
while True: # Run until convergence
epoch += 1
# Make sure model is ready for train
model.train()
# Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y, idx) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
weight_x = sample_weights[idx]
n += y.size(0)
# apply transformations
x = train_aug(raw_x)
predicted = model(x)
loss = F.cross_entropy(predicted, y, reduction='none')
loss = (loss * weight_x).mean() * ds_len
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
# Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print(f"\t Round {around}. Found best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience or epoch == self.config.max_epochs:
break
del model
# --- Step 2 b) Compute the new classifier errors
all_errs = self.compute_errors(og_training_loader, best_model) # 1, ds_size
assert all_errs.shape == (ds_len, )
cls_err = (all_errs * sample_weights).sum() / sample_weights.sum()
# --- Step 2 c) Compute the new classifier weight
K = task.n_classes
cls_alpha = torch.log((K - 1) * (1 - cls_err) / cls_err)
# --- Step 2 d) Update the sample weights
sample_weights = sample_weights * torch.exp(cls_alpha * all_errs)
sample_weights /= sample_weights.sum()
print(f'sample weights min {sample_weights.min():.6f}\t max {sample_weights.max():.6f} \t median {sample_weights.median():.6f}')
print(torch.multinomial(sample_weights, ds_len, replacement=True).bincount().bincount())
# store best model
self.models.append(best_model)
cls_alpha = cls_alpha.reshape(1)
# store classifier weights
if not hasattr(self, 'model_weights'):
self.model_weights = cls_alpha
else:
self.model_weights = torch.cat((self.model_weights, cls_alpha))
print(self.model_weights)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
to_print += [fill(accs) + '\t' + str(ensemble)]
for item in to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', len(self.models) * flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
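# --- Hypothetical numeric sketch (not part of the original file) of the
# --- SAMME-style update in steps 2b-2d above: K = 10 classes, four samples,
# --- and the classifier wrong on exactly one of them (all numbers are made up).
def _samme_update_sketch():
    K = 10
    w = torch.full((4,), 0.25)
    errs = torch.tensor([0., 0., 1., 0.])                      # 1 = misclassified
    cls_err = (errs * w).sum() / w.sum()                       # 0.25
    cls_alpha = torch.log((K - 1) * (1 - cls_err) / cls_err)   # log(27) ~ 3.296
    w = w * torch.exp(cls_alpha * errs)
    w /= w.sum()
    return cls_alpha.item(), w   # the misclassified sample now carries 0.9 of the weight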
|
alma-main
|
crlapi/sl/clmodels/adaboost.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
from itertools import chain
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class AggEnsemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs).sum(0)
return out
def _validation_loop(self,nets,device,dataloader):
[net.eval() for net in nets]
[net.to(device) for net in nets]
with torch.no_grad():
loss_values=[]
nb_ok=0
nb_total=0
for x,y in dataloader:
x,y=x.to(device),y.to(device)
predicted=0
for net in nets:
predicted += net(x)
loss=F.cross_entropy(predicted,y)
loss_values.append(loss.item())
nb_ok+=predicted.max(1)[1].eq(y).float().sum().item()
nb_total+=x.size()[0]
loss=np.mean(loss_values)
accuracy=nb_ok/nb_total
[net.train() for net in nets]
return {"loss":loss,"accuracy":accuracy}
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
models = [self.build_initial_net(task,**model_args) for _ in range(self.config.k)]
n_params = sum(np.prod(x.shape) for x in models[0].parameters())
print(f'new model has {n_params} params')
else:
# get the last k models
models = [copy.deepcopy(model) for model in self.models]
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, models[0]) * self.config.k
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_models = [copy.deepcopy(model) for model in models]
best_loss, best_acc = 1e10, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
models = [model.to(device) for model in models]
optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))
#Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
[model.train() for model in models]
# Keep a single track of these for now
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
# apply transformations
x = train_aug(raw_x)
predicted = 0.
for model in models:
predicted += model(x)
loss = F.cross_entropy(predicted,y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
acc = nb_ok/x.size()[0]
accuracy = acc
loss_ = loss.item()
training_accuracy += accuracy
training_loss += loss_
n += x.size(0)
n_fwd_samples += x.size(0)
logger.add_scalar("train/loss",loss_,iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
#Validation
training_accuracy /= (i + 1)
training_loss /= (i + 1)
out=self._validation_loop(models,device,validation_loader)
validation_loss, validation_accuracy = out["loss"], out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
if best_acc is None or validation_accuracy > (best_acc):
best_acc = validation_accuracy
for model_idx in range(self.config.k):
best_models[model_idx].load_state_dict(_state_dict(models[model_idx],"cpu"))
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience:
break
# overwrite the best models
self.models = nn.ModuleList(best_models)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
|
alma-main
|
crlapi/sl/clmodels/agg_ensemble.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
from itertools import chain
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class BaggingSampler(torch.utils.data.Sampler):
""" Simulate a Dataset Sampled with Replacement """
def __init__(self, indices, real_ds_size):
self.size = real_ds_size
self.indices = indices
weights = torch.zeros(size=(self.size,)).float()
weights[self.indices] = 1
self.weights = weights
# do this here so that each epoch sees same sample dist
samples = torch.multinomial(weights, self.size, replacement=True)
self.samples = samples
unique_samples = samples.unique()
counts = samples.bincount().bincount()
assert (counts * torch.arange(counts.size(0)))[1:].sum().item() == self.size
print(counts, unique_samples.size(0), self.indices.size(0))
for ss in unique_samples:
assert (ss == unique_samples).sum() > 0
def __iter__(self):
samples = self.samples
samples = samples[torch.randperm(samples.size(0))]
# RESAMPLING
samples = torch.multinomial(self.weights, self.size, replacement=True)
for sample in samples:
yield sample.item()
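# Illustrative sketch (hypothetical, not used by Bagging below): how BaggingSampler
# simulates a bootstrap over a toy TensorDataset. Half of the indices are kept, then
# the sampler draws len(dataset) samples with replacement from those indices only.
def _bagging_sampler_demo():
    toy_x, toy_y = torch.randn(8, 3), torch.randint(0, 2, (8,))
    toy_ds = torch.utils.data.TensorDataset(toy_x, toy_y)
    kept = torch.randperm(len(toy_ds))[: len(toy_ds) // 2]   # simulated subsample_p = 0.5
    loader = torch.utils.data.DataLoader(
        toy_ds, batch_size=4, sampler=BaggingSampler(kept, len(toy_ds))
    )
    for xb, yb in loader:
        print(xb.shape, yb.shape)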
class Bagging(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs).mean(0)
return out
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
models = [self.build_initial_net(task,**model_args) for _ in range(self.config.k)]
n_params = sum(np.prod(x.shape) for x in models[0].parameters())
print(f'new model has {n_params} params')
else:
# get the last k models
models = [copy.deepcopy(self.models[-i]) for i in range(self.config.k)]
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, models[0]) * self.config.k
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
ds_len = len(training_loader.dataset)
training_loaders = []
# build boosted loaders
for _ in range(self.config.k):
all_idx = torch.arange(ds_len)
idx = torch.multinomial(
torch.ones_like(all_idx).float(),
int(self.config.subsample_p * ds_len),
replacement=False
)
sampler = BaggingSampler(idx, ds_len)
loader = torch.utils.data.DataLoader(
training_loader.dataset,
batch_size=training_loader.batch_size,
sampler=sampler
)
training_loaders += [loader]
best_models = [copy.deepcopy(model) for model in models]
best_losses, best_accs = [1e10] * self.config.k, [0] * self.config.k
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
device=self.config.device
models = [model.to(device) for model in models]
optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))
#Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
[model.train() for model in models]
# Keep a single track of these for now
training_loss=0.0
training_accuracy=0.0
n=0
for i, items in enumerate(zip(*training_loaders)):
xs, ys = [], []
for item in items:
x, y = item
x, y = x.to(device), y.to(device)
xs += [train_aug(x)]
ys += [y]
xs = torch.stack(xs)
ys = torch.stack(ys)
loss, acc = 0, 0
for model_idx in range(self.config.k):
model, x, y = models[model_idx], xs[model_idx], ys[model_idx]
predicted = model(x)
loss += F.cross_entropy(predicted,y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
acc += nb_ok/x.size()[0]
accuracy = acc / self.config.k
loss_ = loss.item() / self.config.k
training_accuracy += accuracy
training_loss += loss_
n += xs.size(1)
n_fwd_samples += xs.size(1)
logger.add_scalar("train/loss",loss_,iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
#Validation
            training_accuracy /= (i + 1)
            training_loss /= (i + 1)
outs = [self._validation_loop(model,device,validation_loader) for model in models]
validation_losses = [x['loss'] for x in outs]
validation_accuracies = [x['accuracy'] for x in outs]
validation_loss, validation_accuracy = np.mean(validation_losses), np.mean(validation_accuracies)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
for model_idx in range(self.config.k):
if validation_accuracies[model_idx] > best_accs[model_idx]:
print("\tFound best model at epoch ",epoch, '\t', model_idx)
best_models[model_idx].load_state_dict(_state_dict(models[model_idx],"cpu"))
best_accs[model_idx] = validation_accuracies[model_idx]
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
for best_model in best_models:
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', np.mean(best_accs), 0)
return self
|
alma-main
|
crlapi/sl/clmodels/bagging.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import time
import copy
import numpy as np
from pydoc import locate
class IndexDataset(torch.utils.data.Dataset):
""" Wrapper that additionally returns the index for each sample """
def __init__(self, og_dataset):
self.og_dataset = og_dataset
def __getitem__(self, index):
data, target = self.og_dataset[index]
return data, target, index
def __len__(self):
return len(self.og_dataset)
class BoostingSampler(torch.utils.data.Sampler):
""" Upsample points based on sample weight """
def __init__(self, weights):
self.weights = weights
def __iter__(self):
assert -1e-5 < self.weights.sum().item() - 1 < 1e-5
samples = torch.multinomial(self.weights, self.weights.size(0), replacement=True)
if not hasattr(self, 'epoch'):
print('sampling with replacement counts', samples.bincount().bincount())
self.epoch = 0
else:
self.epoch += 1
for sample in samples:
yield sample.item()
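# Illustrative sketch (hypothetical weights): BoostingSampler draws indices in proportion
# to the normalized sample weights, so high-weight points are seen more often per epoch.
def _boosting_sampler_demo():
    weights = torch.tensor([0.05, 0.05, 0.10, 0.80])          # must sum to 1
    sampler = BoostingSampler(weights)
    drawn = torch.tensor(list(iter(sampler)))                 # one epoch worth of indices
    print(drawn.bincount(minlength=weights.size(0)))          # index 3 dominates in expectation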
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class AdaBoost(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
self.prog_pred_stats = []
def get_prediction_net(self,task):
for i, model in enumerate(self.models):
model.eval()
self.models[i] = model.to(self.config.device)
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs) * self.model_alphas.reshape((-1,) + (1,) * outs[0].ndim)
u_out = torch.stack(outs)
print('diff weighted / unw', (out.sum(0).argmax(-1) != u_out.sum(0).argmax(-1)).float().mean())
prog_pred = out.cumsum(0).argmax(-1)
diff_from_last = prog_pred[-1, :] != prog_pred
diff_from_last = torch.flip(diff_from_last, dims=(0,)) # n_models, bs : with oldest model as idx = 0
last_conseq_steps_with_same_pred = (diff_from_last.int().cumsum(0) == 0).int().sum(0)
useful_steps = len(self.models) - last_conseq_steps_with_same_pred + 1
self.prog_pred_stats += [useful_steps.bincount(minlength=len(self.models) + 1)]
return out.sum(0)
"""
# --- actually let's pick the most confident model
out = u_out
max_prob = F.softmax(out,-1).max(-1)[0] # n_models, BS
model_idx = max_prob.argmax(0) #BS,
N_CLS = out.size(-1)
idx = torch.arange(model_idx.size(0)).cuda() * len(self.models) + model_idx
out = out.transpose(1,0) # BS, n_models, C
out = out.reshape(-1,N_CLS)[idx].reshape(-1,N_CLS)
return out
# ----
#return u_out.sum(0)
#return out.sum(0)
"""
def weighted_validation_loop(self,net,device, dataloader, weights):
""" weight loss and accuracy using sample specific weights """
net = net.eval()
# Return indices for the dataset
loader = torch.utils.data.DataLoader(
IndexDataset(dataloader.dataset),
batch_size=dataloader.batch_size,
shuffle=False
)
ds_len = len(dataloader.dataset)
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y,idx) in enumerate(loader):
x, y, idx = x.to(device),y.to(device), idx.to(device)
weight_x = weights[idx]
predicted=net(x)
loss = F.cross_entropy(predicted,y, reduction='none')
loss = (loss * weight_x).mean() * ds_len
loss_values.append(loss.item())
acc += (predicted.argmax(1).eq(y).float() * weight_x).sum()
loss=np.mean(loss_values)
net = net.train()
return {"loss":loss,"accuracy":acc.item()}
def _all_validation_loop(self, device, dataloader,task):
""" weight loss and accuracy using sample specific weights """
self.get_prediction_net(task)
ds_len = len(dataloader.dataset)
acc = 0
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y) in enumerate(dataloader):
x, y= x.to(device),y.to(device)
out = []
for model in self.models:
out += [model(x)]
out = torch.stack(out).argmax(-1)
acc += (out == y.view(1,-1)).int().max(0)[0].float().sum().item()
return acc / ds_len
def compute_errors(self, loader, models):
""" given a loader and a list of models, returns a per_model x per_sample error matrix """
unshuffled_loader = torch.utils.data.DataLoader(
loader.dataset, batch_size=loader.batch_size, drop_last=False, shuffle=False)
device=self.config.device
# --- Upweighting
all_errs = []
# eval mode
[x.eval() for x in models]
with torch.no_grad():
for x, y in unshuffled_loader:
x, y = x.to(device), y.to(device)
for i, model in enumerate(models):
if i == 0:
err = [~model(x).argmax(1).eq(y)]
else:
err += [~model(x).argmax(1).eq(y)]
err = torch.stack(err) # n_models, bs
all_errs += [err]
all_errs = torch.cat(all_errs, dim=1).float() # n_models, DS
return all_errs
def compute_model_and_sample_weights(self, err_matrix, task):
""" compound sample and model models w.r.t to each model's performance """
n_models, ds_len = err_matrix.size()
sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
model_alphas = []
for model_idx in range(n_models):
model_err = err_matrix[model_idx]
weighted_model_err = (sample_weights * model_err).sum() / sample_weights.sum()
model_alpha = torch.log((1 - weighted_model_err) / weighted_model_err) + np.log(task.n_classes - 1)
model_alphas += [model_alpha.reshape(1)]
sample_weights = sample_weights * torch.exp(model_alpha * model_err)
sample_weights /= sample_weights.sum()
return sample_weights, model_alphas
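    # Worked sketch of the SAMME update above on a made-up 2-model x 6-sample error matrix
    # (1 = misclassified) for a 10-class task; the log(n_classes - 1) term keeps a weak
    # model with ~50% weighted error at a positive alpha. _ToyTask is hypothetical.
    def _samme_weights_demo(self):
        class _ToyTask:
            n_classes = 10
        err_matrix = torch.tensor([[0., 1., 0., 0., 1., 0.],
                                   [1., 0., 0., 1., 1., 0.]], device=self.config.device)
        sample_weights, model_alphas = self.compute_model_and_sample_weights(err_matrix, _ToyTask())
        print(sample_weights, torch.cat(model_alphas))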
def update(self, task, logger):
""" train model on new MB """
task_id = task.task_descriptor()
assert isinstance(task_id, int)
self.validation_outputs = None
if task_id == 0 or self.config.init == 'scratch':
# create model
model_args = self.config.model
model = self.build_initial_net(task,**model_args)
elif self.config.init == 'last':
model = copy.deepcopy(self.models[-1])
elif self.config.init == 'first':
model = copy.deepcopy(self.models[0])
# Creating datasets and loaders
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
og_training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
ds_len = len(og_training_loader.dataset)
# --- get per sample weights
if task_id == 0:
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(model)
print(f'new model has {n_params} params')
sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
model_alphas = []
err_matrix = val_err_matrix = None
else:
err_matrix = self.compute_errors(og_training_loader, self.models)
sample_weights, model_alphas = self.compute_model_and_sample_weights(err_matrix, task)
val_err_matrix = self.compute_errors(validation_loader, self.models)
val_sample_weights, val_model_alphas = self.compute_model_and_sample_weights(val_err_matrix, task)
print('tr sample weights',torch.multinomial(sample_weights, sample_weights.size(0), replacement=True).bincount().bincount())
print('val sample weights',torch.multinomial(val_sample_weights, val_sample_weights.size(0), replacement=True).bincount().bincount())
if self.config.compute_model_weights_on_val:
model_alphas = val_model_alphas
if self.config.boosting == 'weighting' or task_id == 0:
# sample normally, but weight each point
sampler = None
training_weights = sample_weights
            # like ensemble at this point
#print('UNIFORM WEIGHTS')
#training_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
elif self.config.boosting == 'sampling':
# oversample points with high weight --> no need to upweight them
# print('UNIFORM WEIGHTS')
#sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
sampler = BoostingSampler(sample_weights)
training_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
else:
raise ValueError
# Return indices for the dataset
training_loader = torch.utils.data.DataLoader(
IndexDataset(og_training_loader.dataset),
batch_size=og_training_loader.batch_size,
shuffle=sampler is None,
sampler=sampler
)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
# Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration = 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
# Training loop
training_loss=0.0
training_accuracy=0.0
n=0
start = time.time()
for i, (raw_x, y, idx) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
weight_x = training_weights[idx]
n += y.size(0)
# apply transformations
x = train_aug(raw_x)
predicted = model(x)
loss = F.cross_entropy(predicted, y, reduction='none')
loss = (loss * weight_x).mean() * ds_len
nb_ok = predicted.argmax(1).eq(y).sum().item()
accuracy = nb_ok/x.size(0)
training_accuracy += nb_ok
training_loss += loss.item()
logger.add_scalar("train/loss",loss.item(), iteration)
logger.add_scalar("train/accuracy",accuracy, iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
# Validation
epoch_time = time.time() - start
training_accuracy /= n
training_loss /= n
if task_id == 0 or self.config.validation == 'normal':
out=self._validation_loop(model,device,validation_loader)
elif self.config.validation == 'weighted':
out=self.weighted_validation_loop(model,device,validation_loader, val_sample_weights)
else:
raise ValueError
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar('training/one_epoch_time', epoch_time, epoch)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}\t Time {epoch_time:.4f}")
if patience_count == patience:
break
# Store best model
self.models.append(best_model)
# Before making predictions, we need to calculate the weight of the new model
if self.config.compute_model_weights_on_val:
final_loader, err_mat = validation_loader, val_err_matrix
else:
final_loader, err_mat = og_training_loader, err_matrix
new_model_err = self.compute_errors(final_loader, [best_model]) # (1, DS)
if err_mat is not None:
err_mat = torch.cat((err_mat, new_model_err))
else:
err_mat = new_model_err
_, model_alphas = self.compute_model_and_sample_weights(err_mat, task)
self.model_alphas = torch.cat(model_alphas)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
best=self._all_validation_loop(device,validation_loader,task)
print('among best ', best)
pred_stats = torch.stack(self.prog_pred_stats).sum(0).float()
pred_stats /= pred_stats.sum()
print('model weights', self.model_alphas)
print('pred stats', pred_stats)
for i in range(pred_stats.size(0)):
logger.add_scalar('model/prediction_depth', pred_stats[i].item(), i)
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
|
alma-main
|
crlapi/sl/clmodels/boosting.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import numpy as np
from pydoc import locate
from random import shuffle
from crlapi.core import CLModel
from fvcore.nn import FlopCountAnalysis as FCA
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi import instantiate_class,get_class,get_arguments
import torch.utils.data
class SupervisedCLModel(CLModel, nn.Module):
""" A CLmodel based on a pytorch model, for supervised task over dataset
Args:
CLModel ([type]): [description]
"""
def __init__(self):
nn.Module.__init__(self)
self.memory_training_set=None
self.memory_validation_set=None
def update(self, task, logger):
raise NotImplementedError
def get_prediction_net(self,task):
raise NotImplementedError
def count_flops(self, task, model=None):
if model is None:
            model = self.get_prediction_net(task)
# don't mess up BN stats!
model = model.eval()
input = torch.FloatTensor(size=(1, *task.input_shape)).to(self.config['device']).normal_()
model = model.to(self.config['device'])
flops = FCA(model, input).total()
return flops
def _validation_loop(self,net,device,dataloader):
net = net.eval()
net.to(device)
with torch.no_grad():
loss_values=[]
nb_ok=0
nb_total=0
for x,y in dataloader:
x,y=x.to(device),y.to(device)
predicted=net(x)
loss=F.cross_entropy(predicted,y)
loss_values.append(loss.item())
nb_ok+=predicted.max(1)[1].eq(y).float().sum().item()
nb_total+=x.size()[0]
loss=np.mean(loss_values)
accuracy=nb_ok/nb_total
net = net.train()
return {"loss":loss,"accuracy":accuracy}
def evaluate(self,task,logger,evaluation_args):
logger.message("Evaluating...")
evaluation_dataset = task.task_resources().make()
#Building dataloader for both
evaluation_loader = torch.utils.data.DataLoader(
evaluation_dataset,
batch_size=evaluation_args["batch_size"],
num_workers=evaluation_args["num_workers"],
)
# TODO: is deepcopy here necessary ?
evaluation_model=copy.deepcopy(self.get_prediction_net(task))
evaluation_model.eval()
device=evaluation_args["device"]
evaluation_model.to(device)
with torch.no_grad():
loss_values=[]
nb_ok=0
nb_total=0
for x,y in evaluation_loader:
x,y=x.to(device),y.to(device)
predicted=evaluation_model(x)
loss=F.cross_entropy(predicted,y).item()
nb_ok+=predicted.max(1)[1].eq(y).float().sum().item()
nb_total+=x.size()[0]
loss_values.append(loss)
evaluation_loss=np.mean(loss_values)
accuracy=nb_ok/nb_total
r={"loss":evaluation_loss,"accuracy":accuracy}
logger.debug(str(r))
return r
def build_initial_net(self,task,**model_args):
from importlib import import_module
classname=model_args["class_name"]
del model_args["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
return c(task, **model_args)
# -- Helpers
def get_train_and_validation_loaders(self, dataset):
val_size = int(len(dataset) * self.config.validation_proportion)
tr_size = len(dataset) - val_size
training_dataset, validation_dataset = torch.utils.data.random_split(dataset, [tr_size, val_size])
if self.config.train_replay_proportion>0.0:
if not self.memory_training_set is None:
l=int(len(self.memory_training_set)*self.config.train_replay_proportion)
m,_= torch.utils.data.random_split(self.memory_training_set,[l,len(self.memory_training_set)-l])
training_dataset=torch.utils.data.ConcatDataset([training_dataset,m])
if self.config.validation_replay_proportion>0.0:
if not self.memory_validation_set is None:
l=int(len(self.memory_validation_set)*self.config.validation_replay_proportion)
m,_= torch.utils.data.random_split(self.memory_validation_set,[l,len(self.memory_validation_set)-l])
validation_dataset=torch.utils.data.ConcatDataset([validation_dataset,m])
print("Training set size = ",len(training_dataset))
print("Validation set size = ",len(validation_dataset))
self.memory_training_set=training_dataset
self.memory_validation_set=validation_dataset
training_loader = torch.utils.data.DataLoader(
training_dataset,
batch_size=self.config.training_batch_size,
num_workers=self.config.training_num_workers,
persistent_workers=self.config.training_num_workers>0,
shuffle=True,
# pin_memory=self.config['device'] != 'cpu'
)
validation_loader = torch.utils.data.DataLoader(
validation_dataset,
batch_size=self.config.validation_batch_size,
num_workers=self.config.validation_num_workers,
persistent_workers=self.config.validation_num_workers>0,
shuffle=False,
# pin_memory=self.config['device'] != 'cpu'
)
return training_loader,validation_loader
def get_optimizer(self, model_params):
c=get_class(self.config.optim)
args=get_arguments(self.config.optim)
return c(model_params,**args)
def get_train_augs(self):
if self.config.get('kornia_augs', None) is not None:
tfs = []
import kornia
for tf_cfg in self.config['kornia_augs']:
tf = locate(f'kornia.augmentation.{tf_cfg.name}')
args = dict(tf_cfg)
args.pop('name')
tfs += [tf(**args)]
tfs = nn.Sequential(*tfs)
else:
tfs = nn.Identity()
tfs = tfs.to(self.config['device'])
return tfs
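# Illustrative sketch of the config contract that get_train_augs expects; the example
# entries in the comment below are hypothetical, each resolved to kornia.augmentation.<name>
# and instantiated with the remaining keys before being chained on the target device.
def _build_kornia_augs_sketch(cfg_list, device="cpu"):
    # e.g. cfg_list = [{"name": "RandomHorizontalFlip", "p": 0.5}]
    tfs = []
    for tf_cfg in cfg_list:
        args = dict(tf_cfg)
        tf = locate(f"kornia.augmentation.{args.pop('name')}")
        tfs.append(tf(**args))
    return (nn.Sequential(*tfs) if tfs else nn.Identity()).to(device)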
|
alma-main
|
crlapi/sl/clmodels/core.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Ensemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
if self.config.vote:
votes = out.argmax(-1)
            oh_votes = F.one_hot(votes, num_classes=out.size(-1))
vote_count = oh_votes.sum(0).float()
most_confident = out.max(0)[0].max(-1)[1]
# Break ties
            vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1
out = vote_count
else:
out = out.mean(0)
return out
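    # Illustrative sketch of the voting path above on fabricated logits (3 models, batch of 2,
    # 10 classes): each model casts one vote per sample, and the tie-break adds 0.1 to the
    # class picked by the single most confident model before the final argmax.
    @staticmethod
    def _vote_demo():
        out = torch.randn(3, 2, 10)                           # (n_models, bs, n_classes)
        votes = out.argmax(-1)                                # (n_models, bs)
        vote_count = F.one_hot(votes, num_classes=out.size(-1)).sum(0).float()
        most_confident = out.max(0)[0].max(-1)[1]             # (bs,)
        vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1
        return vote_count.argmax(-1)                          # predicted class per sample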
def _all_validation_loop(self, device, dataloader,task):
""" weight loss and accuracy using sample specific weights """
self.get_prediction_net(task)
ds_len = len(dataloader.dataset)
acc = 0
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y) in enumerate(dataloader):
x, y= x.to(device),y.to(device)
out = []
for model in self.models:
out += [model(x)]
out = torch.stack(out).argmax(-1)
acc += (out == y.view(1,-1)).int().max(0)[0].float().sum().item()
return acc / ds_len
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n+=raw_x.size()[0]
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
best=self._all_validation_loop(device,validation_loader,task)
print('among best ', best)
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
|
alma-main
|
crlapi/sl/clmodels/ensemble.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Ensemble(Finetune):
def get_prediction_net(self,task):
for i, model in enumerate(self.models):
model.eval()
self.models[i] = model.to(self.config.device)
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
out = out.mean(0)
return out
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
prev_sample_weights = None
to_print = []
for around in range(self.config.n_rounds):
# start new round with the best model of the last one
if around > 0:
model = copy.deepcopy(best_model)
best_model = copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
            # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
epoch = 0
while True: # Run until convergence
epoch += 1
# Make sure model is ready for train
model.train()
# Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n+=raw_x.size()[0]
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
# Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print(f"\t Round {around}. Found best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience or epoch == self.config.max_epochs:
break
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
to_print += [fill(accs) + '\t' + str(ensemble)]
for item in to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
|
alma-main
|
crlapi/sl/clmodels/debug_ensemble.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
from itertools import chain
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class KEnsemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
out = F.softmax(out, dim=-1)
if self.config.vote:
votes = out.argmax(-1)
oh_votes = F.one_hot(votes, num_classes=out.size(-1))
vote_count = oh_votes.sum(0).float()
most_confident = out.max(0)[0].max(-1)[1]
# Break ties
vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1
out = vote_count
else:
out = out.mean(0)
return out
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
models = [self.build_initial_net(task,**model_args) for _ in range(self.config.k)]
n_params = sum(np.prod(x.shape) for x in models[0].parameters())
print(f'new model has {n_params} params')
else:
# get the last k models
models = [copy.deepcopy(model) for model in self.models]
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, models[0]) * self.config.k
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
training_loaders = []
for i in range(self.config.k):
training_loaders += [torch.utils.data.DataLoader(
training_loader.dataset,
batch_size=training_loader.batch_size,
shuffle=True
)]
best_models = [copy.deepcopy(model) for model in models]
best_losses, best_accs = [1e10] * self.config.k, [0] * self.config.k
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
models = [model.to(device) for model in models]
optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))
#Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
[model.train() for model in models]
# Keep a single track of these for now
training_loss=0.0
training_accuracy=0.0
n=0
for i, items in enumerate(zip(*training_loaders)):
xs, ys = [], []
for item in items:
x, y = item
x, y = x.to(device), y.to(device)
xs += [train_aug(x)]
ys += [y]
xs = torch.stack(xs)
ys = torch.stack(ys)
loss, acc = 0, 0
for model_idx in range(self.config.k):
model, x, y = models[model_idx], xs[model_idx], ys[model_idx]
predicted = model(x)
loss += F.cross_entropy(predicted,y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
acc += nb_ok/x.size()[0]
accuracy = acc / self.config.k
loss_ = loss.item() / self.config.k
training_accuracy += accuracy
training_loss += loss_
n += xs.size(1)
n_fwd_samples += xs.size(1)
logger.add_scalar("train/loss",loss_,iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
#Validation
            training_accuracy /= (i + 1)
            training_loss /= (i + 1)
outs = [self._validation_loop(model,device,validation_loader) for model in models]
validation_losses = [x['loss'] for x in outs]
validation_accuracies = [x['accuracy'] for x in outs]
validation_loss, validation_accuracy = np.mean(validation_losses), np.mean(validation_accuracies)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
found_best = False
for model_idx in range(self.config.k):
if validation_accuracies[model_idx] > best_accs[model_idx]:
print("\tFound best model at epoch ",epoch, '\t', model_idx)
best_models[model_idx].load_state_dict(_state_dict(models[model_idx],"cpu"))
best_accs[model_idx] = validation_accuracies[model_idx]
found_best = True
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if found_best:
patience_count = 0
else:
patience_count += 1
if patience_count == patience:
break
# overwrite the best models
self.models = nn.ModuleList(best_models)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
        # flops_per_input already accounts for all k models (see count_flops above)
        logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', np.mean(best_accs), 0)
return self
|
alma-main
|
crlapi/sl/clmodels/k_ensemble.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import torch
import numpy as np
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.core import SupervisedCLModel
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Finetune_Grow(SupervisedCLModel):
def __init__(self, stream, clmodel_args):
super().__init__()
self.models=[]
self.config=clmodel_args
def get_prediction_net(self,task):
if task.task_descriptor() is None:
model = self.models[-1]
else:
            model = self.models[task.task_descriptor()]
model.eval()
return model
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
if len(self.models)==0:
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
elif (task.task_descriptor() % self.config['grow_every']) == 0:
print('growing')
model=copy.deepcopy(self.models[task.task_descriptor()-1])
model=model.grow(validation_loader,**self.config)
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
if getattr(self.config, 'init_from_scratch', False):
print('re-initializing the model')
def weight_reset(m):
try: m.reset_parameters()
except: pass
model.apply(weight_reset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
flops_per_input = self.count_flops(task, model)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Loss {validation_loss:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
logger.message("Training Done...")
return self
|
alma-main
|
crlapi/sl/clmodels/finetune_grow.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.core import SupervisedCLModel
import time
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Finetune(SupervisedCLModel):
def __init__(self, stream, clmodel_args):
super().__init__()
self.models = nn.ModuleList()
self.config=clmodel_args
def get_prediction_net(self,task):
if task.task_descriptor() is None:
model = self.models[-1]
else:
            model = self.models[task.task_descriptor()]
model.eval()
return model
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(model)
print(f'new model has {n_params} params')
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
training_loss=0.0
training_accuracy=0.0
n=0
start = time.time()
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n+=raw_x.size()[0]
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
epoch_time = time.time() - start
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar('training/one_epoch_time', epoch_time, epoch)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}\t Time {epoch_time:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
|
alma-main
|
crlapi/sl/clmodels/finetune.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import re
from datetime import datetime
import requests
def get_ad_archive_id(data):
"""
Extract ad_archive_id from ad_snapshot_url
"""
return re.search(r"/\?id=([0-9]+)", data["ad_snapshot_url"]).group(1)
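# Illustrative sketch with a made-up snapshot URL: the archive id is the digits of the
# "?id=" query parameter in ad_snapshot_url, which is all get_ad_archive_id relies on.
def _ad_archive_id_demo():
    data = {"ad_snapshot_url": "https://www.facebook.com/ads/archive/render_ad/?id=123456789&access_token=TOKEN"}
    assert get_ad_archive_id(data) == "123456789"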
class FbAdsLibraryTraversal:
default_url_pattern = (
"https://graph.facebook.com/{}/ads_archive?access_token={}&"
+ "fields={}&search_terms={}&ad_reached_countries={}&search_page_ids={}&"
+ "ad_active_status={}&limit={}"
)
default_api_version = "v14.0"
def __init__(
self,
access_token,
fields,
search_term,
country,
search_page_ids="",
ad_active_status="ALL",
after_date="1970-01-01",
page_limit=500,
api_version=None,
retry_limit=3,
):
self.page_count = 0
self.access_token = access_token
self.fields = fields
self.search_term = search_term
self.country = country
self.after_date = after_date
self.search_page_ids = search_page_ids
self.ad_active_status = ad_active_status
self.page_limit = page_limit
self.retry_limit = retry_limit
if api_version is None:
self.api_version = self.default_api_version
else:
self.api_version = api_version
def generate_ad_archives(self):
next_page_url = self.default_url_pattern.format(
self.api_version,
self.access_token,
self.fields,
self.search_term,
self.country,
self.search_page_ids,
self.ad_active_status,
self.page_limit,
)
return self.__class__._get_ad_archives_from_url(
next_page_url, after_date=self.after_date, retry_limit=self.retry_limit
)
@staticmethod
def _get_ad_archives_from_url(
next_page_url, after_date="1970-01-01", retry_limit=3
):
last_error_url = None
last_retry_count = 0
start_time_cutoff_after = datetime.strptime(after_date, "%Y-%m-%d").timestamp()
while next_page_url is not None:
response = requests.get(next_page_url)
response_data = json.loads(response.text)
if "error" in response_data:
if next_page_url == last_error_url:
# failed again
if last_retry_count >= retry_limit:
raise Exception(
"Error message: [{}], failed on URL: [{}]".format(
json.dumps(response_data["error"]), next_page_url
)
)
else:
last_error_url = next_page_url
last_retry_count = 0
last_retry_count += 1
continue
filtered = list(
filter(
lambda ad_archive: ("ad_delivery_start_time" in ad_archive)
and (
datetime.strptime(
ad_archive["ad_delivery_start_time"], "%Y-%m-%d"
).timestamp()
>= start_time_cutoff_after
),
response_data["data"],
)
)
if len(filtered) == 0:
# if no data after the after_date, break
next_page_url = None
break
yield filtered
if "paging" in response_data:
                next_page_url = response_data["paging"].get("next")
else:
next_page_url = None
@classmethod
def generate_ad_archives_from_url(cls, failure_url, after_date="1970-01-01"):
"""
        If a previous run failed with an error, we can resume from the last failure URL.
"""
return cls._get_ad_archives_from_url(failure_url, after_date=after_date)
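# Illustrative sketch of driving the traversal end to end; the access token is a
# placeholder and each yielded page is a list of ad_archive dicts already filtered
# by ad_delivery_start_time >= after_date.
def _traversal_demo(access_token):
    api = FbAdsLibraryTraversal(
        access_token,
        "id,ad_snapshot_url,ad_delivery_start_time",
        "election",
        "US",
        after_date="2022-01-01",
    )
    for page in api.generate_ad_archives():
        for ad in page:
            print(get_ad_archive_id(ad))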
|
Ad-Library-API-Script-Repository-main
|
python/fb_ads_library_api.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
from collections import Counter
def get_operators():
"""
Feel free to add your own 'operator' here;
The input will be:
        generator_ad_archives: a generator of arrays of ad_archive objects
args: extra arguments passed in from CLI
is_verbose: check this for debugging information
"""
return {
"count": count_ads,
"save": save_to_file,
"save_to_csv": save_to_csv,
"start_time_trending": count_start_time_trending,
}
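# Illustrative sketch of a custom operator following the contract described in get_operators
# (hypothetical; not wired in). To expose it, add "list_pages": list_page_names to the dict above.
def list_page_names(generator_ad_archives, args, is_verbose=False):
    seen = set()
    for ad_archives in generator_ad_archives:
        for ad in ad_archives:
            if "page_name" in ad:
                seen.add(ad["page_name"])
    if is_verbose:
        print("Distinct pages seen: %d" % len(seen))
    for name in sorted(seen):
        print(name)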
def count_ads(generator_ad_archives, args, is_verbose=False):
"""
Count how many ad_archives match your query
"""
count = 0
for ad_archives in generator_ad_archives:
count += len(ad_archives)
if is_verbose:
print("counting %d" % count)
print("Total number of ads match the query: {}".format(count))
def save_to_file(generator_ad_archives, args, is_verbose=False):
"""
Save all retrieved ad_archives to the file; each ad_archive will be
stored in JSON format in a single line;
"""
if len(args) != 1:
raise Exception("save action requires exact 1 param: output file")
with open(args[0], "w+") as file:
count = 0
for ad_archives in generator_ad_archives:
for data in ad_archives:
file.write(json.dumps(data))
file.write("\n")
count += len(ad_archives)
if is_verbose:
print("Items wrote: %d" % count)
print("Total number of ads wrote: %d" % count)
def save_to_csv(generator_ad_archives, args, fields, is_verbose=False):
"""
Save all retrieved ad_archives to the output file. Each ad_archive will be
stored as a row in the CSV
"""
if len(args) != 1:
raise Exception("save_to_csv action takes 1 argument: output_file")
delimiter = ","
total_count = 0
output = fields + "\n"
output_file = args[0]
for ad_archives in generator_ad_archives:
total_count += len(ad_archives)
if is_verbose:
print("Items processed: %d" % total_count)
for ad_archive in ad_archives:
for field in list(fields.split(delimiter)):
if field in ad_archive:
value = ad_archive[field]
if (type(value) == list and type(value[0]) == dict) or type(
value
) == dict:
value = json.dumps(value)
elif type(value) == list:
value = delimiter.join(value)
output += (
'"' + value.replace("\n", "").replace('"', "") + '"' + delimiter
)
else:
output += delimiter
output = output.rstrip(",") + "\n"
with open(output_file, "w") as csvfile:
csvfile.write(output)
print("Successfully wrote data to file: %s" % output_file)
def count_start_time_trending(generator_ad_archives, args, is_verbose=False):
"""
    Output the trending count of ads by start date.
    Accepts one parameter:
        output_file: path to write the CSV
"""
if len(args) != 1:
raise Exception("start_time_trending action takes 1 arguments: output_file")
total_count = 0
output_file = args[0]
date_to_count = Counter({})
for ad_archives in generator_ad_archives:
total_count += len(ad_archives)
if is_verbose:
print("Item processed: %d" % total_count)
start_dates = list(
map(
lambda data: datetime.datetime.strptime(
data["ad_delivery_start_time"], "%Y-%m-%d"
).strftime("%Y-%m-%d"),
ad_archives,
)
)
date_to_count.update(start_dates)
with open(output_file, "w") as csvfile:
csvfile.write("date, count\n")
for date in date_to_count.keys():
csvfile.write("%s, %s\n" % (date, date_to_count[date]))
print("Successfully wrote data to file: %s" % output_file)
|
Ad-Library-API-Script-Repository-main
|
python/fb_ads_library_api_operators.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from iso3166 import countries
supported_countries = [
"AT",
"BE",
"BG",
"CA",
"CY",
"CZ",
"DE",
"DK",
"EE",
"ES",
"FI",
"FR",
"GB",
"GR",
"HR",
"HU",
"IE",
"IL",
"IN",
"IT",
"LT",
"LU",
"LV",
"MT",
"NL",
"PL",
"PT",
"RO",
"SE",
"SI",
"SK",
"UA",
"US",
]
valid_query_fields = [
"ad_creation_time",
"ad_creative_body",
"ad_creative_bodies",
"ad_creative_link_caption",
"ad_creative_link_captions",
"ad_creative_link_description",
"ad_creative_link_descriptions",
"ad_creative_link_title",
"ad_creative_link_titles",
"ad_delivery_start_time",
"ad_delivery_stop_time",
"ad_snapshot_url",
"currency",
"delivery_by_region",
"demographic_distribution",
"bylines",
"id",
"impressions",
"languages",
"page_id",
"page_name",
"potential_reach",
"publisher_platforms",
"region_distribution",
"spend",
]
def get_country_code(country_str):
"""
Convert the country input to valid country code
"""
global supported_countries
try:
country = countries.get(country_str)
except Exception:
country = None
if not country or country.alpha2 not in supported_countries:
return None
return country.alpha2
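# Illustrative sketch: get_country_code accepts a country name or an ISO code and returns
# the alpha-2 code only for countries the Ad Library API supports, otherwise None.
def _country_code_demo():
    assert get_country_code("France") == "FR"
    assert get_country_code("DE") == "DE"
    assert get_country_code("Atlantis") is None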
def is_valid_fields(field):
"""
The Facebook Ads Library API has a list of supported fields
"""
global valid_query_fields
return field in valid_query_fields
|
Ad-Library-API-Script-Repository-main
|
python/fb_ads_library_api_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from fb_ads_library_api import FbAdsLibraryTraversal
from fb_ads_library_api_operators import get_operators, save_to_csv
from fb_ads_library_api_utils import get_country_code, is_valid_fields
def get_parser():
parser = argparse.ArgumentParser(
description="The Facebook Ads Library API CLI Utility"
)
parser.add_argument(
"-t",
"--access-token",
help="The Facebook developer access token",
required=True,
)
parser.add_argument(
"-f",
"--fields",
help="Fields to retrieve from the Ad Library API",
required=True,
type=validate_fields_param,
)
parser.add_argument("-s", "--search-term", help="The term you want to search for")
parser.add_argument(
"-c",
"--country",
help="Comma-separated country code (no spaces)",
required=True,
type=validate_country_param,
)
parser.add_argument(
"--search-page-ids", help="The specific Facebook Page you want to search"
)
parser.add_argument(
"--ad-active-status",
help="Filter by the current status of the ads at the moment the script runs",
)
parser.add_argument(
"--after-date", help="Only return ads that started delivery after this date"
)
parser.add_argument("--batch-size", type=int, help="Batch size")
parser.add_argument(
"--retry-limit",
type=int,
help="When an error occurs, the script will abort if it fails to get the same batch this amount of times",
)
parser.add_argument("-v", "--verbose", action="store_true")
actions = ",".join(get_operators().keys())
parser.add_argument(
"action", help="Action to take on the ads, possible values: %s" % actions
)
parser.add_argument(
"args", nargs=argparse.REMAINDER, help="The parameter for the specific action"
)
return parser
def validate_country_param(country_input):
if not country_input:
return ""
country_list = list(filter(lambda x: x.strip(), country_input.split(",")))
if not country_list:
raise argparse.ArgumentTypeError("Country cannot be empty")
valid_country_codes = list(map(lambda x: get_country_code(x), country_list))
invalid_inputs = {
key: value
for (key, value) in zip(country_list, valid_country_codes)
if value is None
}
if invalid_inputs:
raise argparse.ArgumentTypeError(
"Invalid/unsupported country code: %s" % (",".join(invalid_inputs.keys()))
)
else:
return ",".join(valid_country_codes)
def validate_fields_param(fields_input):
if not fields_input:
return False
fields_list = list(
filter(lambda x: x, map(lambda x: x.strip(), fields_input.split(",")))
)
if not fields_list:
raise argparse.ArgumentTypeError("Fields cannot be empty")
invalid_fields = list(filter(lambda x: not is_valid_fields(x), fields_list))
if not invalid_fields:
return ",".join(fields_list)
else:
raise argparse.ArgumentTypeError(
"Unsupported fields: %s" % (",".join(invalid_fields))
)
def main():
parser = get_parser()
opts = parser.parse_args()
if not opts.search_term and not opts.search_page_ids:
print("At least one must be set: --search-term, --search-page-ids")
sys.exit(1)
if not opts.search_term:
search_term = "."
else:
search_term = opts.search_term
api = FbAdsLibraryTraversal(
opts.access_token, opts.fields, search_term, opts.country
)
if opts.search_page_ids:
api.search_page_ids = opts.search_page_ids
if opts.ad_active_status:
api.ad_active_status = opts.ad_active_status
if opts.batch_size:
api.page_limit = opts.batch_size
if opts.retry_limit:
api.retry_limit = opts.retry_limit
if opts.after_date:
api.after_date = opts.after_date
generator_ad_archives = api.generate_ad_archives()
if opts.action in get_operators():
if opts.action == "save_to_csv":
save_to_csv(
generator_ad_archives, opts.args, opts.fields, is_verbose=opts.verbose
)
else:
get_operators()[opts.action](
generator_ad_archives, opts.args, is_verbose=opts.verbose
)
else:
print("Invalid 'action' value: %s" % opts.action)
sys.exit(1)
if __name__ == "__main__":
main()
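# --- Editor's note: hedged usage sketch, not part of the original file. ---
# One plausible invocation (the access token and output path are placeholders):
#
#   python fb_ads_library_api_cli.py \
#       -t <ACCESS_TOKEN> \
#       -f ad_creation_time,ad_delivery_start_time,page_name \
#       -c US \
#       -s california \
#       start_time_trending output.csv
#
# The action name must be one of the keys returned by get_operators(), and every
# field must appear in valid_query_fields (see fb_ads_library_api_utils.py).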
|
Ad-Library-API-Script-Repository-main
|
python/fb_ads_library_api_cli.py
|
import argparse
from utils.train_utils import add_flags_from_config
config_args = {
'training_config': {
'lr': (0.01, 'learning rate'),
'dropout': (0.0, 'dropout probability'),
'cuda': (-1, 'which cuda device to use (-1 for cpu training)'),
'epochs': (5000, 'maximum number of epochs to train for'),
'weight-decay': (0., 'l2 regularization strength'),
'optimizer': ('Adam', 'which optimizer to use, can be any of [Adam, RiemannianAdam]'),
'momentum': (0.999, 'momentum in optimizer'),
'patience': (100, 'patience for early stopping'),
'seed': (1234, 'seed for training'),
        'log-freq': (1, 'how often to print train/val metrics (in epochs)'),
'eval-freq': (1, 'how often to compute val metrics (in epochs)'),
'save': (0, '1 to save model and logs and 0 otherwise'),
'save-dir': (None, 'path to save training logs and model weights (defaults to logs/task/date/run/)'),
'sweep-c': (0, ''),
'lr-reduce-freq': (None, 'reduce lr every lr-reduce-freq or None to keep lr constant'),
'gamma': (0.5, 'gamma for lr scheduler'),
'print-epoch': (True, ''),
'grad-clip': (None, 'max norm for gradient clipping, or None for no gradient clipping'),
'min-epochs': (100, 'do not early stop before min-epochs')
},
'model_config': {
        'task': ('nc', 'which task to train on, can be any of [lp, nc]'),
        'model': ('GCN', 'which encoder to use, can be any of [Shallow, MLP, HNN, GCN, GAT, HGCN]'),
'dim': (128, 'embedding dimension'),
'manifold': ('Euclidean', 'which manifold to use, can be any of [Euclidean, Hyperboloid, PoincareBall]'),
'c': (1.0, 'hyperbolic radius, set to None for trainable curvature'),
'r': (2., 'fermi-dirac decoder parameter for lp'),
't': (1., 'fermi-dirac decoder parameter for lp'),
'pretrained-embeddings': (None, 'path to pretrained embeddings (.npy file) for Shallow node classification'),
'pos-weight': (0, 'whether to upweight positive class in node classification tasks'),
'num-layers': (2, 'number of hidden layers in encoder'),
'bias': (1, 'whether to use bias (1) or not (0)'),
'act': ('relu', 'which activation function to use (or None for no activation)'),
        'n-heads': (4, 'number of attention heads for graph attention networks, must be a divisor of dim'),
'alpha': (0.2, 'alpha for leakyrelu in graph attention networks'),
'double-precision': ('0', 'whether to use double precision'),
'use-att': (0, 'whether to use hyperbolic attention or not'),
        'local-agg': (0, 'whether to use local tangent space aggregation or not')
},
'data_config': {
'dataset': ('cora', 'which dataset to use'),
'val-prop': (0.05, 'proportion of validation edges for link prediction'),
'test-prop': (0.1, 'proportion of test edges for link prediction'),
'use-feats': (1, 'whether to use node features or not'),
'normalize-feats': (1, 'whether to normalize input node features'),
'normalize-adj': (1, 'whether to row-normalize the adjacency matrix'),
'split-seed': (1234, 'seed for data splits (train/test/val)'),
}
}
parser = argparse.ArgumentParser()
for _, config_dict in config_args.items():
parser = add_flags_from_config(parser, config_dict)
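if __name__ == "__main__":
    # --- Editor's note: hedged sketch, not part of the original file. ---
    # The nested dicts above are flattened into argparse flags by
    # add_flags_from_config; parsing an empty argv shows the defaults.
    args = parser.parse_args([])
    print(args.lr, args.model, args.manifold, args.dataset)  # 0.01 GCN Euclidean cora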
|
hgcn-master
|
config.py
|
from __future__ import print_function
from __future__ import division
|
hgcn-master
|
__init__.py
|
from __future__ import division
from __future__ import print_function
import datetime
import json
import logging
import os
import pickle
import time
import numpy as np
import optimizers
import torch
from config import parser
from models.base_models import NCModel, LPModel
from utils.data_utils import load_data
from utils.train_utils import get_dir_name, format_metrics
def train(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if int(args.double_precision):
torch.set_default_dtype(torch.float64)
if int(args.cuda) >= 0:
torch.cuda.manual_seed(args.seed)
args.device = 'cuda:' + str(args.cuda) if int(args.cuda) >= 0 else 'cpu'
args.patience = args.epochs if not args.patience else int(args.patience)
logging.getLogger().setLevel(logging.INFO)
if args.save:
if not args.save_dir:
dt = datetime.datetime.now()
date = f"{dt.year}_{dt.month}_{dt.day}"
models_dir = os.path.join(os.environ['LOG_DIR'], args.task, date)
save_dir = get_dir_name(models_dir)
else:
save_dir = args.save_dir
logging.basicConfig(level=logging.INFO,
handlers=[
logging.FileHandler(os.path.join(save_dir, 'log.txt')),
logging.StreamHandler()
])
logging.info(f'Using: {args.device}')
logging.info("Using seed {}.".format(args.seed))
# Load data
data = load_data(args, os.path.join(os.environ['DATAPATH'], args.dataset))
args.n_nodes, args.feat_dim = data['features'].shape
if args.task == 'nc':
Model = NCModel
args.n_classes = int(data['labels'].max() + 1)
logging.info(f'Num classes: {args.n_classes}')
else:
args.nb_false_edges = len(data['train_edges_false'])
args.nb_edges = len(data['train_edges'])
if args.task == 'lp':
Model = LPModel
        else:
            Model = RECModel  # NOTE: RECModel is not imported above; only the 'nc' and 'lp' tasks are runnable as written
# No validation for reconstruction task
args.eval_freq = args.epochs + 1
if not args.lr_reduce_freq:
args.lr_reduce_freq = args.epochs
# Model and optimizer
model = Model(args)
logging.info(str(model))
optimizer = getattr(optimizers, args.optimizer)(params=model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=int(args.lr_reduce_freq),
gamma=float(args.gamma)
)
tot_params = sum([np.prod(p.size()) for p in model.parameters()])
logging.info(f"Total number of parameters: {tot_params}")
if args.cuda is not None and int(args.cuda) >= 0 :
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
model = model.to(args.device)
for x, val in data.items():
if torch.is_tensor(data[x]):
data[x] = data[x].to(args.device)
# Train model
t_total = time.time()
counter = 0
best_val_metrics = model.init_metric_dict()
best_test_metrics = None
best_emb = None
for epoch in range(args.epochs):
t = time.time()
model.train()
optimizer.zero_grad()
embeddings = model.encode(data['features'], data['adj_train_norm'])
train_metrics = model.compute_metrics(embeddings, data, 'train')
train_metrics['loss'].backward()
if args.grad_clip is not None:
max_norm = float(args.grad_clip)
all_params = list(model.parameters())
for param in all_params:
torch.nn.utils.clip_grad_norm_(param, max_norm)
optimizer.step()
lr_scheduler.step()
if (epoch + 1) % args.log_freq == 0:
logging.info(" ".join(['Epoch: {:04d}'.format(epoch + 1),
'lr: {}'.format(lr_scheduler.get_lr()[0]),
format_metrics(train_metrics, 'train'),
'time: {:.4f}s'.format(time.time() - t)
]))
if (epoch + 1) % args.eval_freq == 0:
model.eval()
embeddings = model.encode(data['features'], data['adj_train_norm'])
val_metrics = model.compute_metrics(embeddings, data, 'val')
if (epoch + 1) % args.log_freq == 0:
logging.info(" ".join(['Epoch: {:04d}'.format(epoch + 1), format_metrics(val_metrics, 'val')]))
if model.has_improved(best_val_metrics, val_metrics):
best_test_metrics = model.compute_metrics(embeddings, data, 'test')
best_emb = embeddings.cpu()
if args.save:
np.save(os.path.join(save_dir, 'embeddings.npy'), best_emb.detach().numpy())
best_val_metrics = val_metrics
counter = 0
else:
counter += 1
if counter == args.patience and epoch > args.min_epochs:
logging.info("Early stopping")
break
logging.info("Optimization Finished!")
logging.info("Total time elapsed: {:.4f}s".format(time.time() - t_total))
if not best_test_metrics:
model.eval()
best_emb = model.encode(data['features'], data['adj_train_norm'])
best_test_metrics = model.compute_metrics(best_emb, data, 'test')
logging.info(" ".join(["Val set results:", format_metrics(best_val_metrics, 'val')]))
logging.info(" ".join(["Test set results:", format_metrics(best_test_metrics, 'test')]))
if args.save:
np.save(os.path.join(save_dir, 'embeddings.npy'), best_emb.cpu().detach().numpy())
if hasattr(model.encoder, 'att_adj'):
filename = os.path.join(save_dir, args.dataset + '_att_adj.p')
pickle.dump(model.encoder.att_adj.cpu().to_dense(), open(filename, 'wb'))
print('Dumped attention adj: ' + filename)
json.dump(vars(args), open(os.path.join(save_dir, 'config.json'), 'w'))
torch.save(model.state_dict(), os.path.join(save_dir, 'model.pth'))
logging.info(f"Saved model in {save_dir}")
if __name__ == '__main__':
args = parser.parse_args()
train(args)
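# --- Editor's note: hedged usage sketch, not part of the original file. ---
# train() reads data from $DATAPATH and, when --save 1, writes logs and weights
# under $LOG_DIR, so both environment variables must be set. One plausible CPU
# invocation (flag names come from config.py; consult the repository README for
# the exact hyperparameters used in the paper):
#
#   DATAPATH=./data LOG_DIR=./logs python train.py \
#       --task nc --dataset cora --model HGCN --manifold PoincareBall \
#       --lr 0.01 --dim 16 --num-layers 2 --act relu --dropout 0.5 --cuda -1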
|
hgcn-master
|
train.py
|
"""Attention layers (some modules are copied from https://github.com/Diego999/pyGAT."""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class DenseAtt(nn.Module):
def __init__(self, in_features, dropout):
super(DenseAtt, self).__init__()
self.dropout = dropout
self.linear = nn.Linear(2 * in_features, 1, bias=True)
self.in_features = in_features
def forward (self, x, adj):
n = x.size(0)
# n x 1 x d
x_left = torch.unsqueeze(x, 1)
x_left = x_left.expand(-1, n, -1)
# 1 x n x d
x_right = torch.unsqueeze(x, 0)
x_right = x_right.expand(n, -1, -1)
x_cat = torch.cat((x_left, x_right), dim=2)
att_adj = self.linear(x_cat).squeeze()
        att_adj = torch.sigmoid(att_adj)  # torch.sigmoid: F.sigmoid is deprecated in newer PyTorch
att_adj = torch.mul(adj.to_dense(), att_adj)
return att_adj
class SpecialSpmmFunction(torch.autograd.Function):
"""Special function for only sparse region backpropataion layer."""
@staticmethod
def forward(ctx, indices, values, shape, b):
assert indices.requires_grad == False
a = torch.sparse_coo_tensor(indices, values, shape)
ctx.save_for_backward(a, b)
ctx.N = shape[0]
return torch.matmul(a, b)
@staticmethod
def backward(ctx, grad_output):
a, b = ctx.saved_tensors
grad_values = grad_b = None
if ctx.needs_input_grad[1]:
grad_a_dense = grad_output.matmul(b.t())
edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]
grad_values = grad_a_dense.view(-1)[edge_idx]
if ctx.needs_input_grad[3]:
grad_b = a.t().matmul(grad_output)
return None, grad_values, None, grad_b
class SpecialSpmm(nn.Module):
def forward(self, indices, values, shape, b):
return SpecialSpmmFunction.apply(indices, values, shape, b)
class SpGraphAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, activation):
super(SpGraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(1, 2 * out_features)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.special_spmm = SpecialSpmm()
self.act = activation
def forward(self, input, adj):
N = input.size()[0]
edge = adj._indices()
h = torch.mm(input, self.W)
# h: N x out
assert not torch.isnan(h).any()
# Self-attention on the nodes - Shared attention mechanism
edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
# edge: 2*D x E
edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))
assert not torch.isnan(edge_e).any()
# edge_e: E
ones = torch.ones(size=(N, 1))
if h.is_cuda:
ones = ones.cuda()
e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), ones)
# e_rowsum: N x 1
edge_e = self.dropout(edge_e)
# edge_e: E
h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)
assert not torch.isnan(h_prime).any()
# h_prime: N x out
h_prime = h_prime.div(e_rowsum)
# h_prime: N x out
assert not torch.isnan(h_prime).any()
return self.act(h_prime)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class GraphAttentionLayer(nn.Module):
def __init__(self, input_dim, output_dim, dropout, activation, alpha, nheads, concat):
"""Sparse version of GAT."""
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.output_dim = output_dim
self.attentions = [SpGraphAttentionLayer(input_dim,
output_dim,
dropout=dropout,
alpha=alpha,
activation=activation) for _ in range(nheads)]
self.concat = concat
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, input):
x, adj = input
x = F.dropout(x, self.dropout, training=self.training)
if self.concat:
h = torch.cat([att(x, adj) for att in self.attentions], dim=1)
else:
h_cat = torch.cat([att(x, adj).view((-1, self.output_dim, 1)) for att in self.attentions], dim=2)
h = torch.mean(h_cat, dim=2)
h = F.dropout(h, self.dropout, training=self.training)
return (h, adj)
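if __name__ == "__main__":
    # --- Editor's note: hedged shape-check sketch, not part of the original file. ---
    # Runs one multi-head sparse attention layer on a tiny 4-node graph; the graph,
    # dimensions and hyperparameters are arbitrary illustration values.
    n, d_in, d_out, heads = 4, 3, 2, 2
    idx = torch.tensor([[0, 1, 2, 3, 0], [1, 0, 3, 2, 0]])
    adj = torch.sparse_coo_tensor(idx, torch.ones(idx.size(1)), (n, n)).coalesce()
    layer = GraphAttentionLayer(d_in, d_out, 0.0, F.elu, 0.2, heads, True)
    h, _ = layer((torch.randn(n, d_in), adj))
    print(h.shape)  # torch.Size([4, 4]): d_out * heads when concat=True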
|
hgcn-master
|
layers/att_layers.py
|
hgcn-master
|
layers/__init__.py
|
|
"""Hyperbolic layers."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.modules.module import Module
from layers.att_layers import DenseAtt
def get_dim_act_curv(args):
"""
Helper function to get dimension and activation at every layer.
:param args:
:return:
"""
if not args.act:
act = lambda x: x
else:
act = getattr(F, args.act)
acts = [act] * (args.num_layers - 1)
dims = [args.feat_dim] + ([args.dim] * (args.num_layers - 1))
if args.task in ['lp', 'rec']:
dims += [args.dim]
acts += [act]
n_curvatures = args.num_layers
else:
n_curvatures = args.num_layers - 1
if args.c is None:
# create list of trainable curvature parameters
curvatures = [nn.Parameter(torch.Tensor([1.])) for _ in range(n_curvatures)]
else:
# fixed curvature
curvatures = [torch.tensor([args.c]) for _ in range(n_curvatures)]
if not args.cuda == -1:
curvatures = [curv.to(args.device) for curv in curvatures]
return dims, acts, curvatures
class HNNLayer(nn.Module):
"""
Hyperbolic neural networks layer.
"""
def __init__(self, manifold, in_features, out_features, c, dropout, act, use_bias):
super(HNNLayer, self).__init__()
self.linear = HypLinear(manifold, in_features, out_features, c, dropout, use_bias)
self.hyp_act = HypAct(manifold, c, c, act)
def forward(self, x):
h = self.linear.forward(x)
h = self.hyp_act.forward(h)
return h
class HyperbolicGraphConvolution(nn.Module):
"""
Hyperbolic graph convolution layer.
"""
def __init__(self, manifold, in_features, out_features, c_in, c_out, dropout, act, use_bias, use_att, local_agg):
super(HyperbolicGraphConvolution, self).__init__()
self.linear = HypLinear(manifold, in_features, out_features, c_in, dropout, use_bias)
self.agg = HypAgg(manifold, c_in, out_features, dropout, use_att, local_agg)
self.hyp_act = HypAct(manifold, c_in, c_out, act)
def forward(self, input):
x, adj = input
h = self.linear.forward(x)
h = self.agg.forward(h, adj)
h = self.hyp_act.forward(h)
output = h, adj
return output
class HypLinear(nn.Module):
"""
Hyperbolic linear layer.
"""
def __init__(self, manifold, in_features, out_features, c, dropout, use_bias):
super(HypLinear, self).__init__()
self.manifold = manifold
self.in_features = in_features
self.out_features = out_features
self.c = c
self.dropout = dropout
self.use_bias = use_bias
self.bias = nn.Parameter(torch.Tensor(out_features))
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
init.xavier_uniform_(self.weight, gain=math.sqrt(2))
init.constant_(self.bias, 0)
def forward(self, x):
drop_weight = F.dropout(self.weight, self.dropout, training=self.training)
mv = self.manifold.mobius_matvec(drop_weight, x, self.c)
res = self.manifold.proj(mv, self.c)
if self.use_bias:
bias = self.manifold.proj_tan0(self.bias.view(1, -1), self.c)
hyp_bias = self.manifold.expmap0(bias, self.c)
hyp_bias = self.manifold.proj(hyp_bias, self.c)
res = self.manifold.mobius_add(res, hyp_bias, c=self.c)
res = self.manifold.proj(res, self.c)
return res
def extra_repr(self):
return 'in_features={}, out_features={}, c={}'.format(
self.in_features, self.out_features, self.c
)
class HypAgg(Module):
"""
Hyperbolic aggregation layer.
"""
def __init__(self, manifold, c, in_features, dropout, use_att, local_agg):
super(HypAgg, self).__init__()
self.manifold = manifold
self.c = c
self.in_features = in_features
self.dropout = dropout
self.local_agg = local_agg
self.use_att = use_att
if self.use_att:
self.att = DenseAtt(in_features, dropout)
def forward(self, x, adj):
x_tangent = self.manifold.logmap0(x, c=self.c)
if self.use_att:
if self.local_agg:
x_local_tangent = []
for i in range(x.size(0)):
x_local_tangent.append(self.manifold.logmap(x[i], x, c=self.c))
x_local_tangent = torch.stack(x_local_tangent, dim=0)
adj_att = self.att(x_tangent, adj)
att_rep = adj_att.unsqueeze(-1) * x_local_tangent
support_t = torch.sum(adj_att.unsqueeze(-1) * x_local_tangent, dim=1)
output = self.manifold.proj(self.manifold.expmap(x, support_t, c=self.c), c=self.c)
return output
else:
adj_att = self.att(x_tangent, adj)
support_t = torch.matmul(adj_att, x_tangent)
else:
support_t = torch.spmm(adj, x_tangent)
output = self.manifold.proj(self.manifold.expmap0(support_t, c=self.c), c=self.c)
return output
def extra_repr(self):
return 'c={}'.format(self.c)
class HypAct(Module):
"""
Hyperbolic activation layer.
"""
def __init__(self, manifold, c_in, c_out, act):
super(HypAct, self).__init__()
self.manifold = manifold
self.c_in = c_in
self.c_out = c_out
self.act = act
def forward(self, x):
xt = self.act(self.manifold.logmap0(x, c=self.c_in))
xt = self.manifold.proj_tan0(xt, c=self.c_out)
return self.manifold.proj(self.manifold.expmap0(xt, c=self.c_out), c=self.c_out)
def extra_repr(self):
return 'c_in={}, c_out={}'.format(
self.c_in, self.c_out
)
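if __name__ == "__main__":
    # --- Editor's note: hedged sketch, not part of the original file. ---
    # HypAct applies a Euclidean activation in the tangent space at the origin and
    # maps the result back onto the manifold; here on a PoincareBall with c = 1.
    from manifolds.poincare import PoincareBall
    manifold = PoincareBall()
    c = torch.tensor([1.0])
    act = HypAct(manifold, c_in=c, c_out=c, act=F.relu)
    x = manifold.proj(manifold.expmap0(torch.randn(2, 3) * 0.1, c), c)
    print(act(x).shape)  # torch.Size([2, 3]); the output stays inside the unit ball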
|
hgcn-master
|
layers/hyp_layers.py
|
"""Euclidean layers."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
def get_dim_act(args):
"""
Helper function to get dimension and activation at every layer.
:param args:
:return:
"""
if not args.act:
act = lambda x: x
else:
act = getattr(F, args.act)
acts = [act] * (args.num_layers - 1)
dims = [args.feat_dim] + ([args.dim] * (args.num_layers - 1))
if args.task in ['lp', 'rec']:
dims += [args.dim]
acts += [act]
return dims, acts
class GraphConvolution(Module):
"""
Simple GCN layer.
"""
def __init__(self, in_features, out_features, dropout, act, use_bias):
super(GraphConvolution, self).__init__()
self.dropout = dropout
self.linear = nn.Linear(in_features, out_features, use_bias)
self.act = act
self.in_features = in_features
self.out_features = out_features
def forward(self, input):
x, adj = input
hidden = self.linear.forward(x)
hidden = F.dropout(hidden, self.dropout, training=self.training)
if adj.is_sparse:
support = torch.spmm(adj, hidden)
else:
support = torch.mm(adj, hidden)
output = self.act(support), adj
return output
def extra_repr(self):
return 'input_dim={}, output_dim={}'.format(
self.in_features, self.out_features
)
class Linear(Module):
"""
Simple Linear layer with dropout.
"""
def __init__(self, in_features, out_features, dropout, act, use_bias):
super(Linear, self).__init__()
self.dropout = dropout
self.linear = nn.Linear(in_features, out_features, use_bias)
self.act = act
def forward(self, x):
hidden = self.linear.forward(x)
hidden = F.dropout(hidden, self.dropout, training=self.training)
out = self.act(hidden)
return out
class FermiDiracDecoder(Module):
"""Fermi Dirac to compute edge probabilities based on distances."""
def __init__(self, r, t):
super(FermiDiracDecoder, self).__init__()
self.r = r
self.t = t
def forward(self, dist):
probs = 1. / (torch.exp((dist - self.r) / self.t) + 1.0)
return probs
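if __name__ == "__main__":
    # --- Editor's note: hedged sketch, not part of the original file. ---
    # The Fermi-Dirac decoder maps a (squared) distance to an edge probability:
    # p(dist) = 1 / (exp((dist - r) / t) + 1), so p(r) = 0.5 and p decays with distance.
    dec = FermiDiracDecoder(r=2.0, t=1.0)
    print(dec(torch.tensor([0.0, 2.0, 10.0])))  # approximately [0.88, 0.50, 0.00]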
|
hgcn-master
|
layers/layers.py
|
"""Hyperboloid manifold."""
import torch
from manifolds.base import Manifold
from utils.math_utils import arcosh, cosh, sinh
class Hyperboloid(Manifold):
"""
Hyperboloid manifold class.
We use the following convention: -x0^2 + x1^2 + ... + xd^2 = -K
c = 1 / K is the hyperbolic curvature.
"""
def __init__(self):
super(Hyperboloid, self).__init__()
self.name = 'Hyperboloid'
self.eps = {torch.float32: 1e-7, torch.float64: 1e-15}
self.min_norm = 1e-15
self.max_norm = 1e6
def minkowski_dot(self, x, y, keepdim=True):
res = torch.sum(x * y, dim=-1) - 2 * x[..., 0] * y[..., 0]
if keepdim:
res = res.view(res.shape + (1,))
return res
def minkowski_norm(self, u, keepdim=True):
dot = self.minkowski_dot(u, u, keepdim=keepdim)
return torch.sqrt(torch.clamp(dot, min=self.eps[u.dtype]))
def sqdist(self, x, y, c):
K = 1. / c
prod = self.minkowski_dot(x, y)
theta = torch.clamp(-prod / K, min=1.0 + self.eps[x.dtype])
sqdist = K * arcosh(theta) ** 2
# clamp distance to avoid nans in Fermi-Dirac decoder
return torch.clamp(sqdist, max=50.0)
def proj(self, x, c):
K = 1. / c
d = x.size(-1) - 1
y = x.narrow(-1, 1, d)
y_sqnorm = torch.norm(y, p=2, dim=1, keepdim=True) ** 2
mask = torch.ones_like(x)
mask[:, 0] = 0
vals = torch.zeros_like(x)
vals[:, 0:1] = torch.sqrt(torch.clamp(K + y_sqnorm, min=self.eps[x.dtype]))
return vals + mask * x
def proj_tan(self, u, x, c):
K = 1. / c
d = x.size(1) - 1
ux = torch.sum(x.narrow(-1, 1, d) * u.narrow(-1, 1, d), dim=1, keepdim=True)
mask = torch.ones_like(u)
mask[:, 0] = 0
vals = torch.zeros_like(u)
vals[:, 0:1] = ux / torch.clamp(x[:, 0:1], min=self.eps[x.dtype])
return vals + mask * u
def proj_tan0(self, u, c):
narrowed = u.narrow(-1, 0, 1)
vals = torch.zeros_like(u)
vals[:, 0:1] = narrowed
return u - vals
def expmap(self, u, x, c):
K = 1. / c
sqrtK = K ** 0.5
normu = self.minkowski_norm(u)
normu = torch.clamp(normu, max=self.max_norm)
theta = normu / sqrtK
theta = torch.clamp(theta, min=self.min_norm)
result = cosh(theta) * x + sinh(theta) * u / theta
return self.proj(result, c)
def logmap(self, x, y, c):
K = 1. / c
xy = torch.clamp(self.minkowski_dot(x, y) + K, max=-self.eps[x.dtype]) - K
u = y + xy * x * c
normu = self.minkowski_norm(u)
normu = torch.clamp(normu, min=self.min_norm)
dist = self.sqdist(x, y, c) ** 0.5
result = dist * u / normu
return self.proj_tan(result, x, c)
def expmap0(self, u, c):
K = 1. / c
sqrtK = K ** 0.5
d = u.size(-1) - 1
x = u.narrow(-1, 1, d).view(-1, d)
x_norm = torch.norm(x, p=2, dim=1, keepdim=True)
x_norm = torch.clamp(x_norm, min=self.min_norm)
theta = x_norm / sqrtK
res = torch.ones_like(u)
res[:, 0:1] = sqrtK * cosh(theta)
res[:, 1:] = sqrtK * sinh(theta) * x / x_norm
return self.proj(res, c)
def logmap0(self, x, c):
K = 1. / c
sqrtK = K ** 0.5
d = x.size(-1) - 1
y = x.narrow(-1, 1, d).view(-1, d)
y_norm = torch.norm(y, p=2, dim=1, keepdim=True)
y_norm = torch.clamp(y_norm, min=self.min_norm)
res = torch.zeros_like(x)
theta = torch.clamp(x[:, 0:1] / sqrtK, min=1.0 + self.eps[x.dtype])
res[:, 1:] = sqrtK * arcosh(theta) * y / y_norm
return res
def mobius_add(self, x, y, c):
u = self.logmap0(y, c)
v = self.ptransp0(x, u, c)
return self.expmap(v, x, c)
def mobius_matvec(self, m, x, c):
u = self.logmap0(x, c)
mu = u @ m.transpose(-1, -2)
return self.expmap0(mu, c)
def ptransp(self, x, y, u, c):
logxy = self.logmap(x, y, c)
logyx = self.logmap(y, x, c)
sqdist = torch.clamp(self.sqdist(x, y, c), min=self.min_norm)
alpha = self.minkowski_dot(logxy, u) / sqdist
res = u - alpha * (logxy + logyx)
return self.proj_tan(res, y, c)
def ptransp0(self, x, u, c):
K = 1. / c
sqrtK = K ** 0.5
x0 = x.narrow(-1, 0, 1)
d = x.size(-1) - 1
y = x.narrow(-1, 1, d)
y_norm = torch.clamp(torch.norm(y, p=2, dim=1, keepdim=True), min=self.min_norm)
y_normalized = y / y_norm
v = torch.ones_like(x)
v[:, 0:1] = - y_norm
v[:, 1:] = (sqrtK - x0) * y_normalized
alpha = torch.sum(y_normalized * u[:, 1:], dim=1, keepdim=True) / sqrtK
res = u - alpha * v
return self.proj_tan(res, x, c)
def to_poincare(self, x, c):
K = 1. / c
sqrtK = K ** 0.5
d = x.size(-1) - 1
return sqrtK * x.narrow(-1, 1, d) / (x[:, 0:1] + sqrtK)
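if __name__ == "__main__":
    # --- Editor's note: hedged sanity check, not part of the original file. ---
    # Points produced by expmap0 should satisfy the hyperboloid constraint
    # <x, x>_Minkowski = -K = -1/c stated in the class docstring.
    manifold = Hyperboloid()
    c = torch.tensor([1.0])
    u = torch.zeros(2, 4)
    u[:, 1:] = torch.randn(2, 3) * 0.1  # tangent vectors at the origin (first coordinate 0)
    x = manifold.expmap0(u, c)
    print(manifold.minkowski_dot(x, x))  # approximately -1.0 for both points
    print(manifold.sqdist(x, x, c))      # approximately 0.0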
|
hgcn-master
|
manifolds/hyperboloid.py
|
"""Poincare ball manifold."""
import torch
from manifolds.base import Manifold
from utils.math_utils import artanh, tanh
class PoincareBall(Manifold):
"""
    PoincareBall manifold class.
We use the following convention: x0^2 + x1^2 + ... + xd^2 < 1 / c
Note that 1/sqrt(c) is the Poincare ball radius.
"""
def __init__(self, ):
super(PoincareBall, self).__init__()
self.name = 'PoincareBall'
self.min_norm = 1e-15
self.eps = {torch.float32: 4e-3, torch.float64: 1e-5}
def sqdist(self, p1, p2, c):
sqrt_c = c ** 0.5
dist_c = artanh(
sqrt_c * self.mobius_add(-p1, p2, c, dim=-1).norm(dim=-1, p=2, keepdim=False)
)
dist = dist_c * 2 / sqrt_c
return dist ** 2
def _lambda_x(self, x, c):
x_sqnorm = torch.sum(x.data.pow(2), dim=-1, keepdim=True)
return 2 / (1. - c * x_sqnorm).clamp_min(self.min_norm)
def egrad2rgrad(self, p, dp, c):
lambda_p = self._lambda_x(p, c)
dp /= lambda_p.pow(2)
return dp
def proj(self, x, c):
norm = torch.clamp_min(x.norm(dim=-1, keepdim=True, p=2), self.min_norm)
maxnorm = (1 - self.eps[x.dtype]) / (c ** 0.5)
cond = norm > maxnorm
projected = x / norm * maxnorm
return torch.where(cond, projected, x)
def proj_tan(self, u, p, c):
return u
def proj_tan0(self, u, c):
return u
def expmap(self, u, p, c):
sqrt_c = c ** 0.5
u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(self.min_norm)
second_term = (
tanh(sqrt_c / 2 * self._lambda_x(p, c) * u_norm)
* u
/ (sqrt_c * u_norm)
)
gamma_1 = self.mobius_add(p, second_term, c)
return gamma_1
def logmap(self, p1, p2, c):
sub = self.mobius_add(-p1, p2, c)
sub_norm = sub.norm(dim=-1, p=2, keepdim=True).clamp_min(self.min_norm)
lam = self._lambda_x(p1, c)
sqrt_c = c ** 0.5
return 2 / sqrt_c / lam * artanh(sqrt_c * sub_norm) * sub / sub_norm
def expmap0(self, u, c):
sqrt_c = c ** 0.5
u_norm = torch.clamp_min(u.norm(dim=-1, p=2, keepdim=True), self.min_norm)
gamma_1 = tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
return gamma_1
def logmap0(self, p, c):
sqrt_c = c ** 0.5
p_norm = p.norm(dim=-1, p=2, keepdim=True).clamp_min(self.min_norm)
scale = 1. / sqrt_c * artanh(sqrt_c * p_norm) / p_norm
return scale * p
def mobius_add(self, x, y, c, dim=-1):
x2 = x.pow(2).sum(dim=dim, keepdim=True)
y2 = y.pow(2).sum(dim=dim, keepdim=True)
xy = (x * y).sum(dim=dim, keepdim=True)
num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
denom = 1 + 2 * c * xy + c ** 2 * x2 * y2
return num / denom.clamp_min(self.min_norm)
def mobius_matvec(self, m, x, c):
sqrt_c = c ** 0.5
x_norm = x.norm(dim=-1, keepdim=True, p=2).clamp_min(self.min_norm)
mx = x @ m.transpose(-1, -2)
mx_norm = mx.norm(dim=-1, keepdim=True, p=2).clamp_min(self.min_norm)
res_c = tanh(mx_norm / x_norm * artanh(sqrt_c * x_norm)) * mx / (mx_norm * sqrt_c)
cond = (mx == 0).prod(-1, keepdim=True, dtype=torch.uint8)
res_0 = torch.zeros(1, dtype=res_c.dtype, device=res_c.device)
res = torch.where(cond, res_0, res_c)
return res
def init_weights(self, w, c, irange=1e-5):
w.data.uniform_(-irange, irange)
return w
def _gyration(self, u, v, w, c, dim: int = -1):
u2 = u.pow(2).sum(dim=dim, keepdim=True)
v2 = v.pow(2).sum(dim=dim, keepdim=True)
uv = (u * v).sum(dim=dim, keepdim=True)
uw = (u * w).sum(dim=dim, keepdim=True)
vw = (v * w).sum(dim=dim, keepdim=True)
c2 = c ** 2
a = -c2 * uw * v2 + c * vw + 2 * c2 * uv * vw
b = -c2 * vw * u2 - c * uw
d = 1 + 2 * c * uv + c2 * u2 * v2
return w + 2 * (a * u + b * v) / d.clamp_min(self.min_norm)
def inner(self, x, c, u, v=None, keepdim=False):
if v is None:
v = u
lambda_x = self._lambda_x(x, c)
return lambda_x ** 2 * (u * v).sum(dim=-1, keepdim=keepdim)
def ptransp(self, x, y, u, c):
lambda_x = self._lambda_x(x, c)
lambda_y = self._lambda_x(y, c)
return self._gyration(y, -x, u, c) * lambda_x / lambda_y
def ptransp_(self, x, y, u, c):
lambda_x = self._lambda_x(x, c)
lambda_y = self._lambda_x(y, c)
return self._gyration(y, -x, u, c) * lambda_x / lambda_y
def ptransp0(self, x, u, c):
lambda_x = self._lambda_x(x, c)
return 2 * u / lambda_x.clamp_min(self.min_norm)
def to_hyperboloid(self, x, c):
K = 1./ c
sqrtK = K ** 0.5
sqnorm = torch.norm(x, p=2, dim=1, keepdim=True) ** 2
return sqrtK * torch.cat([K + sqnorm, 2 * sqrtK * x], dim=1) / (K - sqnorm)
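if __name__ == "__main__":
    # --- Editor's note: hedged sanity check, not part of the original file. ---
    # logmap0 inverts expmap0 at the origin, and a point is at distance 0 from itself.
    ball = PoincareBall()
    c = 1.0
    u = torch.randn(2, 3) * 0.1
    x = ball.proj(ball.expmap0(u, c), c)
    print((ball.logmap0(x, c) - u).abs().max())  # ~0
    print(ball.sqdist(x, x, c))                  # ~0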
|
hgcn-master
|
manifolds/poincare.py
|
from .base import ManifoldParameter
from .euclidean import Euclidean
from .hyperboloid import Hyperboloid
from .poincare import PoincareBall
|
hgcn-master
|
manifolds/__init__.py
|
"""Base manifold."""
from torch.nn import Parameter
class Manifold(object):
"""
Abstract class to define operations on a manifold.
"""
def __init__(self):
super().__init__()
self.eps = 10e-8
def sqdist(self, p1, p2, c):
"""Squared distance between pairs of points."""
raise NotImplementedError
def egrad2rgrad(self, p, dp, c):
"""Converts Euclidean Gradient to Riemannian Gradients."""
raise NotImplementedError
def proj(self, p, c):
"""Projects point p on the manifold."""
raise NotImplementedError
def proj_tan(self, u, p, c):
"""Projects u on the tangent space of p."""
raise NotImplementedError
def proj_tan0(self, u, c):
"""Projects u on the tangent space of the origin."""
raise NotImplementedError
def expmap(self, u, p, c):
"""Exponential map of u at point p."""
raise NotImplementedError
def logmap(self, p1, p2, c):
"""Logarithmic map of point p1 at point p2."""
raise NotImplementedError
def expmap0(self, u, c):
"""Exponential map of u at the origin."""
raise NotImplementedError
def logmap0(self, p, c):
"""Logarithmic map of point p at the origin."""
raise NotImplementedError
def mobius_add(self, x, y, c, dim=-1):
"""Adds points x and y."""
raise NotImplementedError
def mobius_matvec(self, m, x, c):
"""Performs hyperboic martrix-vector multiplication."""
raise NotImplementedError
def init_weights(self, w, c, irange=1e-5):
"""Initializes random weigths on the manifold."""
raise NotImplementedError
def inner(self, p, c, u, v=None, keepdim=False):
"""Inner product for tangent vectors at point x."""
raise NotImplementedError
def ptransp(self, x, y, u, c):
"""Parallel transport of u from x to y."""
raise NotImplementedError
def ptransp0(self, x, u, c):
"""Parallel transport of u from the origin to y."""
raise NotImplementedError
class ManifoldParameter(Parameter):
"""
Subclass of torch.nn.Parameter for Riemannian optimization.
"""
def __new__(cls, data, requires_grad, manifold, c):
return Parameter.__new__(cls, data, requires_grad)
def __init__(self, data, requires_grad, manifold, c):
self.c = c
self.manifold = manifold
def __repr__(self):
return '{} Parameter containing:\n'.format(self.manifold.name) + super(Parameter, self).__repr__()
|
hgcn-master
|
manifolds/base.py
|
"""Euclidean manifold."""
from manifolds.base import Manifold
class Euclidean(Manifold):
"""
Euclidean Manifold class.
"""
def __init__(self):
super(Euclidean, self).__init__()
self.name = 'Euclidean'
def normalize(self, p):
dim = p.size(-1)
p.view(-1, dim).renorm_(2, 0, 1.)
return p
def sqdist(self, p1, p2, c):
return (p1 - p2).pow(2).sum(dim=-1)
def egrad2rgrad(self, p, dp, c):
return dp
def proj(self, p, c):
return p
def proj_tan(self, u, p, c):
return u
def proj_tan0(self, u, c):
return u
def expmap(self, u, p, c):
return p + u
def logmap(self, p1, p2, c):
return p2 - p1
def expmap0(self, u, c):
return u
def logmap0(self, p, c):
return p
def mobius_add(self, x, y, c, dim=-1):
return x + y
def mobius_matvec(self, m, x, c):
mx = x @ m.transpose(-1, -2)
return mx
def init_weights(self, w, c, irange=1e-5):
w.data.uniform_(-irange, irange)
return w
def inner(self, p, c, u, v=None, keepdim=False):
if v is None:
v = u
return (u * v).sum(dim=-1, keepdim=keepdim)
def ptransp(self, x, y, v, c):
return v
def ptransp0(self, x, v, c):
return x + v
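if __name__ == "__main__":
    # --- Editor's note: hedged sanity check, not part of the original file. ---
    # In the Euclidean manifold sqdist reduces to the ordinary squared distance
    # and the exp/log maps reduce to vector addition/subtraction.
    import torch
    manifold = Euclidean()
    p1, p2 = torch.zeros(1, 3), torch.ones(1, 3)
    print(manifold.sqdist(p1, p2, c=1.0))                                   # tensor([3.])
    print(manifold.logmap(p1, manifold.expmap(p2 - p1, p1, c=1.0), c=1.0))  # tensor([[1., 1., 1.]])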
|
hgcn-master
|
manifolds/euclidean.py
|
"""Math utils functions."""
import torch
def cosh(x, clamp=15):
return x.clamp(-clamp, clamp).cosh()
def sinh(x, clamp=15):
return x.clamp(-clamp, clamp).sinh()
def tanh(x, clamp=15):
return x.clamp(-clamp, clamp).tanh()
def arcosh(x):
return Arcosh.apply(x)
def arsinh(x):
return Arsinh.apply(x)
def artanh(x):
return Artanh.apply(x)
class Artanh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(-1 + 1e-15, 1 - 1e-15)
ctx.save_for_backward(x)
z = x.double()
return (torch.log_(1 + z).sub_(torch.log_(1 - z))).mul_(0.5).to(x.dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / (1 - input ** 2)
class Arsinh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
z = x.double()
return (z + torch.sqrt_(1 + z.pow(2))).clamp_min_(1e-15).log_().to(x.dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / (1 + input ** 2) ** 0.5
class Arcosh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(min=1.0 + 1e-15)
ctx.save_for_backward(x)
z = x.double()
return (z + torch.sqrt_(z.pow(2) - 1)).clamp_min_(1e-15).log_().to(x.dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / (input ** 2 - 1) ** 0.5
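if __name__ == "__main__":
    # --- Editor's note: hedged sanity check, not part of the original file. ---
    # The custom autograd functions invert their hyperbolic counterparts
    # (inputs are clamped internally for numerical stability).
    x = torch.linspace(0.5, 3.0, 4)
    print((arcosh(torch.cosh(x)) - x).abs().max())  # ~0
    print((artanh(torch.tanh(x)) - x).abs().max())  # ~0
    print((arsinh(torch.sinh(x)) - x).abs().max())  # ~0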
|
hgcn-master
|
utils/math_utils.py
|
import argparse
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn.modules.loss
def format_metrics(metrics, split):
"""Format metric in metric dict for logging."""
return " ".join(
["{}_{}: {:.4f}".format(split, metric_name, metric_val) for metric_name, metric_val in metrics.items()])
def get_dir_name(models_dir):
"""Gets a directory to save the model.
If the directory already exists, then append a new integer to the end of
it. This method is useful so that we don't overwrite existing models
when launching new jobs.
Args:
models_dir: The directory where all the models are.
Returns:
The name of a new directory to save the training logs and model weights.
"""
if not os.path.exists(models_dir):
save_dir = os.path.join(models_dir, '0')
os.makedirs(save_dir)
else:
existing_dirs = np.array(
[
d
for d in os.listdir(models_dir)
if os.path.isdir(os.path.join(models_dir, d))
]
        ).astype(int)  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
if len(existing_dirs) > 0:
dir_id = str(existing_dirs.max() + 1)
else:
dir_id = "1"
save_dir = os.path.join(models_dir, dir_id)
os.makedirs(save_dir)
return save_dir
def add_flags_from_config(parser, config_dict):
"""
Adds a flag (and default value) to an ArgumentParser for each parameter in a config
"""
def OrNone(default):
def func(x):
# Convert "none" to proper None object
if x.lower() == "none":
return None
# If default is None (and x is not None), return x without conversion as str
elif default is None:
return str(x)
# Otherwise, default has non-None type; convert x to that type
else:
return type(default)(x)
return func
for param in config_dict:
default, description = config_dict[param]
try:
if isinstance(default, dict):
parser = add_flags_from_config(parser, default)
elif isinstance(default, list):
if len(default) > 0:
# pass a list as argument
parser.add_argument(
f"--{param}",
action="append",
type=type(default[0]),
default=default,
help=description
)
                else:
                    parser.add_argument(f"--{param}", action="append", default=default, help=description)
            else:
                parser.add_argument(f"--{param}", type=OrNone(default), default=default, help=description)
except argparse.ArgumentError:
print(
f"Could not add flag for param {param} because it was already present."
)
return parser
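if __name__ == "__main__":
    # --- Editor's note: hedged sketch, not part of the original file. ---
    # add_flags_from_config turns each (default, help) pair into an argparse flag;
    # OrNone lets the string "none" stand in for a Python None on the command line.
    import argparse
    demo_parser = argparse.ArgumentParser()
    demo_parser = add_flags_from_config(
        demo_parser, {"lr": (0.01, "learning rate"), "save-dir": (None, "output dir")}
    )
    demo_args = demo_parser.parse_args(["--lr", "0.1", "--save-dir", "none"])
    print(demo_args.lr, demo_args.save_dir)  # 0.1 None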
|
hgcn-master
|
utils/train_utils.py
|
hgcn-master
|
utils/__init__.py
|
|
import os
import pickle as pkl
import sys
import time
import networkx as nx
import numpy as np
from tqdm import tqdm
from utils.data_utils import load_data_lp
def hyperbolicity_sample(G, num_samples=50000):
curr_time = time.time()
hyps = []
for i in tqdm(range(num_samples)):
curr_time = time.time()
node_tuple = np.random.choice(G.nodes(), 4, replace=False)
s = []
try:
d01 = nx.shortest_path_length(G, source=node_tuple[0], target=node_tuple[1], weight=None)
d23 = nx.shortest_path_length(G, source=node_tuple[2], target=node_tuple[3], weight=None)
d02 = nx.shortest_path_length(G, source=node_tuple[0], target=node_tuple[2], weight=None)
d13 = nx.shortest_path_length(G, source=node_tuple[1], target=node_tuple[3], weight=None)
d03 = nx.shortest_path_length(G, source=node_tuple[0], target=node_tuple[3], weight=None)
d12 = nx.shortest_path_length(G, source=node_tuple[1], target=node_tuple[2], weight=None)
s.append(d01 + d23)
s.append(d02 + d13)
s.append(d03 + d12)
s.sort()
hyps.append((s[-1] - s[-2]) / 2)
except Exception as e:
continue
print('Time for hyp: ', time.time() - curr_time)
return max(hyps)
if __name__ == '__main__':
dataset = 'pubmed'
data_path = os.path.join(os.environ['DATAPATH'], dataset)
data = load_data_lp(dataset, use_feats=False, data_path=data_path)
graph = nx.from_scipy_sparse_matrix(data['adj_train'])
print('Computing hyperbolicity', graph.number_of_nodes(), graph.number_of_edges())
hyp = hyperbolicity_sample(graph)
print('Hyp: ', hyp)
|
hgcn-master
|
utils/hyperbolicity.py
|
"""Data utils functions for pre-processing and data loading."""
import os
import pickle as pkl
import sys
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
def load_data(args, datapath):
if args.task == 'nc':
data = load_data_nc(args.dataset, args.use_feats, datapath, args.split_seed)
else:
data = load_data_lp(args.dataset, args.use_feats, datapath)
adj = data['adj_train']
if args.task == 'lp':
adj_train, train_edges, train_edges_false, val_edges, val_edges_false, test_edges, test_edges_false = mask_edges(
adj, args.val_prop, args.test_prop, args.split_seed
)
data['adj_train'] = adj_train
data['train_edges'], data['train_edges_false'] = train_edges, train_edges_false
data['val_edges'], data['val_edges_false'] = val_edges, val_edges_false
data['test_edges'], data['test_edges_false'] = test_edges, test_edges_false
data['adj_train_norm'], data['features'] = process(
data['adj_train'], data['features'], args.normalize_adj, args.normalize_feats
)
if args.dataset == 'airport':
data['features'] = augment(data['adj_train'], data['features'])
return data
# ############### FEATURES PROCESSING ####################################
def process(adj, features, normalize_adj, normalize_feats):
if sp.isspmatrix(features):
features = np.array(features.todense())
if normalize_feats:
features = normalize(features)
features = torch.Tensor(features)
if normalize_adj:
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
return adj, features
def normalize(mx):
"""Row-normalize sparse matrix."""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo()
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)
)
values = torch.Tensor(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def augment(adj, features, normalize_feats=True):
deg = np.squeeze(np.sum(adj, axis=0).astype(int))
deg[deg > 5] = 5
deg_onehot = torch.tensor(np.eye(6)[deg], dtype=torch.float).squeeze()
const_f = torch.ones(features.size(0), 1)
features = torch.cat((features, deg_onehot, const_f), dim=1)
return features
# ############### DATA SPLITS #####################################################
def mask_edges(adj, val_prop, test_prop, seed):
np.random.seed(seed) # get tp edges
x, y = sp.triu(adj).nonzero()
pos_edges = np.array(list(zip(x, y)))
np.random.shuffle(pos_edges)
# get tn edges
x, y = sp.triu(sp.csr_matrix(1. - adj.toarray())).nonzero()
neg_edges = np.array(list(zip(x, y)))
np.random.shuffle(neg_edges)
m_pos = len(pos_edges)
n_val = int(m_pos * val_prop)
n_test = int(m_pos * test_prop)
val_edges, test_edges, train_edges = pos_edges[:n_val], pos_edges[n_val:n_test + n_val], pos_edges[n_test + n_val:]
val_edges_false, test_edges_false = neg_edges[:n_val], neg_edges[n_val:n_test + n_val]
train_edges_false = np.concatenate([neg_edges, val_edges, test_edges], axis=0)
adj_train = sp.csr_matrix((np.ones(train_edges.shape[0]), (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
adj_train = adj_train + adj_train.T
return adj_train, torch.LongTensor(train_edges), torch.LongTensor(train_edges_false), torch.LongTensor(val_edges), \
torch.LongTensor(val_edges_false), torch.LongTensor(test_edges), torch.LongTensor(
test_edges_false)
def split_data(labels, val_prop, test_prop, seed):
np.random.seed(seed)
nb_nodes = labels.shape[0]
all_idx = np.arange(nb_nodes)
pos_idx = labels.nonzero()[0]
neg_idx = (1. - labels).nonzero()[0]
np.random.shuffle(pos_idx)
np.random.shuffle(neg_idx)
pos_idx = pos_idx.tolist()
neg_idx = neg_idx.tolist()
nb_pos_neg = min(len(pos_idx), len(neg_idx))
nb_val = round(val_prop * nb_pos_neg)
nb_test = round(test_prop * nb_pos_neg)
idx_val_pos, idx_test_pos, idx_train_pos = pos_idx[:nb_val], pos_idx[nb_val:nb_val + nb_test], pos_idx[
nb_val + nb_test:]
idx_val_neg, idx_test_neg, idx_train_neg = neg_idx[:nb_val], neg_idx[nb_val:nb_val + nb_test], neg_idx[
nb_val + nb_test:]
return idx_val_pos + idx_val_neg, idx_test_pos + idx_test_neg, idx_train_pos + idx_train_neg
def bin_feat(feat, bins):
digitized = np.digitize(feat, bins)
return digitized - digitized.min()
# ############### LINK PREDICTION DATA LOADERS ####################################
def load_data_lp(dataset, use_feats, data_path):
if dataset in ['cora', 'pubmed']:
adj, features = load_citation_data(dataset, use_feats, data_path)[:2]
elif dataset == 'disease_lp':
adj, features = load_synthetic_data(dataset, use_feats, data_path)[:2]
elif dataset == 'airport':
adj, features = load_data_airport(dataset, data_path, return_label=False)
else:
raise FileNotFoundError('Dataset {} is not supported.'.format(dataset))
data = {'adj_train': adj, 'features': features}
return data
# ############### NODE CLASSIFICATION DATA LOADERS ####################################
def load_data_nc(dataset, use_feats, data_path, split_seed):
if dataset in ['cora', 'pubmed']:
adj, features, labels, idx_train, idx_val, idx_test = load_citation_data(
dataset, use_feats, data_path, split_seed
)
else:
if dataset == 'disease_nc':
adj, features, labels = load_synthetic_data(dataset, use_feats, data_path)
val_prop, test_prop = 0.10, 0.60
elif dataset == 'airport':
adj, features, labels = load_data_airport(dataset, data_path, return_label=True)
val_prop, test_prop = 0.15, 0.15
else:
raise FileNotFoundError('Dataset {} is not supported.'.format(dataset))
idx_val, idx_test, idx_train = split_data(labels, val_prop, test_prop, seed=split_seed)
labels = torch.LongTensor(labels)
data = {'adj_train': adj, 'features': features, 'labels': labels, 'idx_train': idx_train, 'idx_val': idx_val, 'idx_test': idx_test}
return data
# ############### DATASETS ####################################
def load_citation_data(dataset_str, use_feats, data_path, split_seed=None):
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open(os.path.join(data_path, "ind.{}.{}".format(dataset_str, names[i])), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file(os.path.join(data_path, "ind.{}.test.index".format(dataset_str)))
test_idx_range = np.sort(test_idx_reorder)
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
labels = np.argmax(labels, 1)
idx_test = test_idx_range.tolist()
idx_train = list(range(len(y)))
idx_val = range(len(y), len(y) + 500)
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
if not use_feats:
features = sp.eye(adj.shape[0])
return adj, features, labels, idx_train, idx_val, idx_test
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def load_synthetic_data(dataset_str, use_feats, data_path):
object_to_idx = {}
idx_counter = 0
edges = []
with open(os.path.join(data_path, "{}.edges.csv".format(dataset_str)), 'r') as f:
all_edges = f.readlines()
for line in all_edges:
n1, n2 = line.rstrip().split(',')
if n1 in object_to_idx:
i = object_to_idx[n1]
else:
i = idx_counter
object_to_idx[n1] = i
idx_counter += 1
if n2 in object_to_idx:
j = object_to_idx[n2]
else:
j = idx_counter
object_to_idx[n2] = j
idx_counter += 1
edges.append((i, j))
adj = np.zeros((len(object_to_idx), len(object_to_idx)))
for i, j in edges:
adj[i, j] = 1. # comment this line for directed adjacency matrix
adj[j, i] = 1.
if use_feats:
features = sp.load_npz(os.path.join(data_path, "{}.feats.npz".format(dataset_str)))
else:
features = sp.eye(adj.shape[0])
labels = np.load(os.path.join(data_path, "{}.labels.npy".format(dataset_str)))
return sp.csr_matrix(adj), features, labels
def load_data_airport(dataset_str, data_path, return_label=False):
graph = pkl.load(open(os.path.join(data_path, dataset_str + '.p'), 'rb'))
adj = nx.adjacency_matrix(graph)
    features = np.array([graph.nodes[u]['feat'] for u in graph.nodes()])  # graph.nodes[u]: graph.node was removed in networkx 2.4
if return_label:
label_idx = 4
labels = features[:, label_idx]
features = features[:, :label_idx]
labels = bin_feat(labels, bins=[7.0/7, 8.0/7, 9.0/7])
return sp.csr_matrix(adj), features, labels
else:
return sp.csr_matrix(adj), features
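if __name__ == "__main__":
    # --- Editor's note: hedged sanity check, not part of the original file. ---
    # normalize() row-normalizes a sparse matrix; sparse_mx_to_torch_sparse_tensor()
    # converts the result into a torch sparse tensor for the GCN layers.
    demo = sp.csr_matrix(np.array([[1., 1.], [0., 2.]]))
    print(normalize(demo).toarray())                  # [[0.5, 0.5], [0., 1.]]
    print(sparse_mx_to_torch_sparse_tensor(normalize(demo)))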
|
hgcn-master
|
utils/data_utils.py
|
from sklearn.metrics import average_precision_score, accuracy_score, f1_score
def acc_f1(output, labels, average='binary'):
preds = output.max(1)[1].type_as(labels)
if preds.is_cuda:
preds = preds.cpu()
labels = labels.cpu()
accuracy = accuracy_score(preds, labels)
f1 = f1_score(preds, labels, average=average)
return accuracy, f1
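if __name__ == "__main__":
    # --- Editor's note: hedged sketch, not part of the original file. ---
    # acc_f1 takes raw logits, argmaxes over the class dimension and reports
    # accuracy and F1 against integer labels; the values below are illustrative.
    import torch
    logits = torch.tensor([[2.0, 0.1], [0.2, 1.5], [3.0, 0.0]])
    labels = torch.tensor([0, 1, 1])
    print(acc_f1(logits, labels, average='binary'))  # (accuracy, f1)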
|
hgcn-master
|
utils/eval_utils.py
|
"""Base model class."""
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.layers import FermiDiracDecoder
import layers.hyp_layers as hyp_layers
import manifolds
import models.encoders as encoders
from models.decoders import model2decoder
from utils.eval_utils import acc_f1
class BaseModel(nn.Module):
"""
Base model for graph embedding tasks.
"""
def __init__(self, args):
super(BaseModel, self).__init__()
self.manifold_name = args.manifold
if args.c is not None:
self.c = torch.tensor([args.c])
if not args.cuda == -1:
self.c = self.c.to(args.device)
else:
self.c = nn.Parameter(torch.Tensor([1.]))
self.manifold = getattr(manifolds, self.manifold_name)()
if self.manifold.name == 'Hyperboloid':
args.feat_dim = args.feat_dim + 1
self.nnodes = args.n_nodes
self.encoder = getattr(encoders, args.model)(self.c, args)
def encode(self, x, adj):
if self.manifold.name == 'Hyperboloid':
o = torch.zeros_like(x)
x = torch.cat([o[:, 0:1], x], dim=1)
h = self.encoder.encode(x, adj)
return h
def compute_metrics(self, embeddings, data, split):
raise NotImplementedError
def init_metric_dict(self):
raise NotImplementedError
def has_improved(self, m1, m2):
raise NotImplementedError
class NCModel(BaseModel):
"""
Base model for node classification task.
"""
def __init__(self, args):
super(NCModel, self).__init__(args)
self.decoder = model2decoder[args.model](self.c, args)
if args.n_classes > 2:
self.f1_average = 'micro'
else:
self.f1_average = 'binary'
if args.pos_weight:
            self.weights = torch.Tensor([1., 1. / data['labels'][idx_train].mean()])  # NOTE: 'data' and 'idx_train' are not defined in this scope; --pos-weight 1 raises a NameError as written
else:
self.weights = torch.Tensor([1.] * args.n_classes)
if not args.cuda == -1:
self.weights = self.weights.to(args.device)
def decode(self, h, adj, idx):
output = self.decoder.decode(h, adj)
return F.log_softmax(output[idx], dim=1)
def compute_metrics(self, embeddings, data, split):
idx = data[f'idx_{split}']
output = self.decode(embeddings, data['adj_train_norm'], idx)
loss = F.nll_loss(output, data['labels'][idx], self.weights)
acc, f1 = acc_f1(output, data['labels'][idx], average=self.f1_average)
metrics = {'loss': loss, 'acc': acc, 'f1': f1}
return metrics
def init_metric_dict(self):
return {'acc': -1, 'f1': -1}
def has_improved(self, m1, m2):
return m1["f1"] < m2["f1"]
class LPModel(BaseModel):
"""
Base model for link prediction task.
"""
def __init__(self, args):
super(LPModel, self).__init__(args)
self.dc = FermiDiracDecoder(r=args.r, t=args.t)
self.nb_false_edges = args.nb_false_edges
self.nb_edges = args.nb_edges
def decode(self, h, idx):
if self.manifold_name == 'Euclidean':
h = self.manifold.normalize(h)
emb_in = h[idx[:, 0], :]
emb_out = h[idx[:, 1], :]
sqdist = self.manifold.sqdist(emb_in, emb_out, self.c)
probs = self.dc.forward(sqdist)
return probs
def compute_metrics(self, embeddings, data, split):
if split == 'train':
edges_false = data[f'{split}_edges_false'][np.random.randint(0, self.nb_false_edges, self.nb_edges)]
else:
edges_false = data[f'{split}_edges_false']
pos_scores = self.decode(embeddings, data[f'{split}_edges'])
neg_scores = self.decode(embeddings, edges_false)
loss = F.binary_cross_entropy(pos_scores, torch.ones_like(pos_scores))
loss += F.binary_cross_entropy(neg_scores, torch.zeros_like(neg_scores))
if pos_scores.is_cuda:
pos_scores = pos_scores.cpu()
neg_scores = neg_scores.cpu()
labels = [1] * pos_scores.shape[0] + [0] * neg_scores.shape[0]
preds = list(pos_scores.data.numpy()) + list(neg_scores.data.numpy())
roc = roc_auc_score(labels, preds)
ap = average_precision_score(labels, preds)
metrics = {'loss': loss, 'roc': roc, 'ap': ap}
return metrics
def init_metric_dict(self):
return {'roc': -1, 'ap': -1}
def has_improved(self, m1, m2):
return 0.5 * (m1['roc'] + m1['ap']) < 0.5 * (m2['roc'] + m2['ap'])
|
hgcn-master
|
models/base_models.py
|
"""Graph decoders."""
import manifolds
import torch.nn as nn
import torch.nn.functional as F
from layers.att_layers import GraphAttentionLayer
from layers.layers import GraphConvolution, Linear
class Decoder(nn.Module):
"""
Decoder abstract class for node classification tasks.
"""
def __init__(self, c):
super(Decoder, self).__init__()
self.c = c
def decode(self, x, adj):
if self.decode_adj:
input = (x, adj)
probs, _ = self.cls.forward(input)
else:
probs = self.cls.forward(x)
return probs
class GCNDecoder(Decoder):
"""
Graph Convolution Decoder.
"""
def __init__(self, c, args):
super(GCNDecoder, self).__init__(c)
act = lambda x: x
self.cls = GraphConvolution(args.dim, args.n_classes, args.dropout, act, args.bias)
self.decode_adj = True
class GATDecoder(Decoder):
"""
Graph Attention Decoder.
"""
def __init__(self, c, args):
super(GATDecoder, self).__init__(c)
self.cls = GraphAttentionLayer(args.dim, args.n_classes, args.dropout, F.elu, args.alpha, 1, True)
self.decode_adj = True
class LinearDecoder(Decoder):
"""
MLP Decoder for Hyperbolic/Euclidean node classification models.
"""
def __init__(self, c, args):
super(LinearDecoder, self).__init__(c)
self.manifold = getattr(manifolds, args.manifold)()
self.input_dim = args.dim
self.output_dim = args.n_classes
self.bias = args.bias
self.cls = Linear(self.input_dim, self.output_dim, args.dropout, lambda x: x, self.bias)
self.decode_adj = False
def decode(self, x, adj):
h = self.manifold.proj_tan0(self.manifold.logmap0(x, c=self.c), c=self.c)
return super(LinearDecoder, self).decode(h, adj)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, c={}'.format(
self.input_dim, self.output_dim, self.bias, self.c
)
model2decoder = {
'GCN': GCNDecoder,
'GAT': GATDecoder,
'HNN': LinearDecoder,
'HGCN': LinearDecoder,
'MLP': LinearDecoder,
'Shallow': LinearDecoder,
}
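# --- Editor's note: hedged sketch, not part of the original file. ---
# The decoder class is looked up by encoder name at model construction time,
# e.g. in models/base_models.py:
#
#   self.decoder = model2decoder[args.model](self.c, args)   # 'HGCN' -> LinearDecoder
#
# Hyperbolic encoders (HNN, HGCN) therefore classify in the tangent space via
# LinearDecoder.decode, which first applies logmap0/proj_tan0 to the embeddings.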
|
hgcn-master
|
models/decoders.py
|
hgcn-master
|
models/__init__.py
|
|
"""Graph encoders."""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import manifolds
from layers.att_layers import GraphAttentionLayer
import layers.hyp_layers as hyp_layers
from layers.layers import GraphConvolution, Linear, get_dim_act
import utils.math_utils as pmath
class Encoder(nn.Module):
"""
Encoder abstract class.
"""
def __init__(self, c):
super(Encoder, self).__init__()
self.c = c
def encode(self, x, adj):
if self.encode_graph:
input = (x, adj)
output, _ = self.layers.forward(input)
else:
output = self.layers.forward(x)
return output
class MLP(Encoder):
"""
Multi-layer perceptron.
"""
def __init__(self, c, args):
super(MLP, self).__init__(c)
assert args.num_layers > 0
dims, acts = get_dim_act(args)
layers = []
for i in range(len(dims) - 1):
in_dim, out_dim = dims[i], dims[i + 1]
act = acts[i]
layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
self.layers = nn.Sequential(*layers)
self.encode_graph = False
class HNN(Encoder):
"""
Hyperbolic Neural Networks.
"""
def __init__(self, c, args):
super(HNN, self).__init__(c)
self.manifold = getattr(manifolds, args.manifold)()
assert args.num_layers > 1
dims, acts, _ = hyp_layers.get_dim_act_curv(args)
hnn_layers = []
for i in range(len(dims) - 1):
in_dim, out_dim = dims[i], dims[i + 1]
act = acts[i]
hnn_layers.append(
hyp_layers.HNNLayer(
self.manifold, in_dim, out_dim, self.c, args.dropout, act, args.bias)
)
self.layers = nn.Sequential(*hnn_layers)
self.encode_graph = False
def encode(self, x, adj):
x_hyp = self.manifold.proj(self.manifold.expmap0(self.manifold.proj_tan0(x, self.c), c=self.c), c=self.c)
return super(HNN, self).encode(x_hyp, adj)
class GCN(Encoder):
"""
Graph Convolution Networks.
"""
def __init__(self, c, args):
super(GCN, self).__init__(c)
assert args.num_layers > 0
dims, acts = get_dim_act(args)
gc_layers = []
for i in range(len(dims) - 1):
in_dim, out_dim = dims[i], dims[i + 1]
act = acts[i]
gc_layers.append(GraphConvolution(in_dim, out_dim, args.dropout, act, args.bias))
self.layers = nn.Sequential(*gc_layers)
self.encode_graph = True
class HGCN(Encoder):
"""
Hyperbolic-GCN.
"""
def __init__(self, c, args):
super(HGCN, self).__init__(c)
self.manifold = getattr(manifolds, args.manifold)()
assert args.num_layers > 1
dims, acts, self.curvatures = hyp_layers.get_dim_act_curv(args)
self.curvatures.append(self.c)
hgc_layers = []
for i in range(len(dims) - 1):
c_in, c_out = self.curvatures[i], self.curvatures[i + 1]
in_dim, out_dim = dims[i], dims[i + 1]
act = acts[i]
hgc_layers.append(
hyp_layers.HyperbolicGraphConvolution(
self.manifold, in_dim, out_dim, c_in, c_out, args.dropout, act, args.bias, args.use_att, args.local_agg
)
)
self.layers = nn.Sequential(*hgc_layers)
self.encode_graph = True
def encode(self, x, adj):
x_tan = self.manifold.proj_tan0(x, self.curvatures[0])
x_hyp = self.manifold.expmap0(x_tan, c=self.curvatures[0])
x_hyp = self.manifold.proj(x_hyp, c=self.curvatures[0])
return super(HGCN, self).encode(x_hyp, adj)
class GAT(Encoder):
"""
Graph Attention Networks.
"""
def __init__(self, c, args):
super(GAT, self).__init__(c)
assert args.num_layers > 0
dims, acts = get_dim_act(args)
gat_layers = []
for i in range(len(dims) - 1):
in_dim, out_dim = dims[i], dims[i + 1]
act = acts[i]
assert dims[i + 1] % args.n_heads == 0
out_dim = dims[i + 1] // args.n_heads
concat = True
gat_layers.append(
GraphAttentionLayer(in_dim, out_dim, args.dropout, act, args.alpha, args.n_heads, concat))
self.layers = nn.Sequential(*gat_layers)
self.encode_graph = True
class Shallow(Encoder):
"""
Shallow Embedding method.
Learns embeddings or loads pretrained embeddings and uses an MLP for classification.
"""
def __init__(self, c, args):
super(Shallow, self).__init__(c)
self.manifold = getattr(manifolds, args.manifold)()
self.use_feats = args.use_feats
weights = torch.Tensor(args.n_nodes, args.dim)
if not args.pretrained_embeddings:
weights = self.manifold.init_weights(weights, self.c)
trainable = True
else:
weights = torch.Tensor(np.load(args.pretrained_embeddings))
assert weights.shape[0] == args.n_nodes, "The embeddings you passed seem to be for another dataset."
trainable = False
self.lt = manifolds.ManifoldParameter(weights, trainable, self.manifold, self.c)
self.all_nodes = torch.LongTensor(list(range(args.n_nodes)))
layers = []
if args.pretrained_embeddings is not None and args.num_layers > 0:
# MLP layers after pre-trained embeddings
dims, acts = get_dim_act(args)
if self.use_feats:
dims[0] = args.feat_dim + weights.shape[1]
else:
dims[0] = weights.shape[1]
for i in range(len(dims) - 1):
in_dim, out_dim = dims[i], dims[i + 1]
act = acts[i]
layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
self.layers = nn.Sequential(*layers)
self.encode_graph = False
def encode(self, x, adj):
h = self.lt[self.all_nodes, :]
if self.use_feats:
h = torch.cat((h, x), 1)
return super(Shallow, self).encode(h, adj)
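# Usage sketch (hedged: the exact fields read off `args` are determined by
# get_dim_act / get_dim_act_curv in the layers package; those shown here are
# inferred from the constructors above):
#
#   enc = HGCN(c=1.0, args=args)      # or GCN / GAT / MLP / HNN / Shallow
#   z = enc.encode(features, adj)     # (n_nodes, dim) node embeddings, lying
#                                     # on the manifold for hyperbolic encoders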
|
hgcn-master
|
models/encoders.py
|
from torch.optim import Adam
from .radam import RiemannianAdam
|
hgcn-master
|
optimizers/__init__.py
|
"""Riemannian adam optimizer geoopt implementation (https://github.com/geoopt/)."""
import torch.optim
from manifolds import Euclidean, ManifoldParameter
# in order not to create it at each iteration
_default_manifold = Euclidean()
class OptimMixin(object):
def __init__(self, *args, stabilize=None, **kwargs):
self._stabilize = stabilize
super().__init__(*args, **kwargs)
def stabilize_group(self, group):
pass
def stabilize(self):
"""Stabilize parameters if they are off-manifold due to numerical reasons
"""
for group in self.param_groups:
self.stabilize_group(group)
def copy_or_set_(dest, source):
"""
A workaround to respect strides of :code:`dest` when copying :code:`source`
(https://github.com/geoopt/geoopt/issues/70)
Parameters
----------
dest : torch.Tensor
Destination tensor where to store new data
source : torch.Tensor
Source data to put in the new tensor
Returns
-------
dest
torch.Tensor, modified inplace
"""
if dest.stride() != source.stride():
return dest.copy_(source)
else:
return dest.set_(source)
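# e.g. when `dest` is a non-contiguous view whose strides differ from `source`,
# copy_ writes element-wise and preserves dest's layout, whereas set_ simply
# rebinds dest to source's storage (cheaper, and safe when the strides match).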
class RiemannianAdam(OptimMixin, torch.optim.Adam):
r"""Riemannian Adam with the same API as :class:`torch.optim.Adam`
Parameters
----------
params : iterable
iterable of parameters to optimize or dicts defining
parameter groups
lr : float (optional)
learning rate (default: 1e-3)
betas : Tuple[float, float] (optional)
coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps : float (optional)
term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay : float (optional)
weight decay (L2 penalty) (default: 0)
amsgrad : bool (optional)
whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
Other Parameters
----------------
stabilize : int
Stabilize parameters if they are off-manifold due to numerical
reasons every ``stabilize`` steps (default: ``None`` -- no stabilize)
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def step(self, closure=None):
"""Performs a single optimization step.
Arguments
---------
closure : callable (optional)
A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
with torch.no_grad():
for group in self.param_groups:
if "step" not in group:
group["step"] = 0
betas = group["betas"]
weight_decay = group["weight_decay"]
eps = group["eps"]
learning_rate = group["lr"]
amsgrad = group["amsgrad"]
for point in group["params"]:
grad = point.grad
if grad is None:
continue
if isinstance(point, (ManifoldParameter)):
manifold = point.manifold
c = point.c
else:
manifold = _default_manifold
c = None
if grad.is_sparse:
raise RuntimeError(
"Riemannian Adam does not support sparse gradients yet (PR is welcome)"
)
state = self.state[point]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(point)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(point)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(point)
# make local variables for easy access
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
# actual step
                    grad.add_(point, alpha=weight_decay)
                    grad = manifold.egrad2rgrad(point, grad, c)
                    exp_avg.mul_(betas[0]).add_(grad, alpha=1 - betas[0])
                    exp_avg_sq.mul_(betas[1]).add_(
                        manifold.inner(point, c, grad, keepdim=True), alpha=1 - betas[1]
                    )
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(eps)
else:
denom = exp_avg_sq.sqrt().add_(eps)
group["step"] += 1
bias_correction1 = 1 - betas[0] ** group["step"]
bias_correction2 = 1 - betas[1] ** group["step"]
step_size = (
learning_rate * bias_correction2 ** 0.5 / bias_correction1
)
# copy the state, we need it for retraction
# get the direction for ascend
direction = exp_avg / denom
# transport the exponential averaging to the new point
new_point = manifold.proj(manifold.expmap(-step_size * direction, point, c), c)
exp_avg_new = manifold.ptransp(point, new_point, exp_avg, c)
# use copy only for user facing point
copy_or_set_(point, new_point)
exp_avg.set_(exp_avg_new)
group["step"] += 1
if self._stabilize is not None and group["step"] % self._stabilize == 0:
self.stabilize_group(group)
return loss
@torch.no_grad()
def stabilize_group(self, group):
for p in group["params"]:
if not isinstance(p, ManifoldParameter):
continue
state = self.state[p]
if not state: # due to None grads
continue
manifold = p.manifold
c = p.c
exp_avg = state["exp_avg"]
copy_or_set_(p, manifold.proj(p, c))
            exp_avg.set_(manifold.proj_tan(exp_avg, p, c))
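# Usage sketch: drop-in replacement for torch.optim.Adam. Parameters that are
# ManifoldParameter instances are updated with the Riemannian machinery above;
# plain tensors fall back to `_default_manifold` (Euclidean).
#
#   optimizer = RiemannianAdam(model.parameters(), lr=1e-3,
#                              weight_decay=5e-4, stabilize=100)
#   loss.backward()
#   optimizer.step()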
|
hgcn-master
|
optimizers/radam.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
import setuptools
requirements = [
r for r in Path("requirements.txt").read_text().splitlines() if "@" not in r
]
extra_requirements = {
module: [
r
for r in Path(os.path.join("augly", module, "requirements.txt"))
.read_text()
.splitlines()
if "@" not in r
]
for module in ["audio", "image", "text", "video"]
}
extra_requirements["video"].extend(
extra_requirements["audio"] + extra_requirements["image"]
)
extra_requirements["all"] = list(
{r for reqs in extra_requirements.values() for r in reqs}
)
with open("README.md", encoding="utf8") as f:
readme = f.read()
with open("version.txt", "r") as f:
version = f.read().strip()
setuptools.setup(
name="augly",
version=version,
description="A data augmentations library for audio, image, text, & video.",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/AugLy",
author="Zoe Papakipos and Joanna Bitton",
author_email="zoep@fb.com",
packages=setuptools.find_packages(exclude=["augly.tests"]),
include_package_data=True,
install_requires=requirements,
extras_require=extra_requirements,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
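# With the extras assembled above, installs resolve as, e.g.:
#   pip install augly            # core requirements only
#   pip install "augly[video]"   # core + video + audio + image requirements
#   pip install "augly[all]"     # core + the union of every module's extras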
|
AugLy-main
|
setup.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "AugLy"
copyright = "2021, Joanna Bitton and Zoe Papakipos"
author = "Joanna Bitton and Zoe Papakipos"
# The full version, including alpha/beta/rc tags
release = open("../../version.txt", "r").read().strip()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx_epytext",
"sphinx_autodoc_typehints",
"myst_parser",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# TODO: Deal with this
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_logo = "_static/logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {"logo_only": True}
# -- Preventing member skips -------------------------------------------------
specials = ["__init__", "__call__"]
def skip(app, what, name, obj, would_skip, options):
if name in specials:
return False
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
|
AugLy-main
|
docs/source/conf.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
AugLy-main
|
augly/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
from augly import audio as audaugs, utils
from augly.video import functional as F
from augly.video.augmenters import ffmpeg as af
from augly.video.helpers import identity_function
"""
Base Classes for Transforms
"""
class VidAugBaseClass:
def __init__(self, p: float = 1.0):
"""
@param p: the probability of the transform being applied; default value is 1.0
"""
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.p = p
def __call__(self, *args, **kwargs) -> Any:
"""
This function is to be implemented in the child classes.
From this function, call the transform to be applied
"""
raise NotImplementedError()
class BaseTransform(VidAugBaseClass):
def __call__(
self,
video_path: str,
output_path: Optional[str] = None,
force: bool = False,
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param force: if set to True, the transform will be applied. Otherwise,
application is determined by the probability set
@param seed: if provided, the random seed will be set to this before calling
the transform
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
assert type(force) == bool, "Expected type bool for variable `force`"
if not force and random.random() > self.p:
return video_path
if seed is not None:
random.seed(seed)
return self.apply_transform(video_path, output_path or video_path, metadata)
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
This function is to be implemented in the child classes.
From this function, call the augmentation function with the
parameters specified
"""
raise NotImplementedError()
class BaseRandomRangeTransform(BaseTransform):
def __init__(self, min_val: float, max_val: float, p: float = 1.0):
"""
@param min_val: the lower value of the range
@param max_val: the upper value of the range
@param p: the probability of the transform being applied; default value is 1.0
"""
self.min_val = min_val
self.max_val = max_val
self.chosen_value = None
super().__init__(p)
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
self.chosen_value = (
random.random() * (self.max_val - self.min_val)
) + self.min_val
return self.apply_random_transform(video_path, output_path, metadata)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
This function is to be implemented in the child classes. It has
access to `self.chosen_value` which is the randomly chosen value
from the range specified to pass into the augmentation function
"""
raise NotImplementedError()
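# A minimal sketch of how a random-range transform is typically written
# (hypothetical example class, not one of the transforms defined in this file):
# `apply_transform` above samples a value into `self.chosen_value`, and a
# subclass only forwards it to the functional API.
class _ExampleRandomBrightness(BaseRandomRangeTransform):
    def apply_random_transform(
        self,
        video_path: str,
        output_path: str,
        metadata: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        # `self.chosen_value` was drawn uniformly from [min_val, max_val]
        return F.brightness(
            video_path, output_path, level=self.chosen_value, metadata=metadata
        )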
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> blur_tsfm = Blur(radius=5.0, p=0.5)
>>> blur_tsfm(video_path, output_path)
"""
class AddNoise(BaseTransform):
def __init__(self, level: int = 25, p: float = 1.0):
"""
@param level: noise strength for specific pixel component. Default value
is 25. Allowed range is [0, 100], where 0 indicates no change
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.level = level
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Adds noise to a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.add_noise(video_path, output_path, self.level, metadata=metadata)
class ApplyLambda(BaseTransform):
def __init__(
self,
aug_function: Callable[..., str] = identity_function,
p: float = 1.0,
**kwargs,
):
"""
@param aug_function: the augmentation function to be applied onto the video
(should expect a video path and output path as input and output the
augmented video to the output path, then return the output path)
@param p: the probability of the transform being applied; default value is 1.0
@param **kwargs: the input attributes to be passed into `aug_function`
"""
super().__init__(p)
assert callable(aug_function), (
repr(type(aug_function).__name__) + " object is not callable"
)
self.aug_function = aug_function
self.kwargs = kwargs
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Apply a user-defined lambda on a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return self.aug_function(
video_path, output_path, metadata=metadata, **self.kwargs
)
class AudioSwap(BaseTransform):
def __init__(self, audio_path: str, offset: float = 0.0, p: float = 1.0):
"""
@param audio_path: the iopath uri to the audio you'd like to swap with the
video's audio
@param offset: starting point in seconds such that an audio clip of offset to
offset + video_duration is used in the audio swap. Default value is zero
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.audio_path = audio_path
self.offset = offset
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Swaps the video audio for the audio passed in provided an offset
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.audio_swap(
video_path, self.audio_path, output_path, self.offset, metadata=metadata
)
class AugmentAudio(BaseTransform):
def __init__(
self,
audio_aug_function: Callable[
..., Tuple[np.ndarray, int]
] = audaugs.apply_lambda,
p: float = 1.0,
**audio_aug_kwargs,
):
"""
@param audio_aug_function: the augmentation function to be applied onto the
video's audio track. Should have the standard API of an AugLy audio
augmentation, i.e. expect input audio as a numpy array or path & output
path as input, and output the augmented audio to the output path
@param p: the probability of the transform being applied; default value is 1.0
@param audio_aug_kwargs: the input attributes to be passed into `audio_aug`
"""
super().__init__(p)
self.audio_aug_function = audio_aug_function
self.audio_aug_kwargs = audio_aug_kwargs
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Augments the audio track of the input video using a given AugLy audio
augmentation
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or
returned
@returns: the path to the augmented video
"""
return F.augment_audio(
video_path=video_path,
audio_aug_function=self.audio_aug_function,
output_path=output_path,
metadata=metadata,
**self.audio_aug_kwargs,
)
class BlendVideos(BaseTransform):
def __init__(
self,
overlay_path: str,
opacity: float = 0.5,
overlay_size: float = 1.0,
x_pos: float = 0.0,
y_pos: float = 0.0,
use_second_audio: bool = True,
p: float = 1.0,
):
"""
@param overlay_path: the path to the video that will be overlaid onto the
background video
@param opacity: the lower the opacity, the more transparent the overlaid video
@param overlay_size: size of the overlaid video is overlay_size * height of the
background video
@param x_pos: position of overlaid video relative to the background video width
@param y_pos: position of overlaid video relative to the background video height
@param use_second_audio: use the audio of the overlaid video rather than the
audio of the background video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.overlay_path = overlay_path
self.opacity = opacity
self.overlay_size = overlay_size
self.x_pos = x_pos
self.y_pos = y_pos
self.use_second_audio = use_second_audio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays a video onto another video at position (width * x_pos, height * y_pos)
at a lower opacity
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.blend_videos(
video_path,
self.overlay_path,
output_path,
self.opacity,
self.overlay_size,
self.x_pos,
self.y_pos,
self.use_second_audio,
metadata=metadata,
)
class Blur(BaseTransform):
def __init__(self, sigma: float = 1.0, p: float = 1.0):
"""
@param sigma: horizontal sigma, standard deviation of Gaussian blur
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.sigma = sigma
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Blurs a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.blur(video_path, output_path, self.sigma, metadata=metadata)
class Brightness(BaseTransform):
def __init__(self, level: float = 0.15, p: float = 1.0):
"""
@param level: the value must be a float value in range -1.0 to 1.0, where a
negative value darkens and positive brightens
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.level = level
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Brightens or darkens a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.brightness(
video_path,
output_path,
level=self.level,
metadata=metadata,
)
class ChangeAspectRatio(BaseTransform):
def __init__(self, ratio: float = 1.0, p: float = 1.0):
"""
@param ratio: aspect ratio, i.e. width/height, of the new video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.ratio = ratio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Changes the aspect ratio of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.change_aspect_ratio(
video_path, output_path, self.ratio, metadata=metadata
)
class ChangeVideoSpeed(BaseTransform):
def __init__(self, factor: float = 1.0, p: float = 1.0):
"""
@param factor: the factor by which to alter the speed of the video. A factor
less than one will slow down the video, a factor equal to one won't alter
the video, and a factor greater than one will speed up the video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Changes the speed of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.change_video_speed(
video_path, output_path, self.factor, metadata=metadata
)
class ColorJitter(BaseTransform):
def __init__(
self,
brightness_factor: float = 0,
contrast_factor: float = 1.0,
saturation_factor: float = 1.0,
p: float = 1.0,
):
"""
@param brightness_factor: set the brightness expression. The value must be
a float value in range -1.0 to 1.0. The default value is 0
@param contrast_factor: set the contrast expression. The value must be a
float value in range -1000.0 to 1000.0. The default value is 1
@param saturation_factor: set the saturation expression. The value must be a
float in range 0.0 to 3.0. The default value is 1
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.brightness_factor = brightness_factor
self.contrast_factor = contrast_factor
self.saturation_factor = saturation_factor
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Color jitters the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.color_jitter(
video_path,
output_path,
self.brightness_factor,
self.contrast_factor,
self.saturation_factor,
metadata=metadata,
)
class Concat(BaseTransform):
def __init__(
self,
other_video_paths: List[str],
src_video_path_index: int = 0,
transition: Optional[af.TransitionConfig] = None,
p: float = 1.0,
):
"""
@param other_video_paths: a list of paths to the videos to be concatenated (in
order) with the given video_path when called (which will be inserted in with
this list of video paths at index src_video_path_index)
@param src_video_path_index: for metadata purposes, this indicates which video in
the list `video_paths` should be considered the `source` or original video
@param transition: optional transition config between the clips
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.other_video_paths = other_video_paths
self.src_video_path_index = src_video_path_index
self.transition = transition
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Concatenates videos together. Resizes all other videos to the size of the
`source` video (video_paths[src_video_path_index]), and modifies the sample
aspect ratios to match (ffmpeg will fail to concat if SARs don't match)
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
video_paths = (
self.other_video_paths[: self.src_video_path_index]
+ [video_path]
+ self.other_video_paths[self.src_video_path_index :]
)
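        # e.g. other_video_paths=["a.mp4", "b.mp4"] with src_video_path_index=1
        # yields video_paths == ["a.mp4", video_path, "b.mp4"], so the source
        # video keeps the index reported in the metadata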
return F.concat(
video_paths,
output_path,
self.src_video_path_index,
transition=self.transition,
metadata=metadata,
)
class Contrast(BaseTransform):
def __init__(self, level: float = 1.0, p: float = 1.0):
"""
@param level: the value must be a float value in range -1000.0 to 1000.0,
where a negative value removes contrast and a positive value adds contrast
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.level = level
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the contrast of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.contrast(video_path, output_path, self.level, metadata=metadata)
class Crop(BaseTransform):
def __init__(
self,
left: float = 0.25,
top: float = 0.25,
right: float = 0.75,
bottom: float = 0.75,
p: float = 1.0,
):
"""
@param left: left positioning of the crop; between 0 and 1, relative to
the video width
@param top: top positioning of the crop; between 0 and 1, relative to
the video height
@param right: right positioning of the crop; between 0 and 1, relative to
the video width
@param bottom: bottom positioning of the crop; between 0 and 1, relative to
the video height
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.left, self.right, self.top, self.bottom = left, right, top, bottom
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Crops the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.crop(
video_path,
output_path,
self.left,
self.top,
self.right,
self.bottom,
metadata=metadata,
)
class EncodingQuality(BaseTransform):
def __init__(self, quality: int = 23, p: float = 1.0):
"""
@param quality: CRF scale is 0–51, where 0 is lossless, 23 is the default,
and 51 is worst quality possible. A lower value generally leads to higher
quality, and a subjectively sane range is 17–28
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.quality = quality
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the encoding quality of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.encoding_quality(
video_path,
output_path,
quality=int(self.quality),
metadata=metadata,
)
class FPS(BaseTransform):
def __init__(self, fps: int = 15, p: float = 1.0):
"""
        @param fps: the desired output frame rate. Note that an FPS value greater
            than the original FPS of the video will result in an unaltered video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.fps = fps
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the FPS of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.fps(video_path, output_path, self.fps, metadata=metadata)
class Grayscale(BaseTransform):
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Changes a video to be grayscale
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.grayscale(video_path, output_path, metadata=metadata)
class HFlip(BaseTransform):
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Horizontally flips a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.hflip(video_path, output_path, metadata=metadata)
class HStack(BaseTransform):
def __init__(
self,
second_video_path: str,
use_second_audio: bool = False,
p: float = 1.0,
):
"""
@param second_video_path: the path to the video that will be stacked
to the right
@param use_second_audio: if set to True, the audio of the right video will
be used instead of the left's
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.second_video_path = second_video_path
self.use_second_audio = use_second_audio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Horizontally stacks two videos
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.hstack(
video_path,
self.second_video_path,
output_path,
self.use_second_audio,
metadata=metadata,
)
class InsertInBackground(BaseTransform):
def __init__(
self,
background_path: Optional[str] = None,
offset_factor: float = 0.0,
transition: Optional[af.TransitionConfig] = None,
p: float = 1.0,
):
"""
@param background_path: the path to the video in which to insert the main
video. If set to None, the main video will play in the middle of a silent
video with black frames
@param offset_factor: the point in the background video in which the main video
starts to play (this factor is multiplied by the background video duration
to determine the start point)
@param transition: optional transition config between the clips
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_path = background_path
self.offset_factor = offset_factor
self.transition = transition
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Puts the video in the middle of the background video
(at offset_factor * background.duration)
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.insert_in_background(
video_path,
output_path,
self.background_path,
self.offset_factor,
transition=self.transition,
metadata=metadata,
)
class InsertInBackgroundMultiple(BaseTransform):
def __init__(
self,
background_path: str,
additional_video_paths: List[str],
src_ids: List[str],
seed: Optional[int] = None,
):
"""
@param background_path: the path of the video in which to insert
the main (and additional) video.
@param additional_video_paths: list of additional video paths to
be inserted alongside the main video; one clip from each of the
input videos will be inserted in order.
@param src_ids: the list of identifiers for the main video and additional videos.
@param seed: if provided, this will set the random seed to ensure consistency
between runs.
"""
super().__init__()
self.background_path = background_path
self.additional_video_paths = additional_video_paths
self.src_ids = src_ids
self.seed = seed
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Places the video (and the additional videos) in the middle of the background video.
@param video_path: the path of the main video to be augmented.
@param output_path: the path in which the output video will be stored.
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
"""
return F.insert_in_background_multiple(
video_path=video_path,
output_path=output_path,
background_path=self.background_path,
src_ids=self.src_ids,
additional_video_paths=self.additional_video_paths,
seed=self.seed,
metadata=metadata,
)
class Loop(BaseTransform):
def __init__(self, num_loops: int = 0, p: float = 1.0):
"""
@param num_loops: the number of times to loop the video. 0 means that the
video will play once (i.e. no loops)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.num_loops = num_loops
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Loops a video `num_loops` times
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.loop(video_path, output_path, self.num_loops, metadata=metadata)
class MemeFormat(BaseTransform):
def __init__(
self,
text: str = "LOL",
font_file: str = utils.MEME_DEFAULT_FONT,
opacity: float = 1.0,
text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
caption_height: int = 250,
meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
p: float = 1.0,
):
"""
@param text: the text to be overlaid/used in the meme. note: if using a very
long string, please add in newline characters such that the text remains
in a readable font size
@param font_file: iopath uri to the .ttf font file
@param opacity: the lower the opacity, the more transparent the text
@param text_color: color of the text in RGB values
@param caption_height: the height of the meme caption
@param meme_bg_color: background color of the meme caption in RGB values
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.text, self.font_file = text, font_file
self.text_color, self.opacity = text_color, opacity
self.meme_bg_color, self.caption_height = meme_bg_color, caption_height
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Creates a new video that looks like a meme, given text and video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.meme_format(
video_path,
output_path,
self.text,
self.font_file,
self.opacity,
self.text_color,
self.caption_height,
self.meme_bg_color,
metadata=metadata,
)
class Overlay(BaseTransform):
def __init__(
self,
overlay_path: str,
overlay_size: Optional[float] = None,
x_factor: float = 0.0,
y_factor: float = 0.0,
use_overlay_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
p: float = 1.0,
):
"""
@param overlay_path: the path to the media (image or video) that will be
overlaid onto the video
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_overlay_audio: if set to True and the media type is a video, the
audio of the overlaid video will be used instead of the main/background
video's audio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.overlay_path = overlay_path
self.overlay_size = overlay_size
self.x_factor = x_factor
self.y_factor = y_factor
self.use_overlay_audio = use_overlay_audio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays media onto the video at position (width * x_factor, height * y_factor)
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay(
video_path,
self.overlay_path,
output_path,
self.overlay_size,
self.x_factor,
self.y_factor,
self.use_overlay_audio,
metadata=metadata,
)
class OverlayDots(BaseTransform):
def __init__(
self,
num_dots: int = 100,
dot_type: str = "colored",
random_movement: bool = True,
metadata: Optional[List[Dict[str, Any]]] = None,
p: float = 1.0,
):
"""
@param num_dots: the number of dots to add to each frame
@param dot_type: specify if you would like "blur" or "colored"
@param random_movement: whether or not you want the dots to randomly move
around across the frame or to move across in a "linear" way
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.num_dots = num_dots
self.dot_type = dot_type
self.random_movement = random_movement
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays dots onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay_dots(
video_path,
output_path,
self.num_dots,
self.dot_type,
self.random_movement,
metadata=metadata,
)
class OverlayEmoji(BaseTransform):
def __init__(
self,
emoji_path: str = utils.EMOJI_PATH,
x_factor: float = 0.4,
y_factor: float = 0.4,
opacity: float = 1.0,
emoji_size: float = 0.15,
p: float = 1.0,
):
"""
@param emoji_path: iopath uri to the emoji image
@param x_factor: specifies where the left side of the emoji should be
placed, relative to the video width
@param y_factor: specifies where the top side of the emoji should be placed,
relative to the video height
@param opacity: opacity of the emoji image
@param emoji_size: emoji size relative to the height of the video frame
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.emoji_path = emoji_path
self.x_factor = x_factor
self.y_factor = y_factor
self.opacity = opacity
self.emoji_size = emoji_size
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays an emoji onto each frame of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay_emoji(
video_path,
output_path,
self.emoji_path,
self.x_factor,
self.y_factor,
self.opacity,
self.emoji_size,
metadata=metadata,
)
class OverlayOntoBackgroundVideo(BaseTransform):
def __init__(
self,
background_path: str,
overlay_size: Optional[float] = 0.7,
x_factor: float = 0.0,
y_factor: float = 0.0,
use_background_audio: bool = False,
p: float = 1.0,
):
"""
@param background_path: the path to the background video
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_background_audio: if set to True and the media type is a video, the
audio of the background video will be used instead of the src video's audio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_path = background_path
self.overlay_size = overlay_size
self.x_factor = x_factor
self.y_factor = y_factor
self.use_background_audio = use_background_audio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays the video onto a background video, pointed to by background_path
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay_onto_background_video(
video_path,
self.background_path,
output_path,
self.overlay_size,
self.x_factor,
self.y_factor,
self.use_background_audio,
metadata=metadata,
)
class OverlayOntoScreenshot(BaseTransform):
def __init__(
self,
template_filepath: str = utils.TEMPLATE_PATH,
template_bboxes_filepath: str = utils.BBOXES_PATH,
max_image_size_pixels: Optional[int] = None,
crop_src_to_fit: bool = False,
p: float = 1.0,
):
"""
@param template_filepath: iopath uri to the screenshot template
@param template_bboxes_filepath: iopath uri to the file containing the bounding
box for each template
@param max_image_size_pixels: if provided, the template image and/or src video
will be scaled down to avoid an output image with an area greater than this
size (in pixels)
@param crop_src_to_fit: if True, the src image will be cropped if necessary to
fit into the template image. If False, the src image will instead be resized
if necessary
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.template_filepath = template_filepath
self.template_bboxes_filepath = template_bboxes_filepath
self.max_image_size_pixels = max_image_size_pixels
self.crop_src_to_fit = crop_src_to_fit
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays the video onto a screenshot template so it looks like it was
screen-recorded on Instagram
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay_onto_screenshot(
video_path,
output_path,
self.template_filepath,
self.template_bboxes_filepath,
self.max_image_size_pixels,
self.crop_src_to_fit,
metadata=metadata,
)
class OverlayShapes(BaseTransform):
def __init__(
self,
num_shapes: int = 1,
shape_type: Optional[str] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
radius: Optional[float] = None,
random_movement: bool = True,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
p: float = 1.0,
):
"""
@param num_shapes: the number of shapes to add to each frame
@param shape_type: specify if you would like circles or rectangles
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the shapes
@param random_movement: whether or not you want the shapes to randomly move
around across the frame or to move across in a "linear" way
        @param topleft: specifies the top-left boundary of the shape region. The
            boundary values are floats in [0, 1] representing the fraction w.r.t.
            the frame width/height
        @param bottomright: specifies the bottom-right boundary of the shape region.
            The boundary values are floats in [0, 1] representing the fraction
            w.r.t. the frame width/height
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.num_shapes = num_shapes
self.shape_type = shape_type
self.colors = colors
self.thickness = thickness
self.radius = radius
self.random_movement = random_movement
self.topleft = topleft
self.bottomright = bottomright
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays random shapes onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay_shapes(
video_path,
output_path,
self.num_shapes,
self.shape_type,
self.colors,
self.thickness,
self.radius,
self.random_movement,
self.topleft,
self.bottomright,
metadata=metadata,
)
class OverlayText(BaseTransform):
def __init__(
self,
text_len: int = 10,
text_change_nth: Optional[int] = None,
fonts: Optional[List[Tuple[Any, Optional[str]]]] = None,
fontscales: Optional[Tuple[float, float]] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
random_movement: bool = False,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
p: float = 1.0,
):
"""
@param text_len: length of string for randomized texts.
@param text_change_nth: change random text every nth frame. None means using
same text for all frames
@param fonts: list of fonts to sample from. Each font can be a cv2 fontFace,
a PIL ImageFont, or a path to a PIL ImageFont file. Each font is coupled
with a chars file (the second item in the tuple) - a path to a file which
contains the characters associated with the given font. For example, non-
western alphabets have different valid characters than the roman alphabet,
and these must be specified in order to construct random valid text in
that font. If the chars file path is None, the roman alphabet will be used
@param fontscales: 2-tuple of float (min_scale, max_scale)
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the text
@param random_movement: whether or not you want the text to randomly move around
across frame or to move across in a "linear" way
        @param topleft: specifies the top-left boundary of the text region. The
            boundary values are floats in [0, 1] representing the fraction w.r.t.
            the frame width/height
        @param bottomright: specifies the bottom-right boundary of the text region.
            The boundary values are floats in [0, 1] representing the fraction
            w.r.t. the frame width/height
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.text_len = text_len
self.text_change_nth = text_change_nth
self.fonts = fonts
self.fontscales = fontscales
self.colors = colors
self.thickness = thickness
self.random_movement = random_movement
self.topleft = topleft
self.bottomright = bottomright
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays random text onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.overlay_text(
video_path,
output_path,
self.text_len,
self.text_change_nth,
self.fonts,
self.fontscales,
self.colors,
self.thickness,
self.random_movement,
self.topleft,
self.bottomright,
metadata=metadata,
)
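# A minimal usage sketch for OverlayText (paths are placeholders; all keyword
# arguments used here are documented parameters of the class above):
# >>> text_tsfm = OverlayText(text_len=8, fontscales=(0.5, 1.0), colors=[(255, 255, 255)])
# >>> text_tsfm("/path/to/input.mp4", "/path/to/output.mp4")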
class Pad(BaseTransform):
def __init__(
self,
w_factor: float = 0.25,
h_factor: float = 0.25,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
p: float = 1.0,
):
"""
@param w_factor: pad right and left with w_factor * frame width
@param h_factor: pad bottom and top with h_factor * frame height
@param color: RGB color of the padded margin
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.w_factor, self.h_factor = w_factor, h_factor
self.color = color
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Pads the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.pad(
video_path,
output_path,
self.w_factor,
self.h_factor,
self.color,
metadata=metadata,
)
class PerspectiveTransformAndShake(BaseTransform):
def __init__(
self,
sigma: float = 50.0,
shake_radius: float = 0.0,
seed: Optional[int] = None,
p: float = 1.0,
):
"""
@param sigma: the standard deviation of the distribution of destination
coordinates. the larger the sigma value, the more intense the transform
@param shake_radius: determines the amount by which to "shake" the video;
the larger the radius, the more intense the shake
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.sigma, self.shake_radius = sigma, shake_radius
self.seed = seed
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Apply a perspective transform to the video so it looks like it was taken
as a photo from another device (e.g. taking a video from your phone of a
video on a computer). Also has a shake factor to mimic the shakiness of
someone holding a phone
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.perspective_transform_and_shake(
video_path, output_path, self.sigma, self.shake_radius, self.seed, metadata
)
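# A minimal usage sketch (paths are placeholders): a moderate perspective warp
# with a small amount of shake, seeded for reproducibility.
# >>> pt_tsfm = PerspectiveTransformAndShake(sigma=30.0, shake_radius=10.0, seed=42)
# >>> pt_tsfm("/path/to/input.mp4", "/path/to/output.mp4")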
class Pixelization(BaseTransform):
def __init__(self, ratio: float = 1.0, p: float = 1.0):
"""
@param ratio: smaller values result in a more pixelated image, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.ratio = ratio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Pixelizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.pixelization(
video_path,
output_path,
ratio=self.ratio,
metadata=metadata,
)
class RemoveAudio(BaseTransform):
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Removes the audio stream from a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.remove_audio(video_path, output_path, metadata=metadata)
class ReplaceWithBackground(BaseTransform):
def __init__(
self,
background_path: Optional[str] = None,
source_offset: float = 0.0,
background_offset: float = 0.0,
source_percentage: float = 0.5,
transition: Optional[af.TransitionConfig] = None,
p: float = 1.0,
):
"""
@param background_path: the path to the video in which to insert the main
video. If set to None, the main video will play in the middle of a silent
video with black frames
@param source_offset: the starting point of the source video playback, as a
fraction of the source video duration
@param background_offset: the point in the background video at which the main
video starts to play (this factor is multiplied by the background video
duration to determine the start point)
@param source_percentage: the fraction of the output video duration that the
main video should take up
@param transition: optional transition config between the clips
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_path = background_path
self.source_offset = source_offset
self.background_offset = background_offset
self.source_percentage = source_percentage
self.transition = transition
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Puts the video in the middle of the background video
(at background_offset * the background video duration)
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.replace_with_background(
video_path,
output_path,
background_path=self.background_path,
source_offset=self.source_offset,
background_offset=self.background_offset,
source_percentage=self.source_percentage,
transition=self.transition,
metadata=metadata,
)
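# A minimal usage sketch (paths are placeholders; offsets and percentages are
# fractions of the relevant clip durations):
# >>> bg_tsfm = ReplaceWithBackground(
# ...     background_path="/path/to/background.mp4",
# ...     background_offset=0.5,
# ...     source_percentage=0.5,
# ... )
# >>> bg_tsfm("/path/to/input.mp4", "/path/to/output.mp4")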
class ReplaceWithColorFrames(BaseTransform):
def __init__(
self,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
transition: Optional[af.TransitionConfig] = None,
p: float = 1.0,
):
"""
@param offset_factor: start point of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param duration_factor: the length of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param color: RGB color of the replaced frames. Default color is black
@param transition: optional transition config between the clips
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor, self.duration_factor = offset_factor, duration_factor
self.color = color
self.transition = transition
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Replaces part of the video with frames of the specified color
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.replace_with_color_frames(
video_path,
output_path,
self.offset_factor,
self.duration_factor,
self.color,
transition=self.transition,
metadata=metadata,
)
class Resize(BaseTransform):
def __init__(
self, height: Optional[int] = None, width: Optional[int] = None, p: float = 1.0
):
"""
@param height: the height in which the video should be resized to. If None,
the original video height will be used
@param width: the width in which the video should be resized to. If None, the
original video width will be used
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.height, self.width = height, width
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Resizes a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.resize(
video_path, output_path, self.height, self.width, metadata=metadata
)
class Rotate(BaseTransform):
def __init__(self, degrees: float = 15, p: float = 1.0):
"""
@param degrees: the angle by which to rotate the input video clockwise,
expressed in degrees (negative values are supported as well)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.degrees = degrees
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Rotates a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.rotate(video_path, output_path, self.degrees, metadata=metadata)
class Scale(BaseTransform):
def __init__(self, factor: float = 0.5, p: float = 1.0):
"""
@param factor: the ratio by which the video should be downscaled or upscaled
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the resolution of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.scale(video_path, output_path, self.factor, metadata=metadata)
class Shift(BaseTransform):
def __init__(
self,
x_factor: float = 0.0,
y_factor: float = 0.0,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
p: float = 1.0,
):
"""
@param x_factor: the horizontal amount that the video should be shifted,
relative to the width of the video
@param y_factor: the vertical amount that the video should be shifted,
relative to the height of the video
@param color: RGB color of the margin generated by the shift. Default color
is black
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.x_factor, self.y_factor = x_factor, y_factor
self.color = color
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Shifts the original frame position from the center by a vector
(width * x_factor, height * y_factor) and pads the rest with a
colored margin
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.shift(
video_path,
output_path,
self.x_factor,
self.y_factor,
self.color,
metadata=metadata,
)
class TimeCrop(BaseTransform):
def __init__(
self, offset_factor: float = 0.0, duration_factor: float = 1.0, p: float = 1.0
):
"""
@param offset_factor: start point of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param duration_factor: the length of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor, self.duration_factor = offset_factor, duration_factor
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Crops the video using the specified offset and duration factors
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.time_crop(
video_path,
output_path,
self.offset_factor,
self.duration_factor,
metadata=metadata,
)
class TimeDecimate(BaseTransform):
def __init__(
self,
start_offset_factor: float = 0.0,
on_factor: float = 0.2,
off_factor: float = 0.5,
transition: Optional[af.TransitionConfig] = None,
p: float = 1.0,
):
"""
@param start_offset_factor: relative to the video duration; the offset
at which to start taking "on" segments
@param on_factor: relative to the video duration; the amount of time each
"on" video chunk should be
@param off_factor: relative to the "on" duration; the amount of time each
"off" video chunk should be
@param transition: optional transition config between the clips
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.start_offset_factor = start_offset_factor
self.on_factor, self.off_factor = on_factor, off_factor
self.transition = transition
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Removes evenly sized (off) chunks, and concatenates evenly spaced (on)
chunks from the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.time_decimate(
video_path,
output_path,
start_offset_factor=self.start_offset_factor,
on_factor=self.on_factor,
off_factor=self.off_factor,
transition=self.transition,
metadata=metadata,
)
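# A minimal usage sketch (paths are placeholders): keep "on" chunks that are 10%
# of the video duration, skipping "off" chunks half as long as each "on" chunk.
# >>> decimate_tsfm = TimeDecimate(on_factor=0.1, off_factor=0.5)
# >>> decimate_tsfm("/path/to/input.mp4", "/path/to/output.mp4")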
class Trim(BaseTransform):
def __init__(
self, start: Optional[float] = None, end: Optional[float] = None, p: float = 1.0
):
"""
@param start: starting point in seconds of when the trimmed video should start.
If None, start will be 0
@param end: ending point in seconds of when the trimmed video should end.
If None, the end will be the duration of the video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.start, self.end = start, end
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Trims the video using the specified start and end parameters
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.trim(video_path, output_path, self.start, self.end, metadata=metadata)
class VFlip(BaseTransform):
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Vertically flips a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.vflip(video_path, output_path, metadata=metadata)
class VStack(BaseTransform):
def __init__(
self,
second_video_path: str,
use_second_audio: bool = False,
p: float = 1.0,
):
"""
@param second_video_path: the path to the video that will be stacked on
the bottom
@param use_second_audio: if set to True, the audio of the bottom video will
be used instead of the top's
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.second_video_path = second_video_path
self.use_second_audio = use_second_audio
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Vertically stacks two videos
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.vstack(
video_path,
self.second_video_path,
output_path,
self.use_second_audio,
metadata=metadata,
)
"""
Random Transforms
These classes below are similar to the non-random transforms in the sense
where they can be used with the Compose operator, etc. However, instead of
specifying specific parameters for the augmentation, with these functions
you can specify a range (or a list) to randomly choose from instead.
Example:
>>> blur_tsfm = RandomBlur(min_sigma=2.0, max_sigma=5.0, p=0.5)
>>> blur_tsfm(video_path, output_path)
"""
class RandomAspectRatio(BaseRandomRangeTransform):
def __init__(self, min_ratio: float = 0.5, max_ratio: float = 2.0, p: float = 1.0):
"""
@param min_ratio: the lower value on the range of aspect ratio values to choose
from, i.e. the width/height ratio
@param max_ratio: the upper value on the range of aspect ratio values to choose
from, i.e. the width/height ratio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_ratio, max_ratio, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly changes the aspect ratio of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.change_aspect_ratio(
video_path, output_path, ratio=self.chosen_value, metadata=metadata
)
class RandomBlur(BaseRandomRangeTransform):
def __init__(self, min_sigma: float = 0.0, max_sigma: float = 10.0, p: float = 1.0):
"""
@param min_sigma: the lower value on the range of blur values to choose from.
The larger the radius, the blurrier the video
@param max_sigma: the upper value on the range of blur values to choose from.
The larger the radius, the blurrier the video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_sigma, max_sigma, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly blurs a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.blur(
video_path, output_path, sigma=self.chosen_value, metadata=metadata
)
class RandomBrightness(BaseRandomRangeTransform):
def __init__(self, min_level: float = -1.0, max_level: float = 1.0, p: float = 1.0):
"""
@param min_level: the lower value on the range of brightness values to choose
from. The lower the factor, the darker the video
@param max_level: the upper value on the range of brightness values to choose
from. The higher the factor, the brighter the video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_level, max_level, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly changes the brightness of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.brightness(
video_path,
output_path,
level=self.chosen_value,
metadata=metadata,
)
class RandomContrast(BaseRandomRangeTransform):
def __init__(
self, min_factor: float = -5.0, max_factor: float = 5.0, p: float = 1.0
):
"""
@param min_factor: the lower value on the range of contrast values to choose
from. The lower the factor, the less contrast
@param max_factor: the upper value on the range of contrast values to choose
from. The higher the factor, the more contrast
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_factor, max_factor, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly changes the contrast of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.contrast(
video_path, output_path, level=self.chosen_value, metadata=metadata
)
class RandomEmojiOverlay(BaseTransform):
def __init__(
self,
emoji_directory: str = utils.SMILEY_EMOJI_DIR,
opacity: float = 1.0,
emoji_size: float = 0.15,
x_factor: float = 0.4,
y_factor: float = 0.4,
p: float = 1.0,
):
"""
@param emoji_directory: iopath directory uri containing the emoji images
@param opacity: the lower the opacity, the more transparent the overlaid emoji
@param emoji_size: size of the emoji is emoji_size * height of the
original video
@param x_factor: position of emoji relative to the video width
@param y_factor: position of emoji relative to the video height
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.emoji_directory = emoji_directory
self.emoji_paths = utils.pathmgr.ls(emoji_directory)
self.opacity = opacity
self.emoji_size = emoji_size
self.x_factor = x_factor
self.y_factor = y_factor
def apply_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that overlays a random emoji onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
emoji_path = random.choice(self.emoji_paths)
return F.overlay_emoji(
video_path,
output_path,
emoji_path=os.path.join(self.emoji_directory, emoji_path),
opacity=self.opacity,
emoji_size=self.emoji_size,
x_factor=self.x_factor,
y_factor=self.y_factor,
metadata=metadata,
)
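# A minimal usage sketch (paths are placeholders; uses the default smiley emoji set):
# >>> emoji_tsfm = RandomEmojiOverlay(emoji_size=0.2, x_factor=0.5, y_factor=0.5, p=0.5)
# >>> emoji_tsfm("/path/to/input.mp4", "/path/to/output.mp4")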
class RandomEncodingQuality(BaseRandomRangeTransform):
def __init__(self, min_quality: int = 10, max_quality: int = 40, p: float = 1.0):
"""
@param min_quality: the lower value on the range of encoding quality values
to choose from. CRF scale is 0–51, where 0 is lossless, 23 is the default,
and 51 is worst quality possible. A lower value generally leads to higher
quality, and a subjectively sane range is 17–28
@param max_quality: the upper value on the range of encoding quality values to
choose from. CRF scale is 0–51, where 0 is lossless, 23 is the default, and
51 is worst quality possible. A lower value generally leads to higher
quality, and a subjectively sane range is 17–28
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_quality, max_quality, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly changes the encoding quality of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.encoding_quality(
video_path,
output_path,
quality=int(self.chosen_value),
metadata=metadata,
)
class RandomFPS(BaseRandomRangeTransform):
def __init__(self, min_fps: float = 5.0, max_fps: float = 30.0, p: float = 1.0):
"""
@param min_fps: the lower value on the range of fps values to choose from
@param max_fps: the upper value on the range of fps values to choose from. Note
that a FPS value greater than the original FPS of the video will result in
an unaltered video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_fps, max_fps, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly changes the FPS of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.fps(video_path, output_path, fps=self.chosen_value, metadata=metadata)
class RandomNoise(BaseRandomRangeTransform):
def __init__(self, min_level: int = 0, max_level: int = 50, p: float = 1.0):
"""
@param min_level: the lower value on the range of noise strength level
values to choose from. 0 indicates no change, allowed range is [0, 100]
@param max_level: the upper value on the range of noise strength level values
to choose from. 0 indicates no change, allowed range is [0, 100]
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_level, max_level, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly adds noise to a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.add_noise(
video_path, output_path, level=int(self.chosen_value), metadata=metadata
)
class RandomPixelization(BaseRandomRangeTransform):
def __init__(self, min_ratio: float = 0.1, max_ratio: float = 1.0, p: float = 1.0):
"""
@param min_ratio: the lower value on the range of pixelization ratio values to
choose from. Smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param max_ratio: the upper value on the range of pixelization ratio values to
choose from. Smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_ratio, max_ratio, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly pixelizes a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.pixelization(
video_path,
output_path,
ratio=self.chosen_value,
metadata=metadata,
)
class RandomRotation(BaseRandomRangeTransform):
def __init__(
self, min_degrees: float = 0.0, max_degrees: float = 180.0, p: float = 1.0
):
"""
@param min_degrees: the lower value on the range of degree values to choose from
@param max_degrees: the upper value on the range of degree values to choose from
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_degrees, max_degrees, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly rotates a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.rotate(
video_path, output_path, degrees=self.chosen_value, metadata=metadata
)
class RandomVideoSpeed(BaseRandomRangeTransform):
def __init__(
self, min_factor: float = 0.25, max_factor: float = 4.0, p: float = 1.0
):
"""
@param min_factor: the lower value on the range of speed values to choose
from. A factor less than one will slow down the video, a factor equal to
one won't alter the video, and a factor greater than one will speed up the
video relative to the original speed
@param max_factor: the upper value on the range of speed values to choose from.
A factor less than one will slow down the video, a factor equal to one won't
alter the video, and a factor greater than one will speed up the video
relative to the original speed
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_factor, max_factor, p)
def apply_random_transform(
self,
video_path: str,
output_path: str,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Transform that randomly changes the video speed
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
return F.change_video_speed(
video_path, output_path, factor=self.chosen_value, metadata=metadata
)
|
AugLy-main
|
augly/video/transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.video.composition import Compose, OneOf
from augly.video.functional import (
add_noise,
apply_lambda,
audio_swap,
augment_audio,
blend_videos,
blur,
brightness,
change_aspect_ratio,
change_video_speed,
color_jitter,
concat,
contrast,
crop,
encoding_quality,
fps,
grayscale,
hflip,
hstack,
insert_in_background,
insert_in_background_multiple,
loop,
meme_format,
overlay,
overlay_dots,
overlay_emoji,
overlay_onto_background_video,
overlay_onto_screenshot,
overlay_shapes,
overlay_text,
pad,
perspective_transform_and_shake,
pixelization,
remove_audio,
replace_with_background,
replace_with_color_frames,
resize,
rotate,
scale,
shift,
time_crop,
time_decimate,
trim,
vflip,
vstack,
)
from augly.video.transforms import (
AddNoise,
ApplyLambda,
AudioSwap,
AugmentAudio,
BlendVideos,
Blur,
Brightness,
ChangeAspectRatio,
ChangeVideoSpeed,
ColorJitter,
Concat,
Contrast,
Crop,
EncodingQuality,
FPS,
Grayscale,
HFlip,
HStack,
InsertInBackground,
InsertInBackgroundMultiple,
Loop,
MemeFormat,
Overlay,
OverlayDots,
OverlayEmoji,
OverlayOntoBackgroundVideo,
OverlayOntoScreenshot,
OverlayShapes,
OverlayText,
Pad,
PerspectiveTransformAndShake,
Pixelization,
RandomAspectRatio,
RandomBlur,
RandomBrightness,
RandomContrast,
RandomEmojiOverlay,
RandomEncodingQuality,
RandomFPS,
RandomNoise,
RandomPixelization,
RandomRotation,
RandomVideoSpeed,
RemoveAudio,
ReplaceWithBackground,
ReplaceWithColorFrames,
Resize,
Rotate,
Scale,
Shift,
TimeCrop,
TimeDecimate,
Trim,
VFlip,
VStack,
)
__all__ = [
"AddNoise",
"ApplyLambda",
"AudioSwap",
"AugmentAudio",
"BlendVideos",
"Blur",
"Brightness",
"ChangeAspectRatio",
"ChangeVideoSpeed",
"ColorJitter",
"Compose",
"Concat",
"Contrast",
"Crop",
"EncodingQuality",
"FPS",
"Grayscale",
"HFlip",
"HStack",
"InsertInBackground",
"InsertInBackgroundMultiple",
"Loop",
"MemeFormat",
"OneOf",
"Overlay",
"OverlayDots",
"OverlayEmoji",
"OverlayOntoBackgroundVideo",
"OverlayOntoScreenshot",
"OverlayShapes",
"OverlayText",
"Pad",
"PerspectiveTransformAndShake",
"Pixelization",
"RandomAspectRatio",
"RandomBlur",
"RandomBrightness",
"RandomContrast",
"RandomEmojiOverlay",
"RandomEncodingQuality",
"RandomFPS",
"RandomNoise",
"RandomPixelization",
"RandomRotation",
"RandomVideoSpeed",
"RemoveAudio",
"ReplaceWithBackground",
"ReplaceWithColorFrames",
"Resize",
"Rotate",
"Scale",
"Shift",
"TimeCrop",
"TimeDecimate",
"Trim",
"VFlip",
"VStack",
"add_noise",
"apply_lambda",
"audio_swap",
"augment_audio",
"blend_videos",
"blur",
"brightness",
"change_aspect_ratio",
"change_video_speed",
"color_jitter",
"concat",
"contrast",
"crop",
"encoding_quality",
"fps",
"grayscale",
"hflip",
"hstack",
"insert_in_background",
"insert_in_background_multiple",
"replace_with_background",
"loop",
"meme_format",
"overlay",
"overlay_dots",
"overlay_emoji",
"overlay_onto_background_video",
"overlay_onto_screenshot",
"overlay_shapes",
"overlay_text",
"pad",
"perspective_transform_and_shake",
"pixelization",
"remove_audio",
"replace_with_color_frames",
"resize",
"rotate",
"scale",
"shift",
"time_crop",
"time_decimate",
"trim",
"vflip",
"vstack",
]
|
AugLy-main
|
augly/video/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import math
import os
import shutil
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import audio as audaugs, image as imaugs, utils
from augly.audio import utils as audutils
from augly.video import helpers, utils as vdutils
from augly.video.augmenters import cv2 as ac, ffmpeg as af
def add_noise(
video_path: str,
output_path: Optional[str] = None,
level: int = 25,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Adds noise to a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param level: noise strength for specific pixel component. Default value is
25. Allowed range is [0, 100], where 0 indicates no change
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
noise_aug = af.VideoAugmenterByNoise(level)
noise_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="add_noise", **func_kwargs
)
return output_path or video_path
def apply_lambda(
video_path: str,
output_path: Optional[str] = None,
aug_function: Callable[..., Any] = helpers.identity_function,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Apply a user-defined lambda on a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param aug_function: the augmentation function to be applied onto the video
(should expect a video path and output path as input and output the augmented
video to the output path. Nothing needs to be returned)
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param **kwargs: the input attributes to be passed into `aug_function`
@returns: the path to the augmented video
"""
assert callable(aug_function), (
repr(type(aug_function).__name__) + " object is not callable"
)
func_kwargs = helpers.get_func_kwargs(
metadata, locals(), video_path, aug_function=aug_function.__name__
)
aug_function(video_path, output_path or video_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="apply_lambda", **func_kwargs
)
return output_path or video_path
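# A minimal usage sketch (paths are placeholders; `my_noop_aug` is a hypothetical
# user-defined function that simply copies the input to the output path):
# >>> def my_noop_aug(video_path, output_path, **kwargs):
# ...     shutil.copy(video_path, output_path)
# >>> apply_lambda("/path/to/input.mp4", "/path/to/output.mp4", aug_function=my_noop_aug)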
def audio_swap(
video_path: str,
audio_path: str,
output_path: Optional[str] = None,
offset: float = 0.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Swaps the video audio for the audio passed in provided an offset
@param video_path: the path to the video to be augmented
@param audio_path: the iopath uri to the audio you'd like to swap with the
video's audio
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset: starting point in seconds such that an audio clip of offset to
offset + video_duration is used in the audio swap. Default value is zero
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
audio_swap_aug = af.VideoAugmenterByAudioSwap(audio_path, offset)
audio_swap_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="audio_swap", **func_kwargs
)
return output_path or video_path
def augment_audio(
video_path: str,
output_path: Optional[str] = None,
audio_aug_function: Callable[..., Tuple[np.ndarray, int]] = audaugs.apply_lambda,
metadata: Optional[List[Dict[str, Any]]] = None,
**audio_aug_kwargs,
) -> str:
"""
Augments the audio track of the input video using a given AugLy audio augmentation
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param audio_aug_function: the augmentation function to be applied onto the video's
audio track. Should have the standard API of an AugLy audio augmentation, i.e.
expect input audio as a numpy array or path & output path as input, and output
the augmented audio to the output path
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param audio_aug_kwargs: the input attributes to be passed into `audio_aug_function`
@returns: the path to the augmented video
"""
assert callable(audio_aug_function), (
repr(type(audio_aug_function).__name__) + " object is not callable"
)
func_kwargs = helpers.get_func_kwargs(
metadata, locals(), video_path, audio_aug_function=audio_aug_function
)
if audio_aug_function is not None:
try:
func_kwargs["audio_aug_function"] = audio_aug_function.__name__
except AttributeError:
func_kwargs["audio_aug_function"] = type(audio_aug_function).__name__
audio_metadata = []
with tempfile.NamedTemporaryFile(suffix=".wav") as tmpfile:
helpers.extract_audio_to_file(video_path, tmpfile.name)
audio, sr = audutils.validate_and_load_audio(tmpfile.name)
aug_audio, aug_sr = audio_aug_function(
audio, sample_rate=sr, metadata=audio_metadata, **audio_aug_kwargs
)
audutils.ret_and_save_audio(aug_audio, tmpfile.name, aug_sr)
audio_swap(video_path, tmpfile.name, output_path=output_path or video_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
audio_metadata=audio_metadata,
function_name="augment_audio",
**func_kwargs,
)
return output_path or video_path
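# A minimal usage sketch (paths are placeholders; assumes `audaugs.pitch_shift`
# follows the standard AugLy audio augmentation API described above):
# >>> augment_audio(
# ...     "/path/to/input.mp4",
# ...     "/path/to/output.mp4",
# ...     audio_aug_function=audaugs.pitch_shift,
# ...     n_steps=2.0,
# ... )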
def blend_videos(
video_path: str,
overlay_path: str,
output_path: Optional[str] = None,
opacity: float = 0.5,
overlay_size: float = 1.0,
x_pos: float = 0.0,
y_pos: float = 0.0,
use_second_audio: bool = True,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays a video onto another video at position (width * x_pos, height * y_pos)
at a lower opacity
@param video_path: the path to the video to be augmented
@param overlay_path: the path to the video that will be overlaid onto the
background video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param opacity: the lower the opacity, the more transparent the overlaid video
@param overlay_size: size of the overlaid video is overlay_size * height of
the background video
@param x_pos: position of overlaid video relative to the background video width
@param y_pos: position of overlaid video relative to the background video height
@param use_second_audio: use the audio of the overlaid video rather than the audio
of the background video
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
blend_func = functools.partial(
imaugs.overlay_image,
opacity=opacity,
overlay_size=overlay_size,
x_pos=x_pos,
y_pos=y_pos,
)
vdutils.apply_to_frames(
blend_func, video_path, overlay_path, output_path, use_second_audio
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="blend_videos", **func_kwargs
)
return output_path or video_path
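# A minimal usage sketch (paths are placeholders): overlay a clip at half opacity
# in the top-left corner of the background video.
# >>> blend_videos(
# ...     "/path/to/background.mp4",
# ...     "/path/to/overlay.mp4",
# ...     "/path/to/output.mp4",
# ...     opacity=0.5,
# ...     overlay_size=0.5,
# ... )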
def blur(
video_path: str,
output_path: Optional[str] = None,
sigma: float = 1,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Blurs a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param sigma: horizontal sigma, standard deviation of Gaussian blur
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
blur_aug = af.VideoAugmenterByBlur(sigma)
blur_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="blur", **func_kwargs)
return output_path or video_path
def brightness(
video_path: str,
output_path: Optional[str] = None,
level: float = 0.15,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Brightens or darkens a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param level: the value must be a float value in range -1.0 to 1.0, where a
negative value darkens and positive brightens
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
brightness_aug = af.VideoAugmenterByBrightness(level)
brightness_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="brightness", **func_kwargs
)
return output_path or video_path
def change_aspect_ratio(
video_path: str,
output_path: Optional[str] = None,
ratio: Union[float, str] = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Changes the sample aspect ratio attribute of the video, and resizes the
video to reflect the new aspect ratio
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param ratio: aspect ratio of the new video, either as a float i.e. width/height,
or as a string representing the ratio in the form "num:denom"
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
aspect_ratio_aug = af.VideoAugmenterByAspectRatio(ratio)
aspect_ratio_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="change_aspect_ratio", **func_kwargs
)
return output_path or video_path
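# A minimal usage sketch (paths are placeholders): the ratio may be given either as
# a float (width/height) or as a "num:denom" string; both calls request 16:9 output.
# >>> change_aspect_ratio("/path/to/input.mp4", "/path/to/output.mp4", ratio=16 / 9)
# >>> change_aspect_ratio("/path/to/input.mp4", "/path/to/output.mp4", ratio="16:9")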
def change_video_speed(
video_path: str,
output_path: Optional[str] = None,
factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Changes the speed of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param factor: the factor by which to alter the speed of the video. A factor
less than one will slow down the video, a factor equal to one won't alter
the video, and a factor greater than one will speed up the video
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
speed_aug = af.VideoAugmenterBySpeed(factor)
speed_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="change_video_speed", **func_kwargs
)
return output_path or video_path
def color_jitter(
video_path: str,
output_path: Optional[str] = None,
brightness_factor: float = 0,
contrast_factor: float = 1.0,
saturation_factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Color jitters the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param brightness_factor: set the brightness expression. The value must be a
float value in range -1.0 to 1.0. The default value is 0
@param contrast_factor: set the contrast expression. The value must be a float
value in range -1000.0 to 1000.0. The default value is 1
@param saturation_factor: set the saturation expression. The value must be a float
in range 0.0 to 3.0. The default value is 1
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
color_jitter_aug = af.VideoAugmenterByColorJitter(
brightness_level=brightness_factor,
contrast_level=contrast_factor,
saturation_level=saturation_factor,
)
color_jitter_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="color_jitter", **func_kwargs
)
return output_path or video_path
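# A minimal usage sketch (paths are placeholders): brighten slightly, boost contrast,
# and desaturate a little, staying within the documented parameter ranges.
# >>> color_jitter(
# ...     "/path/to/input.mp4",
# ...     "/path/to/output.mp4",
# ...     brightness_factor=0.1,
# ...     contrast_factor=1.5,
# ...     saturation_factor=0.8,
# ... )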
def concat(
video_paths: List[str],
output_path: Optional[str] = None,
src_video_path_index: int = 0,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Concatenates videos together. Resizes all other videos to the size of the
`source` video (video_paths[src_video_path_index]), and modifies the sample
aspect ratios to match (ffmpeg will fail to concat if SARs don't match)
@param video_paths: a list of paths to all the videos to be concatenated (in order)
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param src_video_path_index: for metadata purposes, this indicates which video in
the list `video_paths` should be considered the `source` or original video
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(
metadata, locals(), video_paths[src_video_path_index]
)
concat_aug = af.VideoAugmenterByConcat(
video_paths,
src_video_path_index,
transition,
)
concat_aug.add_augmenter(video_paths[src_video_path_index], output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="concat",
video_path=video_paths[src_video_path_index],
**func_kwargs,
)
return output_path or video_paths[src_video_path_index]
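# A minimal usage sketch (paths are placeholders): concatenate two clips, resizing
# the second to match the first (the `source` video at index 0).
# >>> concat(
# ...     ["/path/to/first.mp4", "/path/to/second.mp4"],
# ...     "/path/to/output.mp4",
# ...     src_video_path_index=0,
# ... )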
def contrast(
video_path: str,
output_path: Optional[str] = None,
level: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the contrast of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param level: the value must be a float value in range -1000.0 to 1000.0,
where a negative value removes contrast and a positive value adds contrast
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
contrast_aug = af.VideoAugmenterByContrast(level)
contrast_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="contrast", **func_kwargs)
return output_path or video_path
def crop(
video_path: str,
output_path: Optional[str] = None,
left: float = 0.25,
top: float = 0.25,
right: float = 0.75,
bottom: float = 0.75,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Crops the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param left: left positioning of the crop; between 0 and 1, relative to
the video width
@param top: top positioning of the crop; between 0 and 1, relative to
the video height
@param right: right positioning of the crop; between 0 and 1, relative to
the video width
@param bottom: bottom positioning of the crop; between 0 and 1, relative to
the video height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
crop_aug = af.VideoAugmenterByCrop(left, top, right, bottom)
crop_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="crop", **func_kwargs)
return output_path or video_path
def encoding_quality(
video_path: str,
output_path: Optional[str] = None,
quality: int = 23,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the encoding quality of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param quality: CRF scale is 0–51, where 0 is lossless, 23 is the default,
and 51 is worst quality possible. A lower value generally leads to higher
quality, and a subjectively sane range is 17–28
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
encoding_aug = af.VideoAugmenterByQuality(quality)
encoding_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="encoding_quality", **func_kwargs
)
return output_path or video_path
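# Illustrative usage of encoding_quality() above, not part of the library
# source; "in.mp4" and "degraded.mp4" are hypothetical paths. A higher CRF
# value means stronger compression and lower visual quality:
#
#   encoding_quality("in.mp4", output_path="degraded.mp4", quality=37)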
def fps(
video_path: str,
output_path: Optional[str] = None,
fps: int = 15,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the FPS of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
    @param fps: the desired output frame rate. Note that an FPS value greater than
        the original FPS of the video will result in an unaltered video
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
fps_aug = af.VideoAugmenterByFPSChange(fps)
fps_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="fps", **func_kwargs)
return output_path or video_path
def grayscale(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Changes a video to be grayscale
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
grayscale_aug = af.VideoAugmenterByGrayscale()
grayscale_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="grayscale", **func_kwargs
)
return output_path or video_path
def hflip(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Horizontally flips a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
hflip_aug = af.VideoAugmenterByHFlip()
hflip_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="hflip", **func_kwargs)
return output_path or video_path
def hstack(
video_path: str,
second_video_path: str,
output_path: Optional[str] = None,
use_second_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Horizontally stacks two videos
@param video_path: the path to the video that will be stacked to the left
@param second_video_path: the path to the video that will be stacked to the right
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param use_second_audio: if set to True, the audio of the right video will be
used instead of the left's
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
hstack_aug = af.VideoAugmenterByStack(second_video_path, use_second_audio, "hstack")
hstack_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="hstack", **func_kwargs)
return output_path or video_path
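# Illustrative usage of hstack() above, not part of the library source;
# "left.mp4" and "right.mp4" are hypothetical paths. The two clips are placed
# side by side and the left clip's audio is kept by default:
#
#   hstack("left.mp4", "right.mp4", output_path="side_by_side.mp4")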
def insert_in_background(
video_path: str,
output_path: Optional[str] = None,
background_path: Optional[str] = None,
offset_factor: float = 0.0,
source_percentage: Optional[float] = None,
seed: Optional[int] = None,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Puts the video in the middle of the background video
(at offset_factor * background.duration)
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param background_path: the path to the video in which to insert the main
video. If set to None, the main video will play in the middle of a silent
video with black frames
@param offset_factor: the point in the background video in which the main video
starts to play (this factor is multiplied by the background video duration to
determine the start point)
@param source_percentage: when set, source_percentage of the duration
of the final video (background + source) will be taken up by the
source video. Randomly crops the background video to the correct duration.
If the background video isn't long enough to get the desired source_percentage,
it will be looped.
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution including
its name, the source & dest duration, fps, etc. will be appended to the inputted
list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
assert (
0.0 <= offset_factor <= 1.0
), "Offset factor must be a value in the range [0.0, 1.0]"
if source_percentage is not None:
assert (
0.0 <= source_percentage <= 1.0
), "Source percentage must be a value in the range [0.0, 1.0]"
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
utils.validate_video_path(local_path)
video_info = helpers.get_video_info(local_path)
video_duration = float(video_info["duration"])
width, height = video_info["width"], video_info["height"]
rng = np.random.RandomState(seed) if seed is not None else np.random
video_paths = []
with tempfile.TemporaryDirectory() as tmpdir:
tmp_video_path = os.path.join(tmpdir, "in.mp4")
resized_bg_path = os.path.join(tmpdir, "bg.mp4")
helpers.add_silent_audio(video_path, tmp_video_path)
if background_path is None:
helpers.create_color_video(resized_bg_path, video_duration, height, width)
else:
resize(background_path, resized_bg_path, height, width)
bg_video_info = helpers.get_video_info(resized_bg_path)
bg_video_duration = float(bg_video_info["duration"])
bg_start = 0
bg_end = bg_video_duration
desired_bg_duration = bg_video_duration
if source_percentage is not None:
# desired relationship: percent * (bg_len + s_len) = s_len
# solve for bg_len -> bg_len = s_len / percent - s_len
desired_bg_duration = video_duration / source_percentage - video_duration
# if background vid isn't long enough, loop
num_loops_needed = math.ceil(desired_bg_duration / bg_video_duration)
if num_loops_needed > 1:
loop(resized_bg_path, num_loops=num_loops_needed)
bg_video_duration *= num_loops_needed
bg_start = rng.uniform(0, bg_video_duration - desired_bg_duration)
bg_end = bg_start + desired_bg_duration
offset = desired_bg_duration * offset_factor
transition_before = False
if offset > 0:
before_path = os.path.join(tmpdir, "before.mp4")
trim(resized_bg_path, before_path, start=bg_start, end=bg_start + offset)
video_paths.append(before_path)
src_video_path_index = 1
transition_before = True
else:
src_video_path_index = 0
video_paths.append(tmp_video_path)
transition_after = False
if bg_start + offset < bg_end:
after_path = os.path.join(tmpdir, "after.mp4")
trim(resized_bg_path, after_path, start=bg_start + offset, end=bg_end)
video_paths.append(after_path)
transition_after = True
concat(
video_paths,
output_path or video_path,
src_video_path_index,
transition=transition,
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="insert_in_background",
background_video_duration=desired_bg_duration,
transition_before=transition_before,
transition_after=transition_after,
**func_kwargs,
)
return output_path or video_path
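# Worked example of the source_percentage arithmetic in insert_in_background()
# above (illustrative numbers only): with a 10s source video and
# source_percentage=0.25, desired_bg_duration = 10 / 0.25 - 10 = 30s, so the
# result is ~40s long and the source occupies 25% of it, as intended.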
def insert_in_background_multiple(
video_path: str,
output_path: str,
background_path: str,
src_ids: List[str],
additional_video_paths: List[str],
seed: Optional[int] = None,
min_source_segment_duration: float = 5.0,
max_source_segment_duration: float = 20.0,
min_background_segment_duration: float = 2.0,
min_result_video_duration: float = 30.0,
max_result_video_duration: float = 60.0,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Places the video (and the additional videos) in the middle of the background video.
@param video_path: the path of the main video to be augmented.
@param output_path: the path in which the output video will be stored.
@param background_path: the path of the video in which to insert the main
(and additional) video.
@param src_ids: the list of identifiers for the main video and additional videos.
@param additional_video_paths: list of additional video paths to be
inserted alongside the main video; one clip from each of the input
videos will be inserted in order.
@param seed: if provided, this will set the random seed to ensure consistency
between runs.
@param min_source_segment_duration: minimum duration in seconds of the source
segments that will be inserted in the background video.
@param max_source_segment_duration: maximum duration in seconds of the source
segments that will be inserted in the background video.
@param min_background_segment_duration: minimum duration in seconds of a background segment.
@param min_result_video_duration: minimum duration in seconds of the output video.
@param max_result_video_duration: maximum duration in seconds of the output video.
@param transition: optional transition configuration to apply between the clips.
@param metadata: if set to be a list, metadata about the function execution including
its name, the source & dest duration, fps, etc. will be appended to the inputted
list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
if additional_video_paths:
assert len(additional_video_paths) + 1 == len(
src_ids
), "src_ids need to be specified for the main video and all additional videos."
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
rng = np.random.RandomState(seed) if seed is not None else np.random
local_path = utils.pathmgr.get_local_path(video_path)
additional_local_paths = (
[utils.pathmgr.get_local_path(p) for p in additional_video_paths]
if additional_video_paths
else []
)
bkg_local_path = utils.pathmgr.get_local_path(background_path)
src_paths = [
local_path,
] + additional_local_paths
src_video_durations = np.array(
[float(helpers.get_video_info(v)["duration"]) for v in src_paths]
)
bkg_duration = float(helpers.get_video_info(bkg_local_path)["duration"])
src_segment_durations = (
rng.random_sample(len(src_video_durations))
* (max_source_segment_duration - min_source_segment_duration)
+ min_source_segment_duration
)
src_segment_durations = np.minimum(src_segment_durations, src_video_durations)
src_segment_starts = rng.random(len(src_video_durations)) * (
src_video_durations - src_segment_durations
)
src_segment_ends = src_segment_starts + src_segment_durations
sum_src_duration = np.sum(src_segment_durations)
required_result_duration = (
len(src_segment_durations) + 1
) * min_background_segment_duration + sum_src_duration
if required_result_duration > max_result_video_duration:
raise ValueError(
"Failed to generate config for source segments in insert_in_background_multiple."
)
duration_budget = max_result_video_duration - required_result_duration
bkg_budget = rng.random() * duration_budget
overall_bkg_needed_duration = (
len(src_segment_durations) + 1
) * min_background_segment_duration + bkg_budget
num_loops_needed = 0
if overall_bkg_needed_duration > bkg_duration:
num_loops_needed = math.ceil(overall_bkg_needed_duration / bkg_duration)
# Now sample insertion points by picking len(src_segment_durations) points in the interval [0, bkg_budget)
# Then sort the segments and add spacing for the minimum background segment duration.
bkg_insertion_points = (
np.sort(rng.random(len(src_segment_durations)) * bkg_budget)
+ np.arange(len(src_segment_durations)) * min_background_segment_duration
)
last_bkg_point = overall_bkg_needed_duration
dst_starts = bkg_insertion_points + np.concatenate(
(
[
0.0,
],
np.cumsum(src_segment_durations)[:-1],
)
)
# Start applying transforms.
with tempfile.TemporaryDirectory() as tmpdir:
# First, loop through background video if needed.
if num_loops_needed > 0:
buf = os.path.join(tmpdir, "bkg_loop.mp4")
loop(bkg_local_path, buf, num_loops=num_loops_needed)
bkg_path = buf
else:
bkg_path = bkg_local_path
bkg_videos = []
# Sample background segments.
prev = 0.0
for i, pt in enumerate(bkg_insertion_points):
out_path = os.path.join(tmpdir, f"bkg_{i}.mp4")
trim(bkg_path, out_path, start=prev, end=pt)
prev = pt
bkg_videos.append(out_path)
# last background segment
last_bkg_path = os.path.join(tmpdir, "bkg_last.mp4")
trim(bkg_path, last_bkg_path, start=prev, end=last_bkg_point)
src_videos = []
# Sample source segments.
for i, seg in enumerate(zip(src_segment_starts, src_segment_ends)):
out_path = os.path.join(tmpdir, f"src_{i}.mp4")
trim(src_paths[i], out_path, start=seg[0], end=seg[1])
src_videos.append(out_path)
all_videos = [v for pair in zip(bkg_videos, src_videos) for v in pair] + [
last_bkg_path,
]
concat(all_videos, output_path, 1, transition=transition)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="insert_in_background_multiple",
src_segment_starts=src_segment_starts,
src_segment_ends=src_segment_ends,
bkg_insertion_points=bkg_insertion_points,
**func_kwargs,
)
return output_path
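# Worked example of the duration bookkeeping in insert_in_background_multiple()
# above (illustrative numbers only): with two source segments of 5s and 8s and
# min_background_segment_duration=2, required_result_duration = (2 + 1) * 2 +
# 13 = 19s; with max_result_video_duration=60 this leaves a duration_budget of
# 41s, from which the random background budget bkg_budget is drawn uniformly.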
def replace_with_background(
video_path: str,
output_path: Optional[str] = None,
background_path: Optional[str] = None,
source_offset: float = 0.0,
background_offset: float = 0.0,
source_percentage: float = 0.5,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Replaces the beginning and end of the source video with the background video, keeping the
total duration of the output video equal to the original duration of the source video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored. If not
passed in, the original video file will be overwritten
@param background_path: the path to the video in which to insert the main video.
If set to None, the main video will play in the middle of a silent video with
black frames
    @param source_offset: the starting point where the background video transitions to
        the source video. Prior to this point, the source video is replaced with the
        background video. A value of 0 places all of the background at the end of the
        output, and a value of 1 places all of it at the beginning (the leading
        background segment has length source_offset * final background length)
@param background_offset: the starting point from which the background video starts
to play, as a proportion of the background video duration (i.e. this factor is
multiplied by the background video duration to determine the start point)
@param source_percentage: the percentage of the source video that remains unreplaced
by the background video. The source percentage plus source offset should be less
than 1. If it is greater, the output video duration will be longer than the source.
If the background video is not long enough to get the desired source percentage,
it will be looped
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution including
its name, the source & dest duration, fps, etc. will be appended to the inputted
list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
assert (
0.0 <= source_offset <= 1.0
), "Source offset factor must be a value in the range [0.0, 1.0]"
assert (
0.0 <= background_offset <= 1.0
), "Background offset factor must be a value in the range [0.0, 1.0]"
assert (
0.0 <= source_percentage <= 1.0
), "Source percentage must be a value in the range [0.0, 1.0]"
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
utils.validate_video_path(local_path)
video_info = helpers.get_video_info(video_path)
video_duration = float(video_info["duration"])
width, height = video_info["width"], video_info["height"]
video_paths = []
with tempfile.TemporaryDirectory() as tmpdir:
tmp_video_path = os.path.join(tmpdir, "in.mp4")
resized_bg_path = os.path.join(tmpdir, "bg.mp4")
# create bg video
if background_path is None:
helpers.create_color_video(resized_bg_path, video_duration, height, width)
else:
resize(background_path, resized_bg_path, height, width)
helpers.add_silent_audio(resized_bg_path)
bg_video_info = helpers.get_video_info(resized_bg_path)
bg_video_duration = float(bg_video_info["duration"])
src_video_path_index = 1
final_bg_len = video_duration * (1 - source_percentage)
# if desired bg video too short, loop bg video
num_loops_needed = math.ceil(final_bg_len / bg_video_duration)
if num_loops_needed > 1:
loop(resized_bg_path, num_loops=num_loops_needed)
first_bg_segment_len = source_offset * final_bg_len
last_bg_segment_len = final_bg_len - first_bg_segment_len
# calculate bg start and end times of bg in output video
bg_start = background_offset * bg_video_duration
src_start = first_bg_segment_len
src_length = source_percentage * video_duration
src_end = src_start + src_length
# add pre src background segment
if source_offset > 0:
before_path = os.path.join(tmpdir, "before.mp4")
trim(
resized_bg_path,
before_path,
start=bg_start,
end=bg_start + first_bg_segment_len,
)
video_paths.append(before_path)
src_video_path_index = 1
else:
src_video_path_index = 0
# trim source to length satisfying source_percentage
helpers.add_silent_audio(video_path, tmp_video_path)
trimmed_src_path = os.path.join(tmpdir, "trim_src.mp4")
trim(tmp_video_path, trimmed_src_path, start=src_start, end=src_end)
video_paths.append(trimmed_src_path)
# add post src background segment
if source_offset < 1:
after_path = os.path.join(tmpdir, "after.mp4")
trim(
resized_bg_path,
after_path,
start=bg_start + src_start,
end=bg_start + src_start + last_bg_segment_len,
)
video_paths.append(after_path)
concat(
video_paths,
output_path or video_path,
src_video_path_index,
transition=transition,
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="replace_with_background",
starting_background_duration=first_bg_segment_len,
source_duration=src_length,
ending_background_duration=last_bg_segment_len,
**func_kwargs,
)
return output_path or video_path
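# Worked example of the segment arithmetic in replace_with_background() above
# (illustrative numbers only): with a 20s source, source_percentage=0.5 and
# source_offset=0.25, final_bg_len = 20 * 0.5 = 10s, first_bg_segment_len =
# 0.25 * 10 = 2.5s, last_bg_segment_len = 7.5s and the kept source segment is
# 10s, so the output keeps the original 20s duration.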
def loop(
video_path: str,
output_path: Optional[str] = None,
num_loops: int = 0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Loops a video `num_loops` times
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_loops: the number of times to loop the video. 0 means that the video
will play once (i.e. no loops)
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
loop_aug = af.VideoAugmenterByLoops(num_loops)
loop_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="loop", **func_kwargs)
return output_path or video_path
def meme_format(
video_path: str,
output_path: Optional[str] = None,
text: str = "LOL",
font_file: str = utils.MEME_DEFAULT_FONT,
opacity: float = 1.0,
text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
caption_height: int = 250,
meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Creates a new video that looks like a meme, given text and video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param text: the text to be overlaid/used in the meme. note: if using a very
long string, please add in newline characters such that the text remains
in a readable font size
@param font_file: iopath uri to the .ttf font file
@param opacity: the lower the opacity, the more transparent the text
@param text_color: color of the text in RGB values
@param caption_height: the height of the meme caption
@param meme_bg_color: background color of the meme caption in RGB values
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
meme_func = functools.partial(
imaugs.meme_format,
text=text,
font_file=font_file,
opacity=opacity,
text_color=text_color,
caption_height=caption_height,
meme_bg_color=meme_bg_color,
)
vdutils.apply_to_each_frame(meme_func, video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="meme_format", **func_kwargs
)
return output_path or video_path
def overlay(
video_path: str,
overlay_path: str,
output_path: Optional[str] = None,
overlay_size: Optional[float] = None,
x_factor: float = 0.0,
y_factor: float = 0.0,
use_overlay_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays media onto the video at position (width * x_factor, height * y_factor)
@param video_path: the path to the video to be augmented
@param overlay_path: the path to the media (image or video) that will be
overlaid onto the video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_overlay_audio: if set to True and the media type is a video, the audio
of the overlaid video will be used instead of the main/background video's audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
overlay_path = utils.pathmgr.get_local_path(overlay_path)
tmp_overlay_path = None
if overlay_size is not None:
assert 0 < overlay_size <= 1, "overlay_size must be a value in the range (0, 1]"
video_info = helpers.get_video_info(local_path)
overlay_h = int(video_info["height"] * overlay_size)
overlay_w = int(video_info["width"] * overlay_size)
_, tmp_overlay_path = tempfile.mkstemp(suffix=os.path.splitext(overlay_path)[1])
if utils.is_image_file(overlay_path):
imaugs.resize(overlay_path, tmp_overlay_path, overlay_w, overlay_h)
else:
resize(overlay_path, tmp_overlay_path, overlay_h, overlay_w)
overlay_aug = af.VideoAugmenterByOverlay(
tmp_overlay_path or overlay_path, x_factor, y_factor, use_overlay_audio
)
overlay_aug.add_augmenter(local_path, output_path)
if tmp_overlay_path:
os.remove(tmp_overlay_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="overlay", **func_kwargs)
return output_path or video_path
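# Illustrative usage of overlay() above, not part of the library source;
# "main.mp4" and "logo.png" are hypothetical paths. The logo is scaled to 25%
# of the background video size and placed 10% of the width/height from the
# top-left corner:
#
#   overlay("main.mp4", "logo.png", output_path="out.mp4", overlay_size=0.25,
#           x_factor=0.1, y_factor=0.1)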
def overlay_dots(
video_path: str,
output_path: Optional[str] = None,
num_dots: int = 100,
dot_type: str = "colored",
random_movement: bool = True,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Overlays dots onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_dots: the number of dots to add to each frame
@param dot_type: specify if you would like "blur" or "colored"
    @param random_movement: whether the dots should move randomly around the frame
        or move across it in a "linear" way
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
dots_aug = ac.VideoDistractorByDots(num_dots, dot_type, random_movement)
vdutils.apply_cv2_augmenter(dots_aug, video_path, output_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_dots", **func_kwargs
)
return output_path or video_path
def overlay_emoji(
video_path: str,
output_path: Optional[str] = None,
emoji_path: str = utils.EMOJI_PATH,
x_factor: float = 0.4,
y_factor: float = 0.4,
opacity: float = 1.0,
emoji_size: float = 0.15,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays an emoji onto each frame of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param emoji_path: iopath uri to the emoji image
@param x_factor: specifies where the left side of the emoji should be placed,
relative to the video width
@param y_factor: specifies where the top side of the emoji should be placed,
relative to the video height
@param opacity: opacity of the emoji image
@param emoji_size: emoji size relative to the height of the video frame
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
utils.validate_video_path(video_path)
video_info = helpers.get_video_info(local_path)
with tempfile.TemporaryDirectory() as tmpdir:
local_emoji_path = utils.pathmgr.get_local_path(emoji_path, cache_dir=tmpdir)
utils.validate_image_path(local_emoji_path)
emoji_output_path = os.path.join(tmpdir, "modified_emoji.png")
imaugs.resize(
local_emoji_path,
output_path=emoji_output_path,
height=int(emoji_size * video_info["height"]),
width=int(emoji_size * video_info["height"]),
)
imaugs.opacity(emoji_output_path, output_path=emoji_output_path, level=opacity)
overlay(
video_path,
emoji_output_path,
output_path,
x_factor=x_factor,
y_factor=y_factor,
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_emoji", **func_kwargs
)
return output_path or video_path
def overlay_onto_background_video(
video_path: str,
background_path: str,
output_path: Optional[str] = None,
overlay_size: Optional[float] = 0.7,
x_factor: float = 0.0,
y_factor: float = 0.0,
use_background_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays the video onto a background video, pointed to by background_path.
@param video_path: the path to the video to be augmented
@param background_path: the path to the background video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_background_audio: if set to True and the media type is a video, the
audio of the background video will be used instead of the src video's audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
overlay(
video_path=background_path,
overlay_path=video_path,
output_path=output_path or video_path,
overlay_size=overlay_size,
x_factor=x_factor,
y_factor=y_factor,
use_overlay_audio=not use_background_audio,
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="overlay_onto_background_video",
**func_kwargs,
)
return output_path or video_path
def overlay_onto_screenshot(
video_path: str,
output_path: Optional[str] = None,
template_filepath: str = utils.TEMPLATE_PATH,
template_bboxes_filepath: str = utils.BBOXES_PATH,
max_image_size_pixels: Optional[int] = None,
crop_src_to_fit: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays the video onto a screenshot template so it looks like it was
screen-recorded on Instagram
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param template_filepath: iopath uri to the screenshot template
@param template_bboxes_filepath: iopath uri to the file containing the bounding
box for each template
@param max_image_size_pixels: if provided, the template image and/or src video
will be scaled down to avoid an output image with an area greater than this
size (in pixels)
@param crop_src_to_fit: if True, the src image will be cropped if necessary to
fit into the template image. If False, the src image will instead be resized
if necessary
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
sc_func = functools.partial(
imaugs.overlay_onto_screenshot,
template_filepath=template_filepath,
template_bboxes_filepath=template_bboxes_filepath,
max_image_size_pixels=max_image_size_pixels,
crop_src_to_fit=crop_src_to_fit,
)
vdutils.apply_to_each_frame(sc_func, video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_onto_screenshot", **func_kwargs
)
return output_path or video_path
def overlay_shapes(
video_path: str,
output_path: Optional[str] = None,
num_shapes: int = 1,
shape_type: Optional[str] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
radius: Optional[float] = None,
random_movement: bool = True,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Overlays random shapes onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_shapes: the number of shapes to add to each frame
@param shape_type: specify if you would like circles or rectangles
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the shapes
    @param random_movement: whether the shapes should move randomly around the frame
        or move across it in a "linear" way
    @param topleft: the top-left boundary of the shape region, given as floats in
        [0, 1] representing fractions of the frame width/height
    @param bottomright: the bottom-right boundary of the shape region, given as floats
        in [0, 1] representing fractions of the frame width/height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
shapes_aug = ac.VideoDistractorByShapes(
num_shapes,
shape_type,
colors,
thickness,
radius,
random_movement,
topleft,
bottomright,
)
vdutils.apply_cv2_augmenter(shapes_aug, video_path, output_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_shapes", **func_kwargs
)
return output_path or video_path
def overlay_text(
video_path: str,
output_path: Optional[str] = None,
text_len: int = 10,
text_change_nth: Optional[int] = None,
fonts: Optional[List[Tuple[Any, Optional[str]]]] = None,
fontscales: Optional[Tuple[float, float]] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
random_movement: bool = False,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Overlays random text onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param text_len: length of string for randomized texts.
@param text_change_nth: change random text every nth frame. None means
using same text for all frames.
@param fonts: list of fonts to sample from. Each font can be a cv2 fontFace,
a PIL ImageFont, or a path to a PIL ImageFont file. Each font is coupled with
a chars file (the second item in the tuple) - a path to a file which contains
the characters associated with the given font. For example, non-western
alphabets have different valid characters than the roman alphabet, and these
must be specified in order to construct random valid text in that font. If the
chars file path is None, the roman alphabet will be used.
@param fontscales: 2-tuple of float (min_scale, max_scale).
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the text.
    @param random_movement: whether the text should move randomly around the frame
        or move across it in a "linear" way
    @param topleft: the top-left boundary of the text region, given as floats in
        [0, 1] representing fractions of the frame width/height
    @param bottomright: the bottom-right boundary of the text region, given as floats
        in [0, 1] representing fractions of the frame width/height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
text_aug = ac.VideoDistractorByText(
text_len,
text_change_nth,
fonts,
fontscales,
colors,
thickness,
random_movement,
topleft,
bottomright,
)
vdutils.apply_cv2_augmenter(text_aug, video_path, output_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_text", **func_kwargs
)
return output_path or video_path
def pad(
video_path: str,
output_path: Optional[str] = None,
w_factor: float = 0.25,
h_factor: float = 0.25,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Pads the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param w_factor: pad right and left with w_factor * frame width
@param h_factor: pad bottom and top with h_factor * frame height
@param color: RGB color of the padded margin
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
pad_aug = af.VideoAugmenterByPadding(w_factor, h_factor, color)
pad_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="pad", **func_kwargs)
return output_path or video_path
def perspective_transform_and_shake(
video_path: str,
output_path: Optional[str] = None,
sigma: float = 50.0,
shake_radius: float = 0.0,
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Apply a perspective transform to the video so it looks like it was taken
as a photo from another device (e.g. taking a video from your phone of a
video on a computer). Also has a shake factor to mimic the shakiness of
someone holding a phone.
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param sigma: the standard deviation of the distribution of destination coordinates.
The larger the sigma value, the more intense the transform
@param shake_radius: determines the amount by which to "shake" the video; the larger
the radius, the more intense the shake.
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
perspective_func = functools.partial(
imaugs.perspective_transform, sigma=sigma, seed=seed
)
duration = float(helpers.get_video_info(video_path)["duration"])
rng = np.random.RandomState(seed) if seed is not None else np.random
def get_dx_dy(frame_number: int) -> Dict:
u = math.sin(frame_number / duration * math.pi) ** 2
return {
"dx": u * rng.normal(0, shake_radius),
"dy": u * rng.normal(0, shake_radius),
}
vdutils.apply_to_each_frame(perspective_func, video_path, output_path, get_dx_dy)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="perspective_transform_and_shake",
**func_kwargs,
)
return output_path or video_path
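# Note on the shake schedule in perspective_transform_and_shake() above: each
# frame's (dx, dy) offset is drawn from a zero-mean normal distribution with
# standard deviation shake_radius and scaled by
# u = sin(frame_number / duration * pi) ** 2, which lies in [0, 1], so the
# shake magnitude is modulated over the clip rather than being constant.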
def pixelization(
video_path: str,
output_path: Optional[str] = None,
ratio: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Pixelizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param ratio: smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
assert ratio > 0, "Expected 'ratio' to be a positive number"
video_info = helpers.get_video_info(video_path)
width, height = video_info["width"], video_info["height"]
output_path = output_path or video_path
resize(video_path, output_path, height * ratio, width * ratio)
resize(output_path, output_path, height, width)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="pixelization", **func_kwargs
)
return output_path or video_path
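# Worked example of the two-pass resize in pixelization() above (illustrative
# numbers only): with a 1920x1080 video and ratio=0.1, the frames are first
# resized down to roughly 192x108 and then back up to 1920x1080, which produces
# the blocky, pixelated look.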
def remove_audio(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Removes the audio stream from a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
remove_audio_aug = af.VideoAugmenterByRemovingAudio()
remove_audio_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="remove_audio", **func_kwargs
)
return output_path or video_path
def replace_with_color_frames(
video_path: str,
output_path: Optional[str] = None,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Replaces part of the video with frames of the specified color
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset_factor: start point of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param duration_factor: the length of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param color: RGB color of the replaced frames. Default color is black
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
utils.validate_video_path(video_path)
assert (
0.0 <= offset_factor <= 1.0 and 0.0 <= duration_factor <= 1.0
), "Both offset & duration factors must be values in the range [0.0, 1.0]"
func_kwargs = {
**helpers.get_func_kwargs(metadata, locals(), video_path),
"function_name": "replace_with_color_frames",
}
video_info = helpers.get_video_info(video_path)
video_duration = float(video_info["duration"])
width, height = video_info["width"], video_info["height"]
offset = video_duration * offset_factor
duration = video_duration * duration_factor
output_path = output_path or video_path
if duration == 0 or offset == video_duration:
if output_path != video_path:
shutil.copy(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, **func_kwargs)
return output_path or video_path
video_paths = []
    # when offset == 0 there is no "before" segment, so the source-derived clip
    # ("after") sits at index 1 of video_paths rather than index 2
    src_video_path_index = 0 if offset > 0 else 1
with tempfile.TemporaryDirectory() as tmpdir:
color_duration = (
video_duration - offset if offset + duration >= video_duration else duration
)
color_path = os.path.join(tmpdir, "color_frames.mp4")
helpers.create_color_video(color_path, color_duration, height, width, color)
if helpers.has_audio_stream(video_path):
audio_path = os.path.join(tmpdir, "audio.aac")
helpers.extract_audio_to_file(video_path, audio_path)
audio_swap(color_path, audio_path, offset=offset)
if offset_factor == 0 and duration_factor == 1.0:
shutil.copy(color_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, **func_kwargs)
return output_path or video_path
if offset > 0:
before_path = os.path.join(tmpdir, "before.mp4")
trim(video_path, before_path, end=offset)
video_paths.append(before_path)
video_paths.append(color_path)
if offset + duration < video_duration:
after_path = os.path.join(tmpdir, "after.mp4")
trim(video_path, after_path, start=offset + duration)
video_paths.append(after_path)
concat(
video_paths,
output_path,
src_video_path_index=src_video_path_index,
transition=transition,
)
if metadata is not None:
helpers.get_metadata(metadata=metadata, **func_kwargs)
return output_path or video_path
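# Worked example of the factor arithmetic in replace_with_color_frames() above
# (illustrative numbers only): with a 10s video, offset_factor=0.2 and
# duration_factor=0.3, the segment from 2s to 5s is replaced with color frames
# (carrying the original audio, if any) and the surrounding segments are kept.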
def resize(
video_path: str,
output_path: Optional[str] = None,
height: Union[int, str] = "ih",
width: Union[int, str] = "iw",
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Resizes a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
    @param height: the height to which the video should be resized. If not specified,
        the original video height will be used
    @param width: the width to which the video should be resized. If not specified,
        the original video width will be used
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
resize_aug = af.VideoAugmenterByResize(height, width)
resize_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="resize", **func_kwargs)
return output_path or video_path
def rotate(
video_path: str,
output_path: Optional[str] = None,
degrees: float = 15.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Rotates a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param degrees: expression for the angle by which to rotate the input video
clockwise, expressed in degrees (supports negative values as well)
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
rotate_aug = af.VideoAugmenterByRotation(degrees)
rotate_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="rotate", **func_kwargs)
return output_path or video_path
def scale(
video_path: str,
output_path: Optional[str] = None,
factor: float = 0.5,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the resolution of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param factor: the ratio by which the video should be downscaled or upscaled
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
scale_aug = af.VideoAugmenterByResolution(factor)
scale_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="scale", **func_kwargs)
return output_path or video_path
def shift(
video_path: str,
output_path: Optional[str] = None,
x_factor: float = 0.0,
y_factor: float = 0.0,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Shifts the original frame position from the center by a vector
(width * x_factor, height * y_factor) and pads the rest with a
colored margin
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param x_factor: the horizontal amount that the video should be shifted,
relative to the width of the video
@param y_factor: the vertical amount that the video should be shifted,
relative to the height of the video
@param color: RGB color of the margin generated by the shift. Default color is black
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
utils.validate_video_path(video_path)
video_info = helpers.get_video_info(video_path)
with tempfile.TemporaryDirectory() as tmpdir:
background_path = os.path.join(tmpdir, "background.mp4")
helpers.create_color_video(
background_path,
float(video_info["duration"]),
video_info["height"],
video_info["width"],
color,
)
overlay(
background_path,
video_path,
output_path,
x_factor=x_factor,
y_factor=y_factor,
use_overlay_audio=True,
)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="shift", **func_kwargs)
return output_path or video_path
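# Illustrative usage of shift() above, not part of the library source; "in.mp4"
# and "shifted.mp4" are hypothetical paths. The frame content is moved right by
# 20% of the width and down by 10% of the height, with the exposed margin
# filled with the default (black) color:
#
#   shift("in.mp4", output_path="shifted.mp4", x_factor=0.2, y_factor=0.1)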
def time_crop(
video_path: str,
output_path: Optional[str] = None,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
minimum_duration: float = 0.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Crops the video using the specified offset and duration factors
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset_factor: start point of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param duration_factor: the length of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param minimum_duration: the minimum duration of a segment selected
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
time_crop_aug = af.VideoAugmenterByTrim(
offset_factor=offset_factor,
duration_factor=duration_factor,
minimum_duration=minimum_duration,
)
time_crop_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="time_crop", **func_kwargs
)
return output_path or video_path
def time_decimate(
video_path: str,
output_path: Optional[str] = None,
start_offset_factor: float = 0.0,
on_factor: float = 0.2,
off_factor: float = 0.5,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Removes evenly sized (off) chunks, and concatenates evenly spaced (on)
chunks from the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param start_offset_factor: relative to the video duration; the offset
at which to start taking "on" segments
@param on_factor: relative to the video duration; the amount of time each
"on" video chunk should be
@param off_factor: relative to the "on" duration; the amount of time each
"off" video chunk should be
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
assert (
0 <= start_offset_factor < 1
), f"start_offset_factor value {start_offset_factor} must be in the range [0, 1)"
assert 0 < on_factor <= 1, "on_factor must be a value in the range (0, 1]"
assert 0 <= off_factor <= 1, "off_factor must be a value in the range [0, 1]"
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
utils.validate_video_path(local_path)
video_info = helpers.get_video_info(local_path)
_, video_ext = os.path.splitext(local_path)
duration = float(video_info["duration"])
start_offset = duration * start_offset_factor
on_segment = duration * on_factor
off_segment = on_segment * off_factor
subclips = []
n = int((duration - start_offset) / (on_segment + off_segment))
    # let a = on_segment and b = off_segment; relative to start_offset the kept
    # subclips are 0->a, a+b -> 2a+b, 2a+2b -> 3a+2b, ..., (n-1)(a+b) -> (n-1)(a+b)+a
with tempfile.TemporaryDirectory() as tmpdir:
for i in range(n):
clip_path = os.path.join(tmpdir, f"{i}{video_ext}")
trim(
video_path,
clip_path,
start=start_offset + i * on_segment + i * off_segment,
end=min(
duration, start_offset + (i + 1) * on_segment + i * off_segment
),
)
subclips.append(clip_path)
# Skip concatenation if only 1 clip.
if n > 1:
concat(
subclips,
output_path,
transition=transition,
)
else:
if output_path is not None:
shutil.copy(subclips[0], output_path)
else:
shutil.copy(subclips[0], local_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="time_decimate", **func_kwargs
)
return output_path or video_path
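# Worked example of the chunking in time_decimate() above (illustrative numbers
# only): with a 30s video, start_offset_factor=0, on_factor=0.2 and
# off_factor=0.5, on_segment = 6s, off_segment = 3s and n = int(30 / 9) = 3,
# so the kept chunks are 0-6s, 9-15s and 18-24s before concatenation.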
def trim(
video_path: str,
output_path: Optional[str] = None,
start: Optional[float] = None,
end: Optional[float] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Trims the video using the specified start and end parameters
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param start: starting point in seconds of when the trimmed video should start.
If None, start will be 0
@param end: ending point in seconds of when the trimmed video should end.
If None, the end will be the duration of the video
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
trim_aug = af.VideoAugmenterByTrim(start=start, end=end)
trim_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="trim", **func_kwargs)
return output_path or video_path
def vflip(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Vertically flips a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
vflip_aug = af.VideoAugmenterByVFlip()
vflip_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="vflip", **func_kwargs)
return output_path or video_path
def vstack(
video_path: str,
second_video_path: str,
output_path: Optional[str] = None,
use_second_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Vertically stacks two videos
@param video_path: the path to the video that will be stacked on top
@param second_video_path: the path to the video that will be stacked on the bottom
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param use_second_audio: if set to True, the audio of the bottom video will be used
instead of the top's
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
vstack_aug = af.VideoAugmenterByStack(second_video_path, use_second_audio, "vstack")
vstack_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="vstack", **func_kwargs)
return output_path or video_path
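# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). The file paths below are hypothetical
# placeholders; running this requires ffmpeg and the augly dependencies to be
# installed. It simply chains a few of the functions defined in this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    meta = []  # each call appends a dict describing what it did
    # keep seconds 1.0-5.0 of the clip, then flip the result vertically
    trim("input.mp4", "trimmed.mp4", start=1.0, end=5.0, metadata=meta)
    vflip("trimmed.mp4", "flipped.mp4", metadata=meta)
    # keep evenly spaced "on" chunks: each chunk is 20% of the duration,
    # separated by gaps that are 50% of the chunk length
    time_decimate("input.mp4", "decimated.mp4", on_factor=0.2, off_factor=0.5)
    print(meta)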
|
AugLy-main
|
augly/video/functional.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
import shutil
import tempfile
from typing import Callable, Dict, List, Optional, Union
from augly import utils
from augly.video import helpers as helpers
from augly.video.augmenters import cv2 as ac
"""
Utility Functions: Augmentation Application Functions
- For FFMPEG-Based Functions
- For CV2-Based Functions
- For Applying Image Functions to Each Frame
"""
def apply_to_each_frame(
img_aug_function: functools.partial,
video_path: str,
output_path: Optional[str],
frame_func: Optional[Callable[[int], Dict]] = None,
) -> None:
video_path, output_path = helpers.validate_input_and_output_paths(
video_path, output_path
)
with tempfile.TemporaryDirectory() as tmpdir:
frame_dir = os.path.join(tmpdir, "video_frames")
os.mkdir(frame_dir)
helpers.extract_frames_to_dir(video_path, frame_dir)
for i, frame_file in enumerate(os.listdir(frame_dir)):
frame_path = os.path.join(frame_dir, frame_file)
kwargs = frame_func(i) if frame_func is not None else {}
img_aug_function(frame_path, output_path=frame_path, **kwargs)
audio_path = None
if helpers.has_audio_stream(video_path):
audio_path = os.path.join(tmpdir, "audio.aac")
helpers.extract_audio_to_file(video_path, audio_path)
helpers.combine_frames_and_audio_to_file(
os.path.join(frame_dir, "raw_frame*.jpg"),
audio_path,
output_path,
helpers.get_video_fps(video_path) or utils.DEFAULT_FRAME_RATE,
)
def apply_to_frames(
img_aug_function: functools.partial,
video_path: str,
second_video_path: str,
output_path: Optional[str],
use_second_audio: bool = False,
) -> None:
video_path, output_path = helpers.validate_input_and_output_paths(
video_path, output_path
)
second_video_path = helpers.get_local_path(second_video_path)
with tempfile.TemporaryDirectory() as tmpdir:
frame_dir = os.path.join(tmpdir, "video_frames")
os.mkdir(frame_dir)
helpers.extract_frames_to_dir(video_path, frame_dir)
second_frame_dir = os.path.join(tmpdir, "second_video_frames")
os.mkdir(second_frame_dir)
helpers.extract_frames_to_dir(second_video_path, second_frame_dir)
for frame_file, second_frame_file in zip(
os.listdir(frame_dir), os.listdir(second_frame_dir)
):
img_aug_function(
os.path.join(frame_dir, frame_file),
os.path.join(second_frame_dir, second_frame_file),
output_path=os.path.join(frame_dir, frame_file),
)
audio_path = None
if not use_second_audio and helpers.has_audio_stream(video_path):
audio_path = os.path.join(tmpdir, "audio.aac")
helpers.extract_audio_to_file(video_path, audio_path)
elif use_second_audio and helpers.has_audio_stream(second_video_path):
audio_path = os.path.join(tmpdir, "audio.aac")
helpers.extract_audio_to_file(second_video_path, audio_path)
helpers.combine_frames_and_audio_to_file(
os.path.join(frame_dir, "raw_frame*.jpg"),
audio_path,
output_path,
helpers.get_video_fps(video_path) or utils.DEFAULT_FRAME_RATE,
)
def apply_cv2_augmenter(
distractor: ac.BaseCV2Augmenter,
video_path: str,
output_path: Optional[str],
**kwargs,
) -> None:
video_path, output_path = helpers.validate_input_and_output_paths(
video_path, output_path
)
with tempfile.TemporaryDirectory() as tmpdir:
video_tmp_path = os.path.join(tmpdir, os.path.basename(video_path))
shutil.copyfile(video_path, video_tmp_path)
fps = helpers.get_video_fps(video_tmp_path) or utils.DEFAULT_FRAME_RATE
aug_frame_temp_dir = distractor.augment(video_tmp_path, fps, **kwargs)
audio_path = None
if helpers.has_audio_stream(video_path):
audio_path = os.path.join(tmpdir, "audio.aac")
helpers.extract_audio_to_file(video_path, audio_path)
helpers.combine_frames_and_audio_to_file(
os.path.join(aug_frame_temp_dir, "raw_frame*.jpg"),
audio_path,
video_tmp_path,
fps,
)
shutil.move(video_tmp_path, output_path)
shutil.rmtree(aug_frame_temp_dir)
def get_image_kwargs(imgs_dir: str) -> Dict[str, Optional[Union[List[str], str]]]:
return {"imgs_dir": imgs_dir, "imgs_files": utils.pathmgr.ls(imgs_dir)}
|
AugLy-main
|
augly/video/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import shutil
from typing import Any, Dict, List, Optional
from augly.video.helpers import validate_input_and_output_paths
from augly.video.transforms import VidAugBaseClass
"""
Composition Operators:
Compose: identical to the Compose object provided by the torchvision
library, this class provides a similar experience for applying multiple
transformations onto a video
OneOf: the OneOf operator takes as input a list of transforms and
may apply (with probability p) one of the transforms in the list.
If a transform is applied, it is selected using the specified
probabilities of the individual transforms.
Example:
>>> Compose([
>>> VFlip(),
>>> Brightness(),
>>> OneOf([
>>> OverlayOntoScreenshot(),
>>> OverlayText(),
>>> ]),
>>> ])
"""
class BaseComposition(VidAugBaseClass):
def __init__(self, transforms: List[VidAugBaseClass], p: float = 1.0):
"""
@param transforms: a list of transforms
@param p: the probability of the transform being applied; default value is 1.0
"""
for transform in transforms:
assert isinstance(
transform, VidAugBaseClass
), "Expected instances of type 'VidAugBaseClass' for parameter 'transforms'"
super().__init__(p)
self.transforms = transforms
class Compose(BaseComposition):
def __call__(
self,
video_path: str,
output_path: Optional[str] = None,
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Applies the list of transforms in order to the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param seed: if provided, the random seed will be set to this before calling
the transform
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
video_path, output_path = validate_input_and_output_paths(
video_path, output_path
)
if video_path != output_path:
shutil.copy(video_path, output_path)
if seed is not None:
random.seed(seed)
for transform in self.transforms:
output_path = transform(output_path, metadata=metadata)
return output_path
class OneOf(BaseComposition):
def __init__(self, transforms: List[VidAugBaseClass], p: float = 1.0):
"""
@param transforms: a list of transforms to select from; one of which will
be chosen to be applied to the video
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(transforms, p)
transform_probs = [t.p for t in transforms]
probs_sum = sum(transform_probs)
self.transform_probs = [t / probs_sum for t in transform_probs]
def __call__(
self,
video_path: str,
output_path: Optional[str] = None,
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Applies one of the transforms to the video (with probability p)
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param seed: if provided, the random seed will be set to this before calling
the transform
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
video_path, output_path = validate_input_and_output_paths(
video_path, output_path
)
if seed is not None:
random.seed(seed)
if random.random() > self.p:
return output_path
transform = random.choices(self.transforms, self.transform_probs)[0]
return transform(video_path, output_path, force=True, metadata=metadata)
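# Usage sketch (illustrative only): this mirrors the example in the module
# docstring above and assumes the transform classes it names (VFlip,
# Brightness, OverlayOntoScreenshot, OverlayText) are importable from
# augly.video.transforms; the file paths are hypothetical placeholders.
if __name__ == "__main__":
    from augly.video.transforms import (
        Brightness,
        OverlayOntoScreenshot,
        OverlayText,
        VFlip,
    )

    pipeline = Compose(
        [
            VFlip(),
            Brightness(),
            # pick exactly one of the two overlays, weighted by their `p` values
            OneOf([OverlayOntoScreenshot(), OverlayText()]),
        ]
    )
    # fixing the seed makes the probabilistic choices reproducible
    pipeline("input.mp4", "output.mp4", seed=42)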
|
AugLy-main
|
augly/video/composition.py
|
AugLy-main
|
augly/video/augmenters/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of base class for FFMPEG-based video augmenters
- Method to override:
- `get_command(self, video_path: str, output_path: str)`:
returns a list of strings containing the options to pass into the ffmpeg command
"""
import os
import shutil
import tempfile
from abc import ABC, abstractmethod
from typing import List, Optional
from augly.video.helpers import validate_input_and_output_paths
from vidgear.gears import WriteGear
class BaseVidgearFFMPEGAugmenter(ABC):
def add_augmenter(
self, video_path: str, output_path: Optional[str] = None, **kwargs
) -> None:
"""
Applies the specific augmentation to the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param kwargs: parameters for specific augmenters
"""
video_path, output_path = validate_input_and_output_paths(
video_path, output_path
)
with tempfile.NamedTemporaryFile(
suffix=os.path.splitext(video_path)[1]
) as tmpfile:
if video_path == output_path:
shutil.copyfile(video_path, tmpfile.name)
video_path = tmpfile.name
writer = WriteGear(output_filename=output_path, logging=True)
writer.execute_ffmpeg_cmd(self.get_command(video_path, output_path))
writer.close()
@abstractmethod
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Constructs the FFMPEG command for VidGear
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
raise NotImplementedError("Implement get_command method")
@staticmethod
def input_fmt(video_path: str) -> List[str]:
return ["-y", "-i", video_path]
@staticmethod
def output_fmt(output_path: str) -> List[str]:
return ["-preset", "ultrafast", output_path]
@staticmethod
def standard_filter_fmt(
video_path: str, filters: List[str], output_path: str
) -> List[str]:
return [
*BaseVidgearFFMPEGAugmenter.input_fmt(video_path),
"-vf",
*filters,
"-c:a",
"copy",
*BaseVidgearFFMPEGAugmenter.output_fmt(output_path),
]
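# Minimal subclass sketch (illustrative only): shows the single method a new
# FFMPEG-based augmenter has to provide. "negate" is a standard ffmpeg video
# filter, used here purely as an example filter string.
class _VideoAugmenterByNegation(BaseVidgearFFMPEGAugmenter):
    def get_command(self, video_path: str, output_path: str) -> List[str]:
        # reuse the standard "-i <in> -vf <filters> -c:a copy ... <out>" layout
        return self.standard_filter_fmt(video_path, ["negate"], output_path)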
|
AugLy-main
|
augly/video/augmenters/ffmpeg/base_augmenter.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Union
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info
class VideoAugmenterByAspectRatio(BaseVidgearFFMPEGAugmenter):
def __init__(self, ratio: Union[float, str]):
assert (isinstance(ratio, str) and len(ratio.split(":")) == 2) or (
isinstance(ratio, (int, float)) and ratio > 0
), "Aspect ratio must be a valid string ratio or a positive number"
self.aspect_ratio = ratio
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Changes the sample (sar) & display (dar) aspect ratios of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
area = int(video_info["width"]) * int(video_info["height"])
        if isinstance(self.aspect_ratio, (int, float)):
aspect_ratio = float(self.aspect_ratio)
else:
num, denom = [int(x) for x in str(self.aspect_ratio).split(":")]
aspect_ratio = num / denom
new_w = int(math.sqrt(area * aspect_ratio))
new_h = int(area / new_w)
filters = [
f"scale=width={new_w}:height={new_h},"
+ "pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2,"
+ f"setsar=ratio={self.aspect_ratio},"
+ f"setdar=ratio={self.aspect_ratio}",
]
return self.standard_filter_fmt(video_path, filters, output_path)
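# Worked example (illustrative only): for a 1280x720 input (area 921600) and
# ratio "1:1", the arithmetic above rescales the frame to 960x960 before the
# setsar/setdar filters are applied.
if __name__ == "__main__":
    _area = 1280 * 720                    # 921600 pixels
    _new_w = int(math.sqrt(_area * 1.0))  # 960
    _new_h = int(_area / _new_w)          # 960
    assert (_new_w, _new_h) == (960, 960)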
|
AugLy-main
|
augly/video/augmenters/ffmpeg/aspect_ratio.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.utils import is_image_file, is_video_file, pathmgr
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info, has_audio_stream
class VideoAugmenterByOverlay(BaseVidgearFFMPEGAugmenter):
def __init__(
self,
overlay_path: str,
x_factor: float,
y_factor: float,
use_overlay_audio: bool,
):
assert is_image_file(overlay_path) or is_video_file(
overlay_path
), "Overlaid media type not supported: please overlay either an image or video"
assert 0 <= x_factor <= 1, "x_factor must be a value in the range [0, 1]"
assert 0 <= y_factor <= 1, "y_factor must be a value in the range [0, 1]"
assert (
type(use_overlay_audio) == bool
), "Expected a boolean value for use_overlay_audio"
self.overlay_path = pathmgr.get_local_path(overlay_path)
self.x_factor = x_factor
self.y_factor = y_factor
self.use_overlay_audio = use_overlay_audio and is_video_file(overlay_path)
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Overlays media onto the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
new_width = video_info["width"] * self.x_factor
new_height = video_info["height"] * self.y_factor
process_audio = has_audio_stream(video_path)
ret = [
*self.input_fmt(video_path),
"-i",
self.overlay_path,
"-filter_complex",
f"[0:v][1:v] overlay={new_width}:{new_height}",
]
if process_audio:
ret += [
"-map",
f"{int(self.use_overlay_audio)}:a:0",
]
ret += self.output_fmt(output_path)
return ret
|
AugLy-main
|
augly/video/augmenters/ffmpeg/overlay.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByGrayscale(BaseVidgearFFMPEGAugmenter):
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Changes the video to be grayscale
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(video_path, ["hue=s=0"], output_path)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/grayscale.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByFPSChange(BaseVidgearFFMPEGAugmenter):
def __init__(self, fps: int):
assert fps > 0, "FPS must be greater than zero"
self.fps = fps
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Changes the frame rate of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(
video_path, [f"fps=fps={self.fps}:round=up"], output_path
)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/fps.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from enum import Enum
from math import ceil
from typing import List, Optional
from augly.utils import pathmgr
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info, has_audio_stream
from dataclasses_json import dataclass_json
log = logging.getLogger(__name__)
class TransitionEffect(Enum):
DISSOLVE = 2
RADIAL = 3
CIRCLEOPEN = 4
CIRCLECLOSE = 5
PIXELIZE = 6
HLSLICE = 7
HRSLICE = 8
VUSLICE = 9
VDSLICE = 10
HBLUR = 11
FADEGRAYS = 12
FADEBLACK = 13
FADEWHITE = 14
RECTCROP = 15
CIRCLECROP = 16
WIPELEFT = 17
WIPERIGHT = 18
SLIDEDOWN = 19
SLIDEUP = 20
SLIDELEFT = 21
SLIDERIGHT = 22
@dataclass_json
@dataclass
class TransitionConfig:
effect: TransitionEffect
# transition duration in seconds.
duration: float = 2.0
class VideoAugmenterByConcat(BaseVidgearFFMPEGAugmenter):
def __init__(
self,
video_paths: List[str],
src_video_path_index: int,
transition: Optional[TransitionConfig] = None,
):
assert len(video_paths) > 0, "Please provide at least one input video"
assert all(
pathmgr.exists(video_path) for video_path in video_paths
), "Invalid video path(s) provided"
self.video_paths = [
pathmgr.get_local_path(video_path) for video_path in video_paths
]
self.src_video_path_index = src_video_path_index
video_info = get_video_info(self.video_paths[src_video_path_index])
self.height = ceil(video_info["height"] / 2) * 2
self.width = ceil(video_info["width"] / 2) * 2
log.info("Width=%d height=%d", self.width, self.height)
self.sample_aspect_ratio = video_info.get("sample_aspect_ratio", "1")
log.info("Aspect ratio=%s", self.sample_aspect_ratio)
self.transition = transition
def _create_null_transition_filters(
self,
video_streams: List[str],
audio_streams: List[str],
process_audio: bool,
) -> List[str]:
if process_audio:
# Interleave the video and audio streams.
all_streams = [
v for pair in zip(video_streams, audio_streams) for v in pair
]
out_streams = "v=1:a=1[v][a]"
else:
all_streams = video_streams
out_streams = "v=1[v]"
filters = [
f"{''.join(all_streams)}concat=n={len(self.video_paths)}:{out_streams}"
]
return filters
def _create_transition_filters(
self,
video_streams: List[str],
audio_streams: List[str],
process_audio: bool,
out_video: str = "[v]",
out_audio: str = "[a]",
) -> List[str]:
if self.transition is None:
return self._create_null_transition_filters(
video_streams, audio_streams, process_audio
)
transition = self.transition
effect = transition.effect.name.lower()
video_durations = [
float(get_video_info(video_path)["duration"])
for video_path in self.video_paths
]
log.info(f"Video durations = {video_durations}.")
# There are 2 steps:
# 1. Harmonize the timebase between clips;
# 2. Add the transition filter.
td = transition.duration
concat_filters = []
for i, name in enumerate(video_streams):
fps_filter = f"[{i}fps]"
concat_filters.append(f"{name}settb=AVTB,fps=30/1{fps_filter}")
if td > min(video_durations):
raise ValueError(
f"transition duration {td} should be smaller than video durations {min(video_durations)} "
)
prev = "[0fps]"
cum_dur = video_durations[0]
for i in range(1, len(video_durations) - 1):
dur = video_durations[i]
fps_filter = f"[{i}fps]"
out_filter = f"[{i}m]"
offset = cum_dur - td
concat_filters.append(
f"{prev}{fps_filter}xfade=transition={effect}:duration={td}:offset={offset}{out_filter}"
)
prev = out_filter
cum_dur += dur - td
# Special processing for the last filter to comply with out_video requirement.
concat_filters.append(
f"{prev}[{len(video_durations) - 1}fps]xfade=transition={effect}:duration={td}:offset={cum_dur - td}{out_video}"
)
# Concat audio filters.
if process_audio:
prev = audio_streams[0]
cum_dur = video_durations[0]
for i in range(1, len(video_durations) - 1):
dur = video_durations[i]
in_f = audio_streams[i]
out_f = f"[a{i}m]"
offset = cum_dur - td
concat_filters.append(
f"{prev}{in_f}acrossfade=d={td}:c1=tri:c2=tri{out_f}"
)
prev = out_f
cum_dur += dur - td
concat_filters.append(
f"{prev}[{len(video_durations) - 1}:a]acrossfade=d={td}:c1=tri:c2=tri{out_audio}"
)
return concat_filters
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
        Concatenates multiple videos together, including both video and audio streams.
        If any of the input files does not have an audio stream, audio will not be processed.
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
inputs = [["-i", video] for video in self.video_paths]
flat_inputs = [element for sublist in inputs for element in sublist]
filters = []
process_audio = all(has_audio_stream(v) for v in self.video_paths)
if not process_audio:
            log.warning("Audio processing will be skipped.")
video_streams = []
audio_streams = []
for i in range(len(self.video_paths)):
filters.append(
f"[{i}:v]scale={self.width}:{self.height}[{i}v],[{i}v]"
f"setsar=ratio={self.sample_aspect_ratio}[{i}vf]"
)
video_streams.append(f"[{i}vf]")
audio_streams.append(f"[{i}:a]")
filters += self._create_transition_filters(
video_streams, audio_streams, process_audio
)
result = [
"-y",
*flat_inputs,
"-filter_complex",
";".join(filters),
"-map",
"[v]",
]
if process_audio:
result += ["-map", "[a]"]
result += [
"-vsync",
"2",
*self.output_fmt(output_path),
]
return result
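# Usage sketch (illustrative only): the clip paths are hypothetical
# placeholders (the constructor checks that they exist, so real files are
# needed to run this). With a transition config, the filter graph built above
# chains per-pair xfade (video) and acrossfade (audio) filters instead of a
# plain concat.
if __name__ == "__main__":
    augmenter = VideoAugmenterByConcat(
        video_paths=["clip_a.mp4", "clip_b.mp4", "clip_c.mp4"],
        src_video_path_index=0,  # output uses clip_a's resolution and aspect ratio
        transition=TransitionConfig(effect=TransitionEffect.FADEBLACK, duration=1.0),
    )
    augmenter.add_augmenter("clip_a.mp4", "concatenated.mp4")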
|
AugLy-main
|
augly/video/augmenters/ffmpeg/concat.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterBySpeed(BaseVidgearFFMPEGAugmenter):
def __init__(self, factor: float):
assert factor > 0, "Factor must be greater than zero"
self.factor = factor
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Changes the speed of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return [
*self.input_fmt(video_path),
"-vf",
f"setpts={1/self.factor}*PTS",
"-filter:a",
f"atempo={self.factor}",
*self.output_fmt(output_path),
]
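# Worked example (illustrative only): with factor=2.0 the command above uses
# "setpts=0.5*PTS" to halve the video timestamps and "atempo=2.0" to speed the
# audio up by the same amount, so the output plays twice as fast. Building the
# command does not touch the filesystem, so the paths here are placeholders.
if __name__ == "__main__":
    _cmd = VideoAugmenterBySpeed(factor=2.0).get_command("input.mp4", "output.mp4")
    assert "setpts=0.5*PTS" in _cmd and "atempo=2.0" in _cmd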
|
AugLy-main
|
augly/video/augmenters/ffmpeg/speed.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.video.augmenters.ffmpeg.aspect_ratio import VideoAugmenterByAspectRatio
from augly.video.augmenters.ffmpeg.audio_swap import VideoAugmenterByAudioSwap
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.augmenters.ffmpeg.blur import VideoAugmenterByBlur
from augly.video.augmenters.ffmpeg.brightness import VideoAugmenterByBrightness
from augly.video.augmenters.ffmpeg.color_jitter import VideoAugmenterByColorJitter
from augly.video.augmenters.ffmpeg.concat import (
TransitionConfig,
TransitionEffect,
VideoAugmenterByConcat,
)
from augly.video.augmenters.ffmpeg.contrast import VideoAugmenterByContrast
from augly.video.augmenters.ffmpeg.crop import VideoAugmenterByCrop
from augly.video.augmenters.ffmpeg.fps import VideoAugmenterByFPSChange
from augly.video.augmenters.ffmpeg.grayscale import VideoAugmenterByGrayscale
from augly.video.augmenters.ffmpeg.hflip import VideoAugmenterByHFlip
from augly.video.augmenters.ffmpeg.loops import VideoAugmenterByLoops
from augly.video.augmenters.ffmpeg.no_audio import VideoAugmenterByRemovingAudio
from augly.video.augmenters.ffmpeg.noise import VideoAugmenterByNoise
from augly.video.augmenters.ffmpeg.overlay import VideoAugmenterByOverlay
from augly.video.augmenters.ffmpeg.pad import VideoAugmenterByPadding
from augly.video.augmenters.ffmpeg.quality import VideoAugmenterByQuality
from augly.video.augmenters.ffmpeg.resize import VideoAugmenterByResize
from augly.video.augmenters.ffmpeg.resolution import VideoAugmenterByResolution
from augly.video.augmenters.ffmpeg.rotate import VideoAugmenterByRotation
from augly.video.augmenters.ffmpeg.speed import VideoAugmenterBySpeed
from augly.video.augmenters.ffmpeg.stack import VideoAugmenterByStack
from augly.video.augmenters.ffmpeg.trim import VideoAugmenterByTrim
from augly.video.augmenters.ffmpeg.vflip import VideoAugmenterByVFlip
__all__ = [
"BaseVidgearFFMPEGAugmenter",
"TransitionConfig",
"TransitionEffect",
"VideoAugmenterByAspectRatio",
"VideoAugmenterByAudioSwap",
"VideoAugmenterByBlur",
"VideoAugmenterByBrightness",
"VideoAugmenterByColorJitter",
"VideoAugmenterByConcat",
"VideoAugmenterByContrast",
"VideoAugmenterByCrop",
"VideoAugmenterByFPSChange",
"VideoAugmenterByGrayscale",
"VideoAugmenterByHFlip",
"VideoAugmenterByLoops",
"VideoAugmenterByNoise",
"VideoAugmenterByOverlay",
"VideoAugmenterByPadding",
"VideoAugmenterByQuality",
"VideoAugmenterByRemovingAudio",
"VideoAugmenterByResize",
"VideoAugmenterByResolution",
"VideoAugmenterByRotation",
"VideoAugmenterBySpeed",
"VideoAugmenterByStack",
"VideoAugmenterByTrim",
"VideoAugmenterByVFlip",
]
|
AugLy-main
|
augly/video/augmenters/ffmpeg/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info
class VideoAugmenterByTrim(BaseVidgearFFMPEGAugmenter):
def __init__(
self,
start: Optional[float] = None,
end: Optional[float] = None,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
minimum_duration: float = 0.0,
):
assert start is None or start >= 0, "Start cannot be a negative number"
assert (
end is None or (start is not None and end > start) or end > 0
), "End must be a value greater than start"
assert (
0.0 <= offset_factor <= 1.0
), "Offset factor must be a value in the range [0.0, 1.0]"
assert (
0.0 <= duration_factor <= 1.0
), "Duration factor must be a value in the range [0.0, 1.0]"
if start is not None or end is not None:
self.start = start
self.end = end
self.offset_factor = None
self.duration_factor = None
self.minimum_duration = 0.0
else:
self.start = None
self.end = None
self.offset_factor = offset_factor
self.duration_factor = duration_factor
self.minimum_duration = minimum_duration
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Trims the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
duration = float(video_info["duration"])
if self.start is None and self.end is None:
self.start = self.offset_factor * duration
duration = min(
max(self.minimum_duration, self.duration_factor * duration),
duration - self.start,
)
self.end = self.start + duration
elif self.start is None:
self.start = 0
elif self.end is None:
self.end = duration
return [
*self.input_fmt(video_path),
"-vf",
f"trim={self.start}:{self.end}," + "setpts=PTS-STARTPTS",
"-af",
f"atrim={self.start}:{self.end}," + "asetpts=PTS-STARTPTS",
*self.output_fmt(output_path),
]
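# Worked example (illustrative only): for a 10 second clip with
# offset_factor=0.25 and duration_factor=0.5, the logic above computes
#   start = 0.25 * 10 = 2.5
#   kept  = min(max(0.0, 0.5 * 10), 10 - 2.5) = 5.0
#   end   = 2.5 + 5.0 = 7.5
# so the trim/atrim filters keep seconds 2.5-7.5 of the input.
if __name__ == "__main__":
    _duration, _offset_factor, _duration_factor, _minimum = 10.0, 0.25, 0.5, 0.0
    _start = _offset_factor * _duration
    _kept = min(max(_minimum, _duration_factor * _duration), _duration - _start)
    assert (_start, _start + _kept) == (2.5, 7.5)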
|
AugLy-main
|
augly/video/augmenters/ffmpeg/trim.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByColorJitter(BaseVidgearFFMPEGAugmenter):
def __init__(
self, brightness_level: float, contrast_level: float, saturation_level: float
):
assert (
-1.0 <= brightness_level <= 1.0
), "Brightness factor must be a value in the range [-1.0, 1.0]"
assert (
-1000.0 <= contrast_level <= 1000.0
), "Contrast factor must be a value in the range [-1000, 1000]"
assert (
0.0 <= saturation_level <= 3.0
), "Saturation factor must be a value in the range [0.0, 3.0]"
self.brightness_level = brightness_level
self.contrast_level = contrast_level
self.saturation_level = saturation_level
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Color jitters the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
filters = [
f"eq=brightness={self.brightness_level}"
+ f":contrast={self.contrast_level}"
+ f":saturation={self.saturation_level}"
]
return self.standard_filter_fmt(video_path, filters, output_path)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/color_jitter.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info
class VideoAugmenterByCrop(BaseVidgearFFMPEGAugmenter):
def __init__(self, left: float, top: float, right: float, bottom: float):
assert 0.0 <= left <= 1.0, "Left must be a value in the range [0.0, 1.0]"
assert 0.0 <= top <= 1.0, "Top must be a value in the range [0.0, 1.0]"
assert left < right <= 1.0, "Right must be a value in the range (left, 1.0]"
assert top < bottom <= 1.0, "Bottom must be a value in the range (top, 1.0]"
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Crops the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
x1 = int(video_info["width"] * self.left)
y1 = int(video_info["height"] * self.top)
width = int(video_info["width"] * (self.right - self.left))
height = int(video_info["height"] * (self.bottom - self.top))
return self.standard_filter_fmt(
video_path, [f"crop=w={width}:h={height}:x={x1}:y={y1}"], output_path
)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/crop.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByRemovingAudio(BaseVidgearFFMPEGAugmenter):
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Removes the audio from the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return [
*self.input_fmt(video_path),
"-c",
"copy",
"-an",
*self.output_fmt(output_path),
]
|
AugLy-main
|
augly/video/augmenters/ffmpeg/no_audio.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByHFlip(BaseVidgearFFMPEGAugmenter):
def get_command(self, video_path: str, output_path: str, **kwargs) -> List[str]:
"""
Horizontally flips the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(video_path, ["hflip"], output_path)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/hflip.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByLoops(BaseVidgearFFMPEGAugmenter):
def __init__(self, num_loops: int):
assert type(num_loops) == int, "Number of loops must be an integer"
assert num_loops >= 0, "Number of loops cannot be a negative number"
self.num_loops = num_loops
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Loops the video `num_loops` times
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return [
"-y",
"-stream_loop",
str(self.num_loops),
"-i",
video_path,
"-c:a",
"copy",
*self.output_fmt(output_path),
]
|
AugLy-main
|
augly/video/augmenters/ffmpeg/loops.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple
from augly.utils import validate_rgb_color
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info
class VideoAugmenterByPadding(BaseVidgearFFMPEGAugmenter):
def __init__(self, w_factor: float, h_factor: float, color: Tuple[int, int, int]):
assert w_factor >= 0, "w_factor cannot be a negative number"
assert h_factor >= 0, "h_factor cannot be a negative number"
validate_rgb_color(color)
self.w_factor = w_factor
self.h_factor = h_factor
self.hex_color = "%02x%02x%02x" % color
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Adds padding to the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
left = int(video_info["width"] * self.w_factor)
top = int(video_info["height"] * self.h_factor)
filters = [
f"pad=width={left*2}+iw:height={top*2}+ih"
+ f":x={left}:y={top}:color={self.hex_color}"
]
return self.standard_filter_fmt(video_path, filters, output_path)
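# Worked example (illustrative only): for a 1920x1080 input with w_factor=0.1,
# h_factor=0.1 and color=(255, 0, 0), the code above computes left=192 and
# top=108 and emits
#   pad=width=384+iw:height=216+ih:x=192:y=108:color=ff0000
# i.e. a red border of 10% of each dimension on every side.
if __name__ == "__main__":
    assert "%02x%02x%02x" % (255, 0, 0) == "ff0000"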
|
AugLy-main
|
augly/video/augmenters/ffmpeg/pad.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByBlur(BaseVidgearFFMPEGAugmenter):
def __init__(self, sigma: float):
assert sigma >= 0, "Sigma cannot be a negative number"
self.sigma = sigma
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Blurs the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(
video_path, [f"gblur={self.sigma}"], output_path
)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/blur.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Union
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByResize(BaseVidgearFFMPEGAugmenter):
def __init__(self, height: Union[int, str] = "ih", width: Union[int, str] = "iw"):
"""
Constructor. See https://trac.ffmpeg.org/wiki/Scaling for height and width options.
        @param height: height specification. Defaults to the input height if not specified.
        @param width: width specification. Defaults to the input width if not specified.
"""
self.new_h = height
self.new_w = width
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Resizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
filters = [
f"scale=height:{self.new_h}:width={self.new_w},"
+ "pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2",
]
return self.standard_filter_fmt(video_path, filters, output_path)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/resize.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.utils import pathmgr
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info, has_audio_stream
class VideoAugmenterByStack(BaseVidgearFFMPEGAugmenter):
def __init__(
self, second_video_path: str, use_second_audio: bool, orientation: str
):
assert (
type(use_second_audio) == bool
), "Expected a boolean value for use_second_audio"
assert orientation in [
"hstack",
"vstack",
], "Expected orientation to be either 'hstack' or 'vstack'"
self.second_video_path = pathmgr.get_local_path(second_video_path)
self.use_second_audio = use_second_audio
self.orientation = orientation
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Stacks two videos together
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
process_audio = False
if self.use_second_audio:
process_audio = has_audio_stream(self.second_video_path)
else:
process_audio = has_audio_stream(video_path)
ret = [
*self.input_fmt(video_path),
"-i",
self.second_video_path,
"-filter_complex",
f"[1:v]scale={video_info['width']}:{video_info['height']}[1v];"
+ f"[0:v][1v]{self.orientation}=inputs=2[v]",
"-map",
"[v]",
]
if process_audio:
ret += [
"-map",
f"{int(self.use_second_audio)}:a",
]
ret += [
"-vsync",
"2",
*self.output_fmt(output_path),
]
return ret
|
AugLy-main
|
augly/video/augmenters/ffmpeg/stack.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByNoise(BaseVidgearFFMPEGAugmenter):
def __init__(self, level: int):
assert 0 <= level <= 100, "Level must be a value in the range [0, 100]"
self.level = level
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Adds noise to the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(
video_path, [f"boxblur=lr=1.2,noise=c0s={self.level}:allf=t"], output_path
)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/noise.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.utils import pathmgr
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_audio_info, get_video_info
class VideoAugmenterByAudioSwap(BaseVidgearFFMPEGAugmenter):
def __init__(self, audio_path: str, offset: float):
assert offset >= 0, "Offset cannot be a negative number"
self.audio_path = pathmgr.get_local_path(audio_path)
self.offset = offset
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Swaps the audio of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
audio_info = get_audio_info(self.audio_path)
video_info = get_video_info(video_path)
audio_duration = float(audio_info["duration"])
audio_sample_rate = float(audio_info["sample_rate"])
start = self.offset
end = start + float(video_info["duration"])
audio_filters = f"atrim={start}:{end}," + "asetpts=PTS-STARTPTS"
if end > audio_duration:
            pad_len = int((end - audio_duration) * audio_sample_rate)
audio_filters += f",apad=pad_len={pad_len}"
return [
*self.input_fmt(video_path),
"-i",
self.audio_path,
"-c:v",
"copy",
"-af",
audio_filters,
"-map",
"0:v:0",
"-map",
"1:a:0",
output_path,
]
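# Worked example (illustrative only): for a 20 second audio track, a 12 second
# video and offset=5.0, the logic above trims the audio to seconds 5-17 via
# "atrim=5.0:17.0,asetpts=PTS-STARTPTS"; no apad is added because the trim
# window (17 s) still fits inside the 20 s track. If the window ran past the
# end of the track, apad would extend the audio by the missing samples.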
|
AugLy-main
|
augly/video/augmenters/ffmpeg/audio_swap.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByVFlip(BaseVidgearFFMPEGAugmenter):
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Vertically flips the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(video_path, ["vflip"], output_path)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/vflip.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByBrightness(BaseVidgearFFMPEGAugmenter):
def __init__(self, level: float):
assert -1.0 <= level <= 1.0, "Level must be a value in the range [-1.0, 1.0]"
self.level = level
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Changes the brightness level of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(
video_path, [f"eq=brightness={self.level}"], output_path
)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/brightness.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByResolution(BaseVidgearFFMPEGAugmenter):
def __init__(self, resolution: float):
assert (
isinstance(resolution, (int, float)) and resolution > 0.0
), "Invalid resolution: scale factor must be positive"
self.resolution = resolution
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Alters the resolution of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
filters = [
f"scale=height:ih*{self.resolution}:width=iw*{self.resolution},"
+ "pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2"
]
return self.standard_filter_fmt(video_path, filters, output_path)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/resolution.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByRotation(BaseVidgearFFMPEGAugmenter):
def __init__(self, degrees: float):
assert isinstance(degrees, (float, int)), "Expected 'degrees' to be a number"
self.degrees = degrees
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Rotates the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(
video_path, [f"rotate={self.degrees * (math.pi / 180)}"], output_path
)
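# Worked example (illustrative only): degrees are converted to radians for
# ffmpeg's rotate filter, so degrees=90 yields "rotate=1.5707963267948966".
# Building the command does not touch the filesystem, so the paths below are
# placeholders.
if __name__ == "__main__":
    _cmd = VideoAugmenterByRotation(degrees=90).get_command("in.mp4", "out.mp4")
    assert any(f.startswith("rotate=") for f in _cmd)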
|
AugLy-main
|
augly/video/augmenters/ffmpeg/rotate.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByQuality(BaseVidgearFFMPEGAugmenter):
def __init__(self, quality: int):
assert 0 <= quality <= 51, "Quality must be a value in the range [0, 51]"
self.quality = quality
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Alters the quality level of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return [
*self.input_fmt(video_path),
"-c:v",
"libx264",
"-crf",
f"{self.quality}",
"-c:a",
"copy",
*self.output_fmt(output_path),
]
|
AugLy-main
|
augly/video/augmenters/ffmpeg/quality.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
class VideoAugmenterByContrast(BaseVidgearFFMPEGAugmenter):
def __init__(self, level: float):
assert (
-1000.0 <= level <= 1000.0
), "Level must be a value in the range [-1000, 1000]"
self.level = level
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Changes the contrast level of the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
return self.standard_filter_fmt(
video_path, [f"eq=contrast={self.level}"], output_path
)
|
AugLy-main
|
augly/video/augmenters/ffmpeg/contrast.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of base class for video distractors
- Interface:
- `augment(self, video_temp_path: str, fps: float, **kwargs)`:
    extracts frames from the video and turns the original set of frames into
    augmented frames by mapping each of them through the `apply_augmentation` method.
    The path to the temporary directory containing the augmented frames is returned
- Methods to override:
- `apply_augmentation(self, raw_frame: np.ndarray, **kwargs)`:
applies the augmentation to each frame
"""
import os
import random
import shutil
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Iterator, List, Optional, Tuple
import cv2
import numpy as np
from augly import utils
from augly.video.helpers import extract_frames_to_dir
class BaseCV2Augmenter(ABC):
def __init__(
self,
num_dist: int = 0,
random_movement: bool = True,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
**kwargs,
):
assert type(random_movement) == bool, "Random movement must be set to a boolean"
assert topleft is None or all(
0.0 <= t <= 1.0 for t in topleft
), "Topleft must be in the range [0, 1]"
assert bottomright is None or all(
0.0 <= b <= 1.0 for b in bottomright
), "Bottomright must be in the range [0, 1]"
self.random_movement = random_movement
if self.random_movement:
self.origins = BaseCV2Augmenter.random_origins(topleft, bottomright)
else:
top, left = topleft or (0.01, 0.01)
bottom, right = bottomright or (0.99, 0.99)
y_vals = random.choices(np.linspace(top, bottom, num=15), k=num_dist)
random.shuffle(y_vals)
self.origins = [
BaseCV2Augmenter.moving_origins(left, y_val, left, right)
for y_val in y_vals
]
def augment(self, video_temp_path: str, fps: float, **kwargs) -> str:
"""
        Augments the video's frames by applying distractors to each frame via
        the `apply_augmentation` method
        @param video_temp_path: local temp path of the video to be augmented
        @param fps: frame rate of the video to be augmented
        @param kwargs: parameters to pass into apply_augmentation
@returns: path to the temp directory containing augmented frames
"""
video_temp_dir = Path(video_temp_path).parent
frame_temp_dir = os.path.join(video_temp_dir, "raw_frames_distractor")
os.mkdir(frame_temp_dir)
aug_frame_temp_dir = os.path.join(video_temp_dir, "aug_frames_distractor")
os.mkdir(aug_frame_temp_dir)
extract_frames_to_dir(video_temp_path, frame_temp_dir)
for raw_frame_file in os.listdir(frame_temp_dir):
frame = cv2.imread(os.path.join(frame_temp_dir, raw_frame_file))
aug_frame = self.apply_augmentation(frame, **kwargs)
cv2.imwrite(os.path.join(aug_frame_temp_dir, raw_frame_file), aug_frame)
shutil.rmtree(frame_temp_dir)
return aug_frame_temp_dir
@abstractmethod
def apply_augmentation(self, raw_frame: np.ndarray, **kwargs) -> np.ndarray:
"""
Applies the specific augmentation to a single frame
@param raw_frame: raw, single RGB/Gray frame
        @returns: the augmented frame
"""
raise NotImplementedError("Implement apply_augmentation method")
def get_origins(self, index: int) -> Tuple[float, float]:
if self.random_movement:
return next(self.origins)
return next(self.origins[index])
@staticmethod
def random_origins(
topleft: Optional[Tuple[float, float]],
bottomright: Optional[Tuple[float, float]],
) -> Iterator[Tuple[float, float]]:
top, left = topleft or (0.01, 0.01)
bottom, right = bottomright or (0.99, 0.99)
while True:
yield (random.uniform(left, right), random.uniform(top, bottom))
@staticmethod
def moving_origins(
x_start: float, y_val: float, x_min: float, x_max: float
) -> Iterator[Tuple[float, float]]:
x_curr = x_start
while True:
yield x_curr, y_val
x_curr += 0.02
x_curr = x_curr if x_curr <= x_max else x_min
@staticmethod
def random_colors(
colors: Optional[List[Tuple[int, int, int]]]
) -> Iterator[Tuple[int, int, int]]:
if colors is not None:
assert type(colors) == list, "Expected type 'List' for colors variable"
for color in colors:
utils.validate_rgb_color(color)
while True:
if colors:
color = colors[random.randint(0, len(colors) - 1)]
else:
color = (
random.randint(0, 255), # R
random.randint(0, 255), # G
random.randint(0, 255), # B
)
yield color
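# Minimal subclass sketch (illustrative only): shows the single method a new
# cv2-based distractor has to implement. This one simply dims every frame,
# which is enough to exercise the frame extraction / re-encoding loop that
# augment() drives above.
class _VideoDistractorByDimming(BaseCV2Augmenter):
    def apply_augmentation(self, raw_frame: np.ndarray, **kwargs) -> np.ndarray:
        # scale pixel values down by 50% while keeping the uint8 dtype
        return (raw_frame * 0.5).astype(np.uint8)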
|
AugLy-main
|
augly/video/augmenters/cv2/base_augmenter.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Iterator, List, Optional, Tuple
import cv2
import numpy as np
from augly.video.augmenters.cv2.base_augmenter import BaseCV2Augmenter
class VideoDistractorByShapes(BaseCV2Augmenter):
def __init__(
self,
num_shapes: int,
shape_type: Optional[str] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
radius: Optional[float] = None,
random_movement: bool = True,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
**kwargs,
) -> None:
assert num_shapes > 0, "Number of shapes must be greater than zero"
assert shape_type is None or shape_type in [
"circle",
"rectangle",
], "Shape type must be set to None or to 'circle' or 'rectangle'"
assert (
thickness is None or type(thickness) == int
), "Invalid thickness provided: must be set to None or an integer"
super().__init__(num_shapes, random_movement, topleft, bottomright, **kwargs)
self.num_shapes = num_shapes
self.shape_type = VideoDistractorByShapes.random_shape_type(shape_type)
self.colors = BaseCV2Augmenter.random_colors(colors)
self.thickness = VideoDistractorByShapes.random_thickness(thickness)
self.radius = VideoDistractorByShapes.random_radius(radius)
# overrides abstract method of base class
def apply_augmentation(self, raw_frame: np.ndarray, **kwargs) -> np.ndarray:
"""
        Adds shape distractors (in various colors and positions) to each frame
        @param raw_frame: raw, single RGB/Gray frame
        @returns: the augmented frame
"""
assert (raw_frame.ndim == 3) and (
raw_frame.shape[2] == 3
), "VideoDistractorByShapes only accepts RGB images"
height, width = raw_frame.shape[:2]
distract_frame = raw_frame.copy()
for i in range(self.num_shapes):
shape_type = next(self.shape_type)
color = next(self.colors)
thickness = next(self.thickness)
fraction_x, fraction_y = self.get_origins(i)
x = int(fraction_x * width)
y = int(fraction_y * height)
if shape_type == "circle":
smaller_side = min(height, width)
radius = int(next(self.radius) * smaller_side)
cv2.circle(distract_frame, (x, y), radius, color, thickness)
if shape_type == "rectangle":
fraction_x, fraction_y = self.get_origins(i)
x_2 = int(fraction_x * width)
y_2 = int(fraction_y * height)
cv2.rectangle(distract_frame, (x, y), (x_2, y_2), color, thickness)
return distract_frame
@staticmethod
def random_shape_type(shape_type: Optional[str]) -> Iterator[str]:
shapes = ["circle", "rectangle"]
while True:
            yield shape_type if shape_type else random.choice(shapes)
@staticmethod
def random_thickness(thickness: Optional[int]) -> Iterator[int]:
while True:
yield thickness or random.randint(-1, 5)
@staticmethod
def random_radius(radius: Optional[float]) -> Iterator[float]:
if radius:
radius = radius if radius < 0.5 else radius / 2
while True:
yield radius or (random.random() / 2)
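# ---------------------------------------------------------------------------
# Editor's sketch (not part of the AugLy API): a minimal usage example that
# draws circle distractors on a synthetic black frame.  All parameter values
# and the frame size below are illustrative assumptions, not AugLy defaults.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _distractor = VideoDistractorByShapes(
        num_shapes=3,
        shape_type="circle",
        colors=[(255, 0, 0)],
        thickness=2,
        radius=0.1,
    )
    _frame = np.zeros((240, 320, 3), dtype=np.uint8)
    _augmented = _distractor.apply_augmentation(_frame)
    print("augmented frame shape:", _augmented.shape)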
|
AugLy-main
|
augly/video/augmenters/cv2/shapes.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import cv2
import numpy as np
from augly.video.augmenters.cv2.base_augmenter import BaseCV2Augmenter
from augly.video.augmenters.cv2.shapes import VideoDistractorByShapes
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class VideoDistractorByDots(BaseCV2Augmenter):
def __init__(
self, num_dots: int, dot_type: str, random_movement: bool = True, **kwargs
) -> None:
assert num_dots > 0, "Number of dots must be greater than zero"
assert dot_type in [
"colored",
"blur",
], "Dot type must be set to None or to 'colored' or 'blur'"
super().__init__(num_dots, random_movement, **kwargs)
self.num_dots = num_dots
self.dot_type = dot_type
self.shapes_distractor = None
if self.dot_type == "colored":
self.shapes_distractor = VideoDistractorByShapes(
num_dots,
shape_type="circle",
colors=[(0, 0, 0), (91, 123, 166)],
thickness=2,
radius=0.001,
random_movement=random_movement,
)
def add_blurred_dots(self, raw_frame: np.ndarray) -> np.ndarray:
height, width = raw_frame.shape[:2]
distract_frame = raw_frame.copy()
for i in range(self.num_dots):
fraction_x, fraction_y = self.get_origins(i)
x = int(fraction_x * width)
y = int(fraction_y * height)
# I think that sometimes the random positioning of the dot goes
# past the frame resulting in an error, but I can't repro this, so
# try/catching for now
try:
dot_bbox = distract_frame[y : y + 10, x : x + 10]
dot_bbox = cv2.GaussianBlur(dot_bbox, (111, 111), cv2.BORDER_DEFAULT)
distract_frame[y : y + 10, x : x + 10] = dot_bbox
except Exception as e:
logger.warning(f"Exception while adding Gaussian dot distractor: {e}")
return distract_frame
# overrides abstract method of base class
def apply_augmentation(self, raw_frame: np.ndarray, **kwargs) -> np.ndarray:
"""
        Adds random dot distractors (in various colors and positions) to each frame
        @param raw_frame: raw, single RGB/Gray frame
        @returns: the augmented frame
"""
assert (raw_frame.ndim == 3) and (
raw_frame.shape[2] == 3
), "VideoDistractorByDots only accepts RGB images"
if self.dot_type == "colored":
return self.shapes_distractor.apply_augmentation(raw_frame, **kwargs)
return self.add_blurred_dots(raw_frame)
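# ---------------------------------------------------------------------------
# Editor's sketch (not part of the AugLy API): apply colored dot distractors
# to a synthetic black frame.  The dot count and frame size are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _dots = VideoDistractorByDots(num_dots=5, dot_type="colored")
    _frame = np.zeros((240, 320, 3), dtype=np.uint8)
    print("augmented frame shape:", _dots.apply_augmentation(_frame).shape)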
|
AugLy-main
|
augly/video/augmenters/cv2/dots.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.video.augmenters.cv2.base_augmenter import BaseCV2Augmenter
from augly.video.augmenters.cv2.dots import VideoDistractorByDots
from augly.video.augmenters.cv2.shapes import VideoDistractorByShapes
from augly.video.augmenters.cv2.text import VideoDistractorByText
__all__ = [
"BaseCV2Augmenter",
"VideoDistractorByText",
"VideoDistractorByShapes",
"VideoDistractorByDots",
]
|
AugLy-main
|
augly/video/augmenters/cv2/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import random
import string
from typing import Any, Iterator, List, Optional, Tuple
import cv2
import numpy as np
from augly.utils import pathmgr
from augly.video.augmenters.cv2.base_augmenter import BaseCV2Augmenter
from PIL import Image, ImageDraw, ImageFont
CV2_FONTS = [
cv2.FONT_HERSHEY_SIMPLEX,
cv2.FONT_HERSHEY_PLAIN,
cv2.FONT_HERSHEY_DUPLEX,
cv2.FONT_HERSHEY_COMPLEX,
cv2.FONT_HERSHEY_TRIPLEX,
cv2.FONT_HERSHEY_COMPLEX_SMALL,
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
cv2.FONT_HERSHEY_SCRIPT_COMPLEX,
]
class VideoDistractorByText(BaseCV2Augmenter):
def __init__(
self,
text_len: int,
text_change_nth: Optional[int] = None,
fonts: Optional[List[Tuple[Any, Optional[str]]]] = None,
fontscales: Optional[Tuple[float, float]] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
random_movement: bool = False,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
**kwargs,
) -> None:
assert text_len > 0, "Text length must be greater than zero"
assert (
text_change_nth is None or text_change_nth > 0
), "`text_change_nth` must be greater than zero"
assert fonts is None or all(
(isinstance(font, (str, ImageFont.ImageFont)) or (font in CV2_FONTS))
and (chars is None or isinstance(chars, str))
for font, chars in fonts
), "Fonts must be either None or a list of tuples of font (cv2 font, PIL ImageFont, or str path to a .ttf file) & chars file (str path or None)"
assert fontscales is None or (
fontscales[0] > 0 and fontscales[1] > fontscales[0]
), "Fontscale ranges must be greater than zero and the second value must be greater than the first" # noqa: B950
assert thickness is None or (
type(thickness) == int and thickness > 0
), "Invalid thickness provided: must be set to None or be an integer greater than zero" # noqa: B950
super().__init__(1, random_movement, topleft, bottomright, **kwargs)
self.texts = self.random_texts(text_len, text_change_nth)
self.fonts = self.random_fonts(fonts)
self.fontscales = self.random_fontscales(fontscales)
self.colors = BaseCV2Augmenter.random_colors(colors)
self.thickness = self.random_thickness(thickness)
def random_texts(
self, text_len: int, text_change_nth: Optional[int]
) -> Iterator[List[float]]:
def random_text(n):
return [random.random() for _ in range(n)]
        iframe = 0
        text = random_text(text_len)
        while True:
            if text_change_nth and iframe and iframe % text_change_nth == 0:
                text = random_text(text_len)
            yield text
            iframe += 1
def random_fonts(
self, fonts: Optional[List[Tuple[Any, Optional[str]]]]
) -> Iterator[Tuple[Any, List[str]]]:
fonts_and_chars = fonts or [(font, None) for font in CV2_FONTS]
while True:
font_idx = random.randint(0, len(fonts_and_chars) - 1)
font, chars_path = fonts_and_chars[font_idx]
if chars_path is not None:
with pathmgr.open(chars_path, "rb") as f:
chars = [chr(c) for c in pickle.load(f)]
else:
chars = list(string.ascii_letters + string.punctuation)
yield font, chars
def random_fontscales(
self, fontscales: Optional[Tuple[float, float]]
) -> Iterator[float]:
fontscales = fontscales or (2.5, 5)
while True:
yield random.uniform(*fontscales)
def random_thickness(self, thickness: Optional[int]) -> Iterator[int]:
while True:
yield thickness or random.randint(2, 5)
# overrides abstract method of base class
def apply_augmentation(self, raw_frame: np.ndarray, **kwargs) -> np.ndarray:
"""
        Adds text distractors (in various colors, fonts, and positions) to each frame
        @param raw_frame: raw, single RGB/Gray frame
        @returns: the augmented frame
"""
assert (raw_frame.ndim == 3) and (
raw_frame.shape[2] == 3
), "VideoDistractorByText only accepts RGB images"
height, width = raw_frame.shape[:2]
text = next(self.texts)
font, chars = next(self.fonts) # pyre-ignore
fontscale = next(self.fontscales)
color = next(self.colors)
thickness = next(self.thickness)
fraction_x, fraction_y = self.get_origins(0)
x = int(fraction_x * width)
y = int(fraction_y * height)
n = len(chars)
text_str = "".join([chars[int(c * n)] for c in text])
distract_frame = raw_frame.copy()
if isinstance(font, str):
with pathmgr.open(font, "rb") as f:
font = ImageFont.truetype(f, int(fontscale * 100))
if isinstance(
font,
(ImageFont.ImageFont, ImageFont.FreeTypeFont, ImageFont.TransposedFont),
):
# To use an ImageFont, we need to convert into PIL
distract_frame_rgb = cv2.cvtColor(distract_frame, cv2.COLOR_BGR2RGB)
distract_frame_pil = Image.fromarray(distract_frame_rgb)
# pyre-fixme[6]: Expected `Optional[ImageFont._Font]` for 3rd param but
# got `Union[ImageFont.FreeTypeFont, ImageFont.ImageFont,
# ImageFont.TransposedFont]`.
ImageDraw.Draw(distract_frame_pil).text((x, y), text_str, font=font)
distract_frame = cv2.cvtColor(
np.array(distract_frame_pil), cv2.COLOR_RGB2BGR
)
else:
cv2.putText(
distract_frame,
text_str,
(x, y),
font,
fontscale,
color,
thickness,
cv2.LINE_AA,
)
return distract_frame
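# ---------------------------------------------------------------------------
# Editor's sketch (not part of the AugLy API): overlay a random 5-character
# string using the built-in cv2 fonts.  Parameter values are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _text_distractor = VideoDistractorByText(text_len=5, thickness=2)
    _frame = np.zeros((240, 320, 3), dtype=np.uint8)
    print(
        "augmented frame shape:",
        _text_distractor.apply_augmentation(_frame).shape,
    )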
|
AugLy-main
|
augly/video/augmenters/cv2/text.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from augly import utils
from augly.video import helpers
from augly.video.helpers import intensity as vdintensity
Segment = utils.Segment
def get_func_kwargs(
metadata: Optional[List[Dict[str, Any]]],
local_kwargs: Dict[str, Any],
video_path: str,
**kwargs,
) -> Dict[str, Any]:
if metadata is None:
return {}
func_kwargs = deepcopy(local_kwargs)
func_kwargs.pop("metadata")
func_kwargs.update(
{
"src_video_info": helpers.get_video_info(video_path),
"src_fps": helpers.get_video_fps(video_path),
**kwargs,
}
)
return func_kwargs
def compute_time_crop_segments(
src_segment: Segment,
dst_segment: Segment,
speed_factor: float,
crop_start: float,
crop_end: float,
new_src_segments: List[Segment],
new_dst_segments: List[Segment],
end_dst_offset: float = 0.0,
) -> None:
"""
Calculates how the given matching pair src_segment & dst_segment change
given a temporal crop starting at crop_start & ending at crop_end. We can
use the same logic here for multiple transforms, by setting the crop_start
& crop_end depending on the transform kwargs.
Doesn't return anything, but appends the new matching segments in the dst
video corresponding to the pair passed in to new_src_segments & new_dst_segments,
if the segment pair still matches in the dst video. If the passed in segment
pair is cropped out as a result of this temporal crop, nothing will be
appended to the lists, since the segment pair doesn't exist in the dst video.
"""
# Crop segment is outside of the initial clip, so this matching segment
# pair no longer exists in the new video.
if crop_start >= dst_segment.end or crop_end <= dst_segment.start:
return
# new_start represents where the matching segment starts in the dst audio
# (if negative, then part of the matching segment is getting cut out, so
# we need to adjust both the src & dst starts).
# Note: if the video was sped up before, we need to take this into account
# (the matching segment that is e.g. 10 seconds of dst audio might
# correspond to 5 seconds of src audio, if it was previously
# slowed down by 0.5x).
new_start = (dst_segment.start - crop_start) * speed_factor
src_start, src_end, src_id = src_segment
if new_start < 0:
# We're cropping the beginning of the matching segment.
src_start = src_segment.start - new_start
new_start = 0
new_end = min(dst_segment.end - crop_start, crop_end - crop_start)
if crop_end < dst_segment.end:
# We're cropping the end of the matching segment.
# Note: if the video was sped up before, we need to take this into
# account (as above).
src_end = src_segment.end - (dst_segment.end - crop_end) * speed_factor
new_src_segments.append(Segment(src_start, src_end, src_id))
new_dst_segments.append(
Segment(new_start + end_dst_offset, new_end + end_dst_offset)
)
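def _example_compute_time_crop_segments() -> None:
    # Editor's sketch (not part of the AugLy API): a worked example of the
    # docstring above.  Cropping the span [2s, 6s] out of a 10-second video
    # whose only matching pair is the full clip turns that pair into
    # src [2s, 6s] matched to dst [0s, 4s].  All numbers are illustrative.
    new_src, new_dst = [], []
    compute_time_crop_segments(
        Segment(0.0, 10.0),
        Segment(0.0, 10.0),
        speed_factor=1.0,
        crop_start=2.0,
        crop_end=6.0,
        new_src_segments=new_src,
        new_dst_segments=new_dst,
    )
    assert (new_src[0].start, new_src[0].end) == (2.0, 6.0)
    assert (new_dst[0].start, new_dst[0].end) == (0.0, 4.0)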
def compute_insert_in_background_multiple_segments(
src_segment_starts: List[float],
src_segment_ends: List[float],
bkg_insertion_points: List[float],
src_ids: List[str],
transition_duration: float,
new_src_segments: List[Segment],
new_dst_segments: List[Segment],
**kwargs,
) -> None:
n = len(src_segment_starts)
assert n == len(
src_segment_ends
), "Source segment starts and ends lists must have equal length."
assert n == len(
bkg_insertion_points
), "Source segment starts and background insertion points lists must have equal length."
assert n == len(
src_ids
), "Source segment starts and source ids lists must have equal length."
if n == 0: # nothing to do
return
dst_cum_dur = 0.0 # background cumulative duration.
offset = transition_duration / 2.0
prev_bkg = 0.0
for src_start, src_end, src_id, bkg_pt in zip(
src_segment_starts, src_segment_ends, src_ids, bkg_insertion_points
):
crop_start = src_start + offset
crop_end = src_end - offset
dst_start = dst_cum_dur + (bkg_pt - prev_bkg) - offset
src_segment = Segment(start=crop_start, end=crop_end, src_id=src_id)
dst_segment = Segment(start=dst_start, end=dst_start + (crop_end - crop_start))
new_src_segments.append(src_segment)
new_dst_segments.append(dst_segment)
dst_cum_dur = dst_segment.end - offset
prev_bkg = bkg_pt
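def _example_compute_insert_in_background_multiple_segments() -> None:
    # Editor's sketch (not part of the AugLy API): one 4-second source clip
    # inserted 2 seconds into a background video with no transition.  The
    # source span [0s, 4s] then matches dst [2s, 6s].  The "src_0" id and all
    # numbers are illustrative.
    new_src, new_dst = [], []
    compute_insert_in_background_multiple_segments(
        src_segment_starts=[0.0],
        src_segment_ends=[4.0],
        bkg_insertion_points=[2.0],
        src_ids=["src_0"],
        transition_duration=0.0,
        new_src_segments=new_src,
        new_dst_segments=new_dst,
    )
    assert (new_dst[0].start, new_dst[0].end) == (2.0, 6.0)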
def compute_time_decimate_segments(
src_segment: Segment,
dst_segment: Segment,
src_duration: float,
speed_factor: float,
transition_duration: float,
new_src_segments: List[Segment],
new_dst_segments: List[Segment],
**kwargs,
) -> None:
start_offset = src_duration * kwargs["start_offset_factor"]
on_segment = src_duration * kwargs["on_factor"]
off_segment = on_segment * kwargs["off_factor"]
n = int((src_duration - start_offset) / (on_segment + off_segment))
dst_offset = 0
for i in range(n):
crop_start = (
start_offset
+ i * on_segment
+ i * off_segment
+ (i > 0) * transition_duration / 2.0
)
crop_end = (
start_offset
+ (i + 1) * on_segment
+ i * off_segment
- (i < n - 1) * transition_duration / 2
)
crop_end = min(src_duration, crop_end)
if crop_start > src_duration:
break
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
end_dst_offset=dst_offset,
)
dst_offset = new_dst_segments[-1].end
def get_transition_duration(kwargs: Dict[str, Any]) -> float:
transition = kwargs.get("transition")
if transition:
return transition.duration
return 0.0
def compute_changed_segments(
name: str,
src_segments: List[Segment],
dst_segments: List[Segment],
src_duration: float,
dst_duration: float,
speed_factor: float,
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
"""
This function performs the logic of computing the new matching segments based
on the old ones, for the set of transforms that temporally change the video.
Returns the lists of new src segments & dst segments, respectively.
"""
new_src_segments, new_dst_segments = [], []
td = get_transition_duration(kwargs)
for src_segment, dst_segment in zip(src_segments, dst_segments):
if name == "insert_in_background":
# Note: When we implement insert_in_background, make sure to pass these kwargs
offset = kwargs["offset_factor"] * kwargs["background_video_duration"]
transition_before = int(kwargs["transition_before"])
transition_after = int(kwargs["transition_after"])
# The matching segments are just offset in the dst audio by the amount
# of background video inserted before the src video.
new_src_segments.append(
src_segment.delta(
transition_before * td / 2, -transition_after * td / 2
)
)
new_dst_segments.append(
Segment(
dst_segment.start + offset - transition_before * td / 2,
dst_segment.end
+ offset
- transition_before * td
- transition_after * td / 2,
)
)
elif name == "insert_in_background_multiple":
compute_insert_in_background_multiple_segments(
src_segment_starts=kwargs["src_segment_starts"],
src_segment_ends=kwargs["src_segment_ends"],
bkg_insertion_points=kwargs["bkg_insertion_points"],
src_ids=kwargs["src_ids"],
transition_duration=td,
new_src_segments=new_src_segments,
new_dst_segments=new_dst_segments,
)
elif name == "replace_with_background":
clip_start = kwargs["starting_background_duration"]
duration = kwargs["source_duration"]
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
clip_start,
clip_start + duration,
new_src_segments,
new_dst_segments,
end_dst_offset=clip_start,
)
elif name == "change_video_speed":
crt_factor = kwargs["factor"]
global_factor = crt_factor * speed_factor
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(
dst_segment.start / global_factor,
dst_segment.end / global_factor,
)
)
elif name == "concat":
src_index = kwargs["src_video_path_index"]
num_videos = len(kwargs["video_paths"])
transition_offset_start = td / 2 if src_index > 0 else 0.0
transition_offset_end = td / 2 if src_index < num_videos - 1 else 0.0
new_src_segments.append(
src_segment.delta(transition_offset_start, -transition_offset_end)
)
offset = sum(
float(helpers.get_video_info(vp)["duration"]) - td
for vp in kwargs["video_paths"][: kwargs["src_video_path_index"]]
)
new_dst_segments.append(
Segment(
dst_segment.start + offset + transition_offset_start,
dst_segment.end + offset - transition_offset_end,
)
)
elif name == "loop":
# The existing segments are unchanged.
new_src_segments.append(src_segment)
new_dst_segments.append(dst_segment)
# Each original src segments now additionally matches a segment in
# each loop in the dst video.
for l_idx in range(kwargs["num_loops"]):
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(
dst_segment.start + (l_idx + 1) * src_duration,
dst_segment.end + (l_idx + 1) * src_duration,
)
)
elif name == "time_crop":
crop_start = kwargs["offset_factor"] * src_duration
crop_end = crop_start + kwargs["duration_factor"] * src_duration
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
)
elif name == "time_decimate":
compute_time_decimate_segments(
src_segment,
dst_segment,
src_duration,
speed_factor,
td,
new_src_segments,
new_dst_segments,
**kwargs,
)
elif name == "trim":
crop_start = kwargs["start"] or 0.0
crop_end = kwargs["end"] or src_duration
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
)
elif name == "replace_with_color_frames":
# This transform is like the inverse of time_crop/trim, because
# offset & duration denote where the src content is being cropped
# out, instead of which src content is being kept.
offset = kwargs["offset_factor"] * src_duration
duration = kwargs["duration_factor"] * src_duration
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
0.0,
offset,
new_src_segments,
new_dst_segments,
)
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
offset + duration,
dst_duration,
new_src_segments,
new_dst_segments,
)
return new_src_segments, new_dst_segments
def compute_segments(
name: str,
src_duration: float,
dst_duration: float,
metadata: List[Dict[str, Any]],
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
"""
Compute matching pairs of src_segment -> dst_segment, given the kwargs of the
transform, as well as the metadata about previously applied transforms.
"""
speed_factor = 1.0
src_id = kwargs.get("src_id", None)
if not metadata:
src_segments = [Segment(0.0, src_duration, src_id)]
dst_segments = [Segment(0.0, src_duration)]
else:
src_segments = [
Segment(
segment_dict["start"], segment_dict["end"], segment_dict.get("src_id")
)
for segment_dict in metadata[-1]["src_segments"]
]
dst_segments = [
Segment(segment_dict["start"], segment_dict["end"])
for segment_dict in metadata[-1]["dst_segments"]
]
for meta in metadata:
if meta["name"] in ["change_video_speed"]:
speed_factor *= meta["factor"]
if name in [
"insert_in_background",
"insert_in_background_multiple",
"replace_with_background",
"change_video_speed",
"loop",
"time_crop",
"time_decimate",
"trim",
"replace_with_color_frames",
"concat",
]:
return compute_changed_segments(
name,
src_segments,
dst_segments,
src_duration,
dst_duration,
speed_factor,
**kwargs,
)
else:
return src_segments, dst_segments
def get_metadata(
metadata: Optional[List[Dict[str, Any]]],
function_name: str,
video_path: str,
output_path: Optional[str],
src_video_info: Dict[str, Any],
src_fps: Optional[float],
**kwargs,
) -> None:
if metadata is None:
return
assert isinstance(
metadata, list
), "Expected 'metadata' to be set to None or of type list"
assert src_fps is not None
output_path = output_path or video_path
dst_video_info = helpers.get_video_info(output_path)
src_duration = float(src_video_info["duration"])
dst_duration = float(dst_video_info["duration"])
src_segments, dst_segments = compute_segments(
function_name, src_duration, dst_duration, metadata, **kwargs
)
# Json can't represent tuples, so they're represented as lists, which should
# be equivalent to tuples. So let's avoid tuples in the metadata by
# converting any tuples to lists here.
kwargs_types_fixed = dict(
(k, list(v)) if isinstance(v, tuple) else (k, v) for k, v in kwargs.items()
)
metadata.append(
{
"name": function_name,
"src_duration": src_duration,
"dst_duration": dst_duration,
"src_fps": src_fps,
"dst_fps": helpers.get_video_fps(output_path),
"src_width": src_video_info["width"],
"src_height": src_video_info["height"],
"dst_width": dst_video_info["width"],
"dst_height": dst_video_info["height"],
"src_segments": [src_segment._asdict() for src_segment in src_segments],
"dst_segments": [dst_segment._asdict() for dst_segment in dst_segments],
**kwargs_types_fixed,
}
)
intensity_kwargs = {"metadata": metadata[-1], **kwargs}
metadata[-1]["intensity"] = getattr(
vdintensity, f"{function_name}_intensity", lambda **_: 0.0
)(**intensity_kwargs)
|
AugLy-main
|
augly/video/helpers/metadata.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
"""
This file contains 'intensity' functions for each of our augmentations.
Intensity functions give a float representation of how intense a particular
transform called with particular parameters is.
Each intensity function expects as parameters the kwargs the corresponding
transform was called with, as well as the metadata dictionary computed by the
transform (e.g. metadata[-1] after passing a list 'metadata' into the transform).
All intensity functions are normalized to be in [0, 100]. This means we are
assuming a range of valid values for each param - e.g. for change_aspect_ratio
we assume we will never change the aspect ratio of the video by more than 10x,
meaning the range of valid values for `ratio` is [0.1, 10.0], which you can see
is assumed in the intensity function below.
"""
def add_noise_intensity(level: int, **kwargs) -> float:
assert (
isinstance(level, (float, int)) and 0 <= level <= 100
), "level must be a number in [0, 100]"
return (level / 100) * 100.0
def apply_lambda_intensity(aug_function: str, **kwargs) -> float:
intensity_func = globals().get(f"{aug_function}_intensity")
return intensity_func(**kwargs) if intensity_func else 100.0
def audio_swap_intensity(offset: float, **kwargs) -> float:
return (1.0 - offset) * 100.0
def augment_audio_intensity(audio_metadata: List[Dict[str, Any]], **kwargs) -> float:
return audio_metadata[0]["intensity"]
def blend_videos_intensity(opacity: float, overlay_size: float, **kwargs) -> float:
return imint.overlay_media_intensity_helper(opacity, overlay_size)
def blur_intensity(sigma: int, **kwargs) -> float:
assert (
isinstance(sigma, (float, int)) and sigma >= 0
), "sigma must be a non-negative number"
max_sigma = 100
return min((sigma / max_sigma) * 100.0, 100.0)
def brightness_intensity(level: float, **kwargs) -> float:
assert (
isinstance(level, (float, int)) and -1 <= level <= 1
), "level must be a number in [-1, 1]"
return abs(level) * 100.0
def change_aspect_ratio_intensity(
ratio: float, metadata: Dict[str, Any], **kwargs
) -> float:
assert (
isinstance(ratio, (float, int)) and ratio > 0
), "ratio must be a positive number"
current_ratio = metadata["src_width"] / metadata["src_height"]
max_ratio_change = 10.0
ratio_change = abs(ratio - current_ratio)
return min((ratio_change / max_ratio_change) * 100.0, 100.0)
def change_video_speed_intensity(factor: float, **kwargs) -> float:
assert (
isinstance(factor, (float, int)) and factor > 0
), "factor must be a positive number"
if factor == 1.0:
return 0.0
max_factor = 10.0
speed_change_factor = factor if factor > 1 else 1 / factor
return min((speed_change_factor / max_factor) * 100.0, 100.0)
def color_jitter_intensity(
brightness_factor: float, contrast_factor: float, saturation_factor: float, **kwargs
) -> float:
assert (
isinstance(brightness_factor, (float, int)) and -1 <= brightness_factor <= 1
), "brightness_factor must be a number in [-1, 1]"
assert (
isinstance(contrast_factor, (float, int)) and -1000 <= contrast_factor <= 1000
), "contrast_factor must be a number in [-1000, 1000]"
assert (
isinstance(saturation_factor, (float, int)) and 0 <= saturation_factor <= 3
), "saturation_factor must be a number in [0, 3]"
brightness_intensity = abs(brightness_factor)
contrast_intensity = abs(contrast_factor) / 1000
saturation_intensity = saturation_factor / 3
return (brightness_intensity * contrast_intensity * saturation_intensity) * 100.0
def concat_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return time_crop_or_pad_intensity_helper(metadata)
def contrast_intensity(level: float, **kwargs) -> float:
assert (
isinstance(level, (float, int)) and -1000 <= level <= 1000
), "level must be a number in [-1000, 1000]"
return (abs(level) / 1000) * 100.0
def crop_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return imint.resize_intensity_helper(metadata)
def encoding_quality_intensity(quality: int, **kwargs) -> float:
assert (
isinstance(quality, int) and 0 <= quality <= 51
), "quality must be a number in [0, 51]"
return (quality / 51) * 100.0
def fps_intensity(fps: int, metadata: Dict[str, Any], **kwargs) -> float:
assert isinstance(fps, (float, int)), "fps must be a number"
src_fps = metadata["src_fps"]
return min(((src_fps - fps) / src_fps) * 100.0, 100.0)
def grayscale_intensity(**kwargs) -> float:
return 100.0
def hflip_intensity(**kwargs) -> float:
return 100.0
def hstack_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return imint.resize_intensity_helper(metadata)
def identity_function_intensity(**kwargs) -> float:
return 0.0
def insert_in_background_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return time_crop_or_pad_intensity_helper(metadata)
def loop_intensity(num_loops: int, **kwargs) -> float:
max_num_loops = 100
return min((num_loops / max_num_loops) * 100.0, 100.0)
def meme_format_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return imint.resize_intensity_helper(metadata)
def overlay_intensity(
overlay_size: Optional[float], overlay_path: str, metadata: Dict[str, Any], **kwargs
) -> float:
assert overlay_size is None or (
isinstance(overlay_size, (float, int)) and 0 < overlay_size <= 1
), "overlay_size must be a value in the range (0, 1]"
if overlay_size is not None:
return (overlay_size**2) * 100.0
try:
img = imutils.validate_and_load_image(overlay_path)
overlay_area = img.width * img.height
except Exception:
video_info = get_video_info(overlay_path)
overlay_area = video_info["width"] * video_info["height"]
src_area = metadata["src_width"] * metadata["src_height"]
return min((overlay_area / src_area) * 100.0, 100.0)
def overlay_dots_intensity(num_dots: int, **kwargs) -> float:
max_num_dots = 10000
return min((num_dots / max_num_dots) * 100.0, 100.0)
def overlay_emoji_intensity(
emoji_size: float, opacity: float, metadata: Dict[str, Any], **kwargs
) -> float:
assert (
isinstance(emoji_size, (float, int)) and 0 <= emoji_size <= 1
), "emoji_size must be a number in [0, 1]"
assert (
isinstance(opacity, (float, int)) and 0 <= opacity <= 1
), "opacity must be a number in [0, 1]"
video_area = metadata["dst_width"] * metadata["dst_height"]
emoji_width = min(metadata["dst_width"], metadata["dst_height"] * emoji_size)
emoji_height = metadata["dst_height"] * emoji_size
emoji_area = emoji_width * emoji_height
area_intensity = emoji_area / video_area
return area_intensity * opacity * 100.0
def overlay_onto_background_video_intensity(
overlay_size: Optional[float], metadata: Dict[str, Any], **kwargs
) -> float:
if overlay_size is not None:
return (1 - overlay_size**2) * 100.0
src_area = metadata["src_width"] * metadata["src_height"]
dst_area = metadata["dst_width"] * metadata["dst_height"]
return min(100.0, max(0.0, 1.0 - src_area / dst_area) * 100.0)
def overlay_onto_screenshot_intensity(
template_filepath: str,
template_bboxes_filepath: str,
metadata: Dict[str, Any],
**kwargs,
) -> float:
_, bbox = imutils.get_template_and_bbox(template_filepath, template_bboxes_filepath)
bbox_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
dst_area = metadata["dst_width"] * metadata["dst_height"]
return ((dst_area - bbox_area) / dst_area) * 100.0
def overlay_shapes_intensity(
topleft: Optional[Tuple[float, float]],
bottomright: Optional[Tuple[float, float]],
num_shapes: int,
**kwargs,
) -> float:
return distractor_overlay_intensity_helper(topleft, bottomright, num_shapes)
def overlay_text_intensity(
topleft: Optional[Tuple[float, float]],
bottomright: Optional[Tuple[float, float]],
**kwargs,
) -> float:
return distractor_overlay_intensity_helper(topleft, bottomright, 1)
def pad_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return imint.resize_intensity_helper(metadata)
def perspective_transform_and_shake_intensity(
sigma: float, shake_radius: float, **kwargs
) -> float:
assert (
isinstance(sigma, (float, int)) and sigma >= 0
), "sigma must be a non-negative number"
assert (
isinstance(shake_radius, (float, int)) and shake_radius >= 0
), "shake_radius must be a non-negative number"
max_sigma_val = 100
max_shake_radius_val = 100
sigma_intensity = sigma / max_sigma_val
shake_radius_intensity = shake_radius / max_shake_radius_val
return min((sigma_intensity * shake_radius_intensity) * 100.0, 100.0)
def pixelization_intensity(ratio: float, **kwargs) -> float:
assert (
isinstance(ratio, (float, int)) and 0 <= ratio <= 1
), "ratio must be a number in [0, 1]"
return (1 - ratio) * 100.0
def remove_audio_intensity(**kwargs) -> float:
return 100.0
def insert_in_background_multiple_intensity(
metadata: Dict[str, Any], **kwargs
) -> float:
"""
The intensity is calculated as the percentage of the result video
that contains inserted segments.
"""
dst_duration = metadata["dst_duration"]
starts = metadata["src_segment_starts"]
ends = metadata["src_segment_ends"]
    inserted = float(np.sum(np.asarray(ends) - np.asarray(starts)))
    return min((inserted / dst_duration) * 100.0, 100.0)
def replace_with_background_intensity(metadata: Dict[str, Any], **kwargs) -> float:
"""
The intensity of replace_with_background is the fraction of the source video duration
that was replaced with background. Because the overall duration of the video is preserved,
the background segments together must be shorter than the source duration so the intensity is never
greater than 100.
"""
src_duration = metadata["src_duration"]
total_bg_duration = (
metadata["starting_background_duration"]
+ metadata["ending_background_duration"]
)
return min((total_bg_duration / src_duration) * 100.0, 100.0)
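def _example_replace_with_background_intensity() -> float:
    # Editor's sketch (not part of the AugLy API): replacing a 10-second
    # source with 2s of leading and 3s of trailing background keeps 5s of
    # source content, giving an intensity of 50.0.  Numbers are illustrative.
    return replace_with_background_intensity(
        metadata={
            "src_duration": 10.0,
            "starting_background_duration": 2.0,
            "ending_background_duration": 3.0,
        }
    )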
def replace_with_color_frames_intensity(
duration_factor: float, offset_factor: float, **kwargs
) -> float:
assert (
isinstance(duration_factor, (float, int)) and 0 <= duration_factor <= 1
), "duration_factor must be a number in [0, 1]"
assert (
isinstance(offset_factor, (float, int)) and 0 <= offset_factor <= 1
), "offset_factor must be a number in [0, 1]"
# The proportion of the video that is replaced by color frames is generally
# equal to duration factor, unless offset_factor + duration_factor > 1, in
# which case it will be 1 - offset_factor.
return min(duration_factor, 1 - offset_factor) * 100.0
def resize_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return imint.resize_intensity_helper(metadata)
def rotate_intensity(degrees: float, **kwargs) -> float:
assert isinstance(degrees, (float, int)), "degrees must be a number"
max_degrees_val = 180
degrees = abs(degrees) % 180
return (degrees / max_degrees_val) * 100.0
def scale_intensity(factor: float, **kwargs) -> float:
assert (
isinstance(factor, (float, int)) and factor > 0
), "factor must be a positive number"
if factor == 1.0:
return 0.0
max_factor_val = 10.0
scale_factor = factor if factor > 1 else 1 / factor
return min((scale_factor / max_factor_val) * 100.0, 100.0)
def shift_intensity(x_factor: float, y_factor: float, **kwargs) -> float:
assert (
isinstance(x_factor, (float, int))
and 0 <= x_factor <= 1
and isinstance(y_factor, (float, int))
and 0 <= y_factor <= 1
), "x_factor & y_factor must be positive numbers in [0, 1]"
return (1 - x_factor) * (1 - y_factor) * 100.0
def time_crop_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return time_crop_or_pad_intensity_helper(metadata)
def time_decimate_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return time_crop_or_pad_intensity_helper(metadata)
def trim_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return time_crop_or_pad_intensity_helper(metadata)
def vflip_intensity(**kwargs) -> float:
return 100.0
def vstack_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return imint.resize_intensity_helper(metadata)
def distractor_overlay_intensity_helper(
topleft: Optional[Tuple[float, float]],
bottomright: Optional[Tuple[float, float]],
num_overlay_content: int,
**kwargs,
) -> float:
"""
Computes intensity of any distractor-type transform, which adds some kind
of media (images, emojis, text, dots, logos) on top of the src video within
a specified bounding box.
"""
assert topleft is None or all(
0.0 <= t <= 1.0 for t in topleft
), "Topleft must be in the range [0, 1]"
assert bottomright is None or all(
0.0 <= b <= 1.0 for b in bottomright
), "Bottomright must be in the range [0, 1]"
assert (
isinstance(num_overlay_content, int) and num_overlay_content >= 0
), "num_overlay_content must be a nonnegative int"
if topleft is None or bottomright is None:
return 100.0
max_num_overlay_content_val = 100
num_overlay_content_intensity = num_overlay_content / max_num_overlay_content_val
x1, y1 = topleft
x2, y2 = bottomright
distractor_area = (x2 - x1) * (y2 - y1)
return min((distractor_area * num_overlay_content_intensity) * 100.0, 100.0)
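def _example_distractor_overlay_intensity() -> float:
    # Editor's sketch (not part of the AugLy API): 10 overlaid items confined
    # to the top-left quarter of the frame cover an area fraction of 0.25 and
    # are 10/100 of the assumed maximum item count, so the intensity is 2.5.
    return distractor_overlay_intensity_helper(
        topleft=(0.0, 0.0), bottomright=(0.5, 0.5), num_overlay_content=10
    )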
def time_crop_or_pad_intensity_helper(metadata: Dict[str, Any]) -> float:
"""
Computes intensity of a transform that consists of temporal cropping or
padding. For these types of transforms the intensity is defined as the
percentage of video time that has been cut out (for cropping) or added
(for padding). When computing the percentage, the denominator should be
the longer of the src & dst durations so the resulting percentage isn't
greater than 100.
"""
dst_duration = metadata["dst_duration"]
src_duration = metadata["src_duration"]
larger_duration = max(src_duration, dst_duration)
return (abs(dst_duration - src_duration) / larger_duration) * 100.0
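def _example_time_crop_or_pad_intensity() -> float:
    # Editor's sketch (not part of the AugLy API): trimming a 10-second source
    # down to 6 seconds removes 4s out of the longer (10s) duration, giving an
    # intensity of 40.0.  Numbers are illustrative.
    return time_crop_or_pad_intensity_helper(
        {"src_duration": 10.0, "dst_duration": 6.0}
    )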
|
AugLy-main
|
augly/video/helpers/intensity.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional
import ffmpeg
import numpy as np
from augly.audio import utils as audutils
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from vidgear.gears import WriteGear
def combine_frames_and_audio_to_file(
raw_frames: str,
audio: Optional[str],
output_path: str,
framerate: float,
) -> None:
frame_dir = os.path.dirname(raw_frames)
if not os.path.isdir(frame_dir):
raise RuntimeError(
f"Got raw frames glob path of {raw_frames}, but {frame_dir} is not "
"a directory"
)
with tempfile.TemporaryDirectory() as tmpdir:
temp_video_path = os.path.join(tmpdir, "out.mp4")
ffmpeg_command = [
"-y",
"-framerate",
str(framerate),
"-pattern_type",
"glob",
"-i",
raw_frames,
"-c:v",
"libx264",
"-pix_fmt",
"yuv420p",
"-preset",
"ultrafast",
"-vf",
"pad=ceil(iw/2)*2:ceil(ih/2)*2",
temp_video_path,
]
execute_vidgear_command(temp_video_path, ffmpeg_command)
temp_padded_video_path = os.path.join(tmpdir, "out1.mp4")
ffmpeg_command = [
"-y",
"-i",
temp_video_path,
"-vf",
"pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2",
"-preset",
"ultrafast",
temp_padded_video_path,
]
execute_vidgear_command(temp_padded_video_path, ffmpeg_command)
merge_video_and_audio(temp_padded_video_path, audio, output_path)
def execute_vidgear_command(output_path: str, ffmpeg_command: List[str]) -> None:
writer = WriteGear(output_filename=output_path, logging=True)
writer.execute_ffmpeg_cmd(ffmpeg_command)
writer.close()
def extract_audio_to_file(video_path: str, output_audio_path: str) -> None:
audio_info = get_audio_info(video_path)
sample_rate = str(audio_info["sample_rate"])
codec = audio_info["codec_name"]
if os.path.splitext(output_audio_path)[-1] == ".aac":
(
ffmpeg.input(video_path, loglevel="quiet")
.output(output_audio_path, acodec=codec, ac=1)
.overwrite_output()
.run(cmd=FFMPEG_PATH)
)
else:
out, err = (
ffmpeg.input(video_path, loglevel="quiet")
.output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sample_rate)
.run(cmd=FFMPEG_PATH, capture_stdout=True, capture_stderr=True)
)
audio = np.frombuffer(out, np.float32)
audutils.ret_and_save_audio(audio, output_audio_path, int(sample_rate))
def extract_frames_to_dir(
video_path: str,
output_dir: str,
output_pattern: str = "raw_frame%08d.jpg",
quality: int = 0,
scale: float = 1,
) -> None:
video_info = get_video_info(video_path)
ffmpeg_command = [
"-y",
"-i",
video_path,
"-vf",
f"scale=iw*{scale}:ih*{scale}",
"-vframes",
str(video_info["nb_frames"]),
"-qscale:v",
str(quality),
"-preset",
"ultrafast",
os.path.join(output_dir, output_pattern),
]
execute_vidgear_command(os.path.join(output_dir, output_pattern), ffmpeg_command)
def get_audio_info(media_path: str) -> Dict[str, Any]:
"""
Returns whatever ffprobe returns. Of particular use are things such as the
encoder ("codec_name") used for audio encoding, the sample rate ("sample_rate"),
    and length in seconds ("duration").
    Accepts as input either an audio or video path.
"""
try:
local_media_path = pathmgr.get_local_path(media_path)
except RuntimeError:
raise FileNotFoundError(f"Provided media path {media_path} does not exist")
probe = ffmpeg.probe(local_media_path, cmd=FFPROBE_PATH)
audio_info = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "audio"),
None,
)
assert (
audio_info is not None
), "Error retrieving audio metadata, please verify that an audio stream exists"
return audio_info
def get_video_fps(video_path: str) -> Optional[float]:
video_info = get_video_info(video_path)
try:
frame_rate = video_info["avg_frame_rate"]
# ffmpeg often returns fractional framerates, e.g. 225480/7523
if "/" in frame_rate:
num, denom = (float(f) for f in frame_rate.split("/"))
return num / denom
else:
return float(frame_rate)
except Exception:
return None
def get_video_info(video_path: str) -> Dict[str, Any]:
"""
Returns whatever ffprobe returns. Of particular use are things such as the FPS
("avg_frame_rate"), number of raw frames ("nb_frames"), height and width of each
frame ("height", "width") and length in seconds ("duration")
"""
try:
local_video_path = pathmgr.get_local_path(video_path)
except RuntimeError:
raise FileNotFoundError(f"Provided video path {video_path} does not exist")
probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)
video_info = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "video"),
None,
)
assert (
video_info is not None
), "Error retrieving video metadata, please verify that the video file exists"
return video_info
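def _example_video_summary(video_path: str) -> Dict[str, float]:
    # Editor's sketch (not part of the AugLy API): pull a few commonly used
    # fields out of the ffprobe metadata returned by get_video_info and
    # get_video_fps.  The helper name is hypothetical.
    info = get_video_info(video_path)
    return {
        "width": float(info["width"]),
        "height": float(info["height"]),
        "duration": float(info["duration"]),
        "fps": get_video_fps(video_path) or 0.0,
    }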
def has_audio_stream(video_path: str) -> bool:
streams = ffmpeg.probe(video_path, cmd=FFPROBE_PATH)["streams"]
for stream in streams:
if stream["codec_type"] == "audio":
return True
return False
def add_silent_audio(
video_path: str,
output_path: Optional[str] = None,
duration: Optional[float] = None,
) -> None:
local_video_path = pathmgr.get_local_path(video_path)
if local_video_path != video_path:
assert (
output_path is not None
), "If remote video_path is provided, an output_path must be provided"
video_path = local_video_path
output_path = output_path or video_path
if has_audio_stream(video_path):
if video_path != output_path:
shutil.copy(video_path, output_path)
return
duration = duration or float(get_video_info(video_path)["duration"])
video = ffmpeg.input(video_path).video
silent_audio_path = pathmgr.get_local_path(SILENT_AUDIO_PATH)
audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio
output = ffmpeg.output(video, audio, output_path, pix_fmt="yuv420p", t=duration)
output.overwrite_output().run(cmd=FFMPEG_PATH)
def merge_video_and_audio(
video_path: str,
audio_path: Optional[str],
output_path: str,
) -> None:
ffmpeg_command = []
if audio_path:
ffmpeg_command = [
"-y",
"-i",
video_path,
"-i",
audio_path,
"-vf",
"format=pix_fmts=yuv420p",
"-c:v",
"libx264",
"-c:a",
"copy",
"-bsf:a",
"aac_adtstoasc",
"-preset",
"ultrafast",
output_path,
]
else:
ffmpeg_command = [
"-y",
"-i",
video_path,
"-vf",
"format=pix_fmts=yuv420p",
"-c:v",
"libx264",
"-c:a",
"copy",
"-bsf:a",
"aac_adtstoasc",
"-preset",
"ultrafast",
output_path,
]
execute_vidgear_command(output_path, ffmpeg_command)
|
AugLy-main
|
augly/video/helpers/ffmpeg.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.video.helpers.ffmpeg import (
add_silent_audio,
combine_frames_and_audio_to_file,
extract_audio_to_file,
extract_frames_to_dir,
get_audio_info,
get_video_fps,
get_video_info,
has_audio_stream,
)
from augly.video.helpers.intensity import (
add_noise_intensity,
apply_lambda_intensity,
audio_swap_intensity,
augment_audio_intensity,
blend_videos_intensity,
blur_intensity,
brightness_intensity,
change_aspect_ratio_intensity,
change_video_speed_intensity,
color_jitter_intensity,
concat_intensity,
contrast_intensity,
crop_intensity,
encoding_quality_intensity,
fps_intensity,
grayscale_intensity,
hflip_intensity,
hstack_intensity,
insert_in_background_intensity,
insert_in_background_multiple_intensity,
loop_intensity,
meme_format_intensity,
overlay_dots_intensity,
overlay_emoji_intensity,
overlay_intensity,
overlay_onto_background_video_intensity,
overlay_onto_screenshot_intensity,
overlay_shapes_intensity,
overlay_text_intensity,
pad_intensity,
perspective_transform_and_shake_intensity,
pixelization_intensity,
remove_audio_intensity,
replace_with_background_intensity,
replace_with_color_frames_intensity,
rotate_intensity,
scale_intensity,
shift_intensity,
time_crop_intensity,
time_decimate_intensity,
trim_intensity,
vflip_intensity,
vstack_intensity,
)
from augly.video.helpers.metadata import (
compute_changed_segments,
compute_segments,
compute_time_crop_segments,
compute_time_decimate_segments,
get_func_kwargs,
get_metadata,
)
from augly.video.helpers.utils import (
create_color_video,
create_video_from_image,
get_local_path,
identity_function,
validate_input_and_output_paths,
)
__all__ = [
# -- ffmpeg --
"add_silent_audio",
"combine_frames_and_audio_to_file",
"extract_audio_to_file",
"extract_frames_to_dir",
"get_audio_info",
"get_video_fps",
"get_video_info",
"has_audio_stream",
# -- intensity --
"add_noise_intensity",
"apply_lambda_intensity",
"audio_swap_intensity",
"augment_audio_intensity",
"blend_videos_intensity",
"blur_intensity",
"brightness_intensity",
"change_aspect_ratio_intensity",
"change_video_speed_intensity",
"color_jitter_intensity",
"concat_intensity",
"contrast_intensity",
"crop_intensity",
"encoding_quality_intensity",
"fps_intensity",
"grayscale_intensity",
"hflip_intensity",
"hstack_intensity",
"insert_in_background_intensity",
"insert_in_background_multiple_intensity",
"loop_intensity",
"meme_format_intensity",
"overlay_intensity",
"overlay_dots_intensity",
"overlay_emoji_intensity",
"overlay_onto_background_video_intensity",
"overlay_onto_screenshot_intensity",
"overlay_shapes_intensity",
"overlay_text_intensity",
"pad_intensity",
"perspective_transform_and_shake_intensity",
"pixelization_intensity",
"remove_audio_intensity",
"replace_with_background_intensity",
"replace_with_color_frames_intensity",
"rotate_intensity",
"scale_intensity",
"shift_intensity",
"time_crop_intensity",
"time_decimate_intensity",
"trim_intensity",
"vflip_intensity",
"vstack_intensity",
# -- metadata --
"compute_changed_segments",
"compute_segments",
"compute_time_crop_segments",
"compute_time_decimate_segments",
"get_func_kwargs",
"get_metadata",
# -- utils --
"create_color_video",
"create_video_from_image",
"get_local_path",
"identity_function",
"validate_input_and_output_paths",
]
|
AugLy-main
|
augly/video/helpers/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional, Tuple
import cv2
import ffmpeg
import numpy as np
from augly import utils
from augly.utils.ffmpeg import FFMPEG_PATH
from augly.video import helpers
DEFAULT_FRAME_RATE = 10
def create_color_video(
output_path: str,
duration: float,
height: int,
width: int,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
) -> None:
"""
Creates a video with frames of the specified color
@param output_path: the path in which the resulting video will be stored
@param duration: how long the video should be, in seconds
@param height: the desired height of the video to be generated
@param width: the desired width of the video to be generated
@param color: RGB color of the video. Default color is black
"""
utils.validate_output_path(output_path)
assert duration > 0, "Duration of the video must be a positive value"
assert height > 0, "Height of the video must be a positive value"
assert width > 0, "Width of the video must be a positive value"
with tempfile.TemporaryDirectory() as tmpdir:
image_path = os.path.join(tmpdir, "image.png")
        color_frame = np.full((height, width, 3), color[::-1], dtype=np.uint8)
cv2.imwrite(image_path, color_frame)
create_video_from_image(output_path, image_path, duration)
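def _example_create_color_video() -> None:
    # Editor's sketch (not part of the AugLy API): write a 2-second 720p black
    # clip to a hypothetical output path using the helper above.
    create_color_video("/tmp/black_clip.mp4", duration=2.0, height=720, width=1280)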
def create_video_from_image(output_path: str, image_path: str, duration: float) -> None:
"""
Creates a video with all frames being the image provided
@param output_path: the path in which the resulting video will be stored
@param image_path: the path to the image to use to create the video
@param duration: how long the video should be, in seconds
"""
utils.validate_output_path(output_path)
utils.validate_image_path(image_path)
assert duration > 0, "Duration of the video must be a positive value"
im_stream = ffmpeg.input(image_path)
video = (
im_stream.filter("loop", 1)
.filter("framerate", utils.DEFAULT_FRAME_RATE)
.filter("pad", **{"width": "ceil(iw/2)*2", "height": "ceil(ih/2)*2"})
)
silent_audio_path = utils.pathmgr.get_local_path(utils.SILENT_AUDIO_PATH)
audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio
output = ffmpeg.output(video, audio, output_path, pix_fmt="yuv420p", t=duration)
output.overwrite_output().run(cmd=FFMPEG_PATH)
def get_local_path(video_path: str) -> str:
return utils.pathmgr.get_local_path(video_path)
def identity_function(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
video_path, output_path = validate_input_and_output_paths(video_path, output_path)
if output_path is not None and output_path != video_path:
shutil.copy(video_path, output_path)
if metadata is not None:
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
helpers.get_metadata(
metadata=metadata, function_name="identity_function", **func_kwargs
)
return output_path or video_path
def validate_input_and_output_paths(
video_path: str, output_path: Optional[str]
) -> Tuple[str, str]:
local_video_path = get_local_path(video_path)
utils.validate_video_path(local_video_path)
if output_path is None:
assert (
video_path == local_video_path
), "If using a nonlocal input path, you must specify an output path"
output_path = output_path or video_path
utils.validate_output_path(output_path)
return local_video_path, output_path
|
AugLy-main
|
augly/video/helpers/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.tests.base_configs import AudioAugConfig, ImageAugConfig, VideoAugConfig
__all__ = [
"AudioAugConfig",
"ImageAugConfig",
"VideoAugConfig",
]
|
AugLy-main
|
augly/tests/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from dataclasses import dataclass
from typing import Optional, Sequence, Tuple
from augly.utils import TEST_URI
@dataclass
class BaseAugConfig:
output_dir: str
input_dir: str
input_files: Sequence[str]
input_file_index: int = 0
def get_input_path(self, input_file_index: Optional[int] = None) -> Tuple[str, str]:
if input_file_index is None:
input_file_index = self.input_file_index
filename = self.input_files[input_file_index]
filepath = os.path.join(self.input_dir, filename)
return filepath, filename
def get_output_path(self, filename: str, prefix: str = "") -> str:
aug_filename = f"aug_{prefix}{filename}"
output_path = os.path.join(self.output_dir, aug_filename)
return output_path
@dataclass
class ImageAugConfig(BaseAugConfig):
output_dir: str = os.path.join(TEST_URI, "image", "outputs")
input_dir: str = os.path.join(TEST_URI, "image", "inputs")
input_files: Sequence[str] = ("dfdc_1.jpg", "dfdc_2.jpg", "dfdc_3.jpg")
@dataclass
class VideoAugConfig(BaseAugConfig):
output_dir: str = os.path.join(TEST_URI, "video", "outputs")
input_dir: str = os.path.join(TEST_URI, "video", "inputs")
input_files: Sequence[str] = ("input_1.mp4", "input_2.mp4", "input_3.mp4")
input_audio_file: str = os.path.join(TEST_URI, "video", "inputs", "input_1.aac")
@dataclass
class AudioAugConfig(BaseAugConfig):
output_dir: str = os.path.join(TEST_URI, "audio", "outputs")
input_dir: str = os.path.join(TEST_URI, "audio", "inputs")
input_files: Sequence[str] = ("vad-go-mono-32000.wav", "vad-go-stereo-44100.wav")
|
AugLy-main
|
augly/tests/base_configs.py
|
AugLy-main
|
augly/tests/image_tests/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from augly import image as imaugs
from augly.tests.image_tests.base_unit_test import BaseImageUnitTest
from augly.utils import EMOJI_PATH, IMG_MASK_PATH
from PIL import Image
class FunctionalImageUnitTest(BaseImageUnitTest):
def test_apply_lambda(self):
self.evaluate_function(imaugs.apply_lambda)
def test_apply_pil_filter(self):
self.evaluate_function(imaugs.apply_pil_filter)
def test_blur(self):
self.evaluate_function(imaugs.blur)
def test_brightness(self):
self.evaluate_function(imaugs.brightness)
def test_change_aspect_ratio(self):
self.evaluate_function(imaugs.change_aspect_ratio)
def test_clip_image_size(self):
self.evaluate_function(imaugs.clip_image_size, max_resolution=1500000)
def test_color_jitter(self):
self.evaluate_function(imaugs.color_jitter)
def test_contrast(self):
self.evaluate_function(imaugs.contrast)
def test_convert_color(self):
self.evaluate_function(imaugs.convert_color, mode="L")
def test_crop(self):
self.evaluate_function(imaugs.crop)
def test_encoding_quality(self):
self.evaluate_function(imaugs.encoding_quality, quality=30)
def test_grayscale(self):
self.evaluate_function(imaugs.grayscale)
def test_hflip(self):
self.evaluate_function(imaugs.hflip)
def test_masked_composite(self):
self.evaluate_function(
imaugs.masked_composite,
mask=IMG_MASK_PATH,
transform_function=imaugs.Brightness(factor=0.1),
)
@unittest.skip("Failing on some envs, will fix")
def test_meme_format(self):
self.evaluate_function(imaugs.meme_format)
def test_opacity(self):
self.evaluate_function(imaugs.opacity)
def test_overlay_emoji(self):
self.evaluate_function(imaugs.overlay_emoji)
def test_overlay_image(self):
self.evaluate_function(
imaugs.overlay_image, overlay=EMOJI_PATH, overlay_size=0.15, y_pos=0.8
)
def test_overlay_onto_background_image(self):
self.evaluate_function(
imaugs.overlay_onto_background_image,
background_image=EMOJI_PATH,
overlay_size=0.5,
scale_bg=True,
)
def test_overlay_onto_screenshot(self):
self.evaluate_function(
imaugs.overlay_onto_screenshot, resize_src_to_match_template=False
)
def test_overlay_stripes(self):
self.evaluate_function(imaugs.overlay_stripes)
@unittest.skip("Failing on some envs, will fix")
def test_overlay_text(self):
text_indices = [5, 3, 1, 2, 1000, 221]
self.evaluate_function(imaugs.overlay_text, text=text_indices)
def test_pad(self):
self.evaluate_function(imaugs.pad)
def test_pad_square(self):
self.evaluate_function(imaugs.pad_square)
def test_perspective_transform(self):
self.evaluate_function(imaugs.perspective_transform, sigma=100.0)
def test_pixelization(self):
self.evaluate_function(imaugs.pixelization)
def test_random_noise(self):
self.evaluate_function(imaugs.random_noise)
def test_resize(self):
self.evaluate_function(imaugs.resize, resample=Image.BICUBIC)
def test_rotate(self):
self.evaluate_function(imaugs.rotate)
def test_saturation(self):
self.evaluate_function(imaugs.saturation, factor=0.5)
def test_scale(self):
self.evaluate_function(imaugs.scale)
def test_sharpen(self):
self.evaluate_function(imaugs.sharpen, factor=2.0)
def test_shuffle_pixels(self):
self.evaluate_function(imaugs.shuffle_pixels, factor=0.5)
def test_skew(self):
self.evaluate_function(imaugs.skew)
def test_vflip(self):
self.evaluate_function(imaugs.vflip)
if __name__ == "__main__":
unittest.main()
|
AugLy-main
|
augly/tests/image_tests/functional_unit_test.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torchvision.transforms as transforms # @manual
from augly import image as imaugs
from augly.tests import ImageAugConfig
from augly.utils import pathmgr
from PIL import Image
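
# AugLy transforms operate on PIL Images, so they can be mixed freely with
# torchvision transforms; appending transforms.ToTensor() to the pipeline
# yields a torch.Tensor, as the tests below verify.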
COLOR_JITTER_PARAMS = {
"brightness_factor": 1.2,
"contrast_factor": 1.2,
"saturation_factor": 1.4,
}

AUGMENTATIONS = [
imaugs.ColorJitter(**COLOR_JITTER_PARAMS),
imaugs.OneOf(
[imaugs.OverlayOntoScreenshot(), imaugs.OverlayEmoji(), imaugs.OverlayText()]
),
]

TRANSFORMS = imaugs.Compose(AUGMENTATIONS)
TENSOR_TRANSFORMS = transforms.Compose(AUGMENTATIONS + [transforms.ToTensor()])

class ComposeAugmentationsTestCase(unittest.TestCase):
def test_torchvision_compose_compability(self) -> None:
config = ImageAugConfig()
image_path, image_file = config.get_input_path()
local_img_path = pathmgr.get_local_path(image_path)
image = Image.open(local_img_path)
tsfm_image = TENSOR_TRANSFORMS(image)
self.assertIsInstance(tsfm_image, torch.Tensor)

    def test_augly_image_compose(self) -> None:
config = ImageAugConfig()
image_path, image_file = config.get_input_path()
local_img_path = pathmgr.get_local_path(image_path)
image = Image.open(local_img_path)
tsfm_image = TRANSFORMS(image)
self.assertIsInstance(tsfm_image, Image.Image)

if __name__ == "__main__":
unittest.main()
|
AugLy-main
|
augly/tests/image_tests/pytorch_test.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
import unittest
from typing import Any, Callable, Dict, List, Optional
import numpy as np
from augly.tests import ImageAugConfig
from augly.utils import pathmgr, TEST_URI
from PIL import Image

def are_equal_images(a: Image.Image, b: Image.Image) -> bool:
return a.size == b.size and np.allclose(np.array(a), np.array(b))
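
# Metadata dicts are compared key by key (sorted), skipping any keys listed in
# `exclude_keys`. Two mismatches are tolerated: bounding boxes, which come back
# as tuples but are stored as lists in the expected metadata, and file paths,
# where the expected value may be a relative suffix of the actual path.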
def are_equal_metadata(
actual_meta: List[Dict[str, Any]],
expected_meta: List[Dict[str, Any]],
exclude_keys: Optional[List[str]],
) -> bool:
if actual_meta == expected_meta:
return True
for actual_dict, expected_dict in zip(actual_meta, expected_meta):
for (act_k, act_v), (exp_k, exp_v) in zip(
sorted(actual_dict.items(), key=lambda kv: kv[0]),
sorted(expected_dict.items(), key=lambda kv: kv[0]),
):
if exclude_keys is not None and act_k in exclude_keys:
continue
if act_k != exp_k:
return False
if act_v == exp_v:
continue
# Bboxes are tuples but stored as lists in expected metadata
if (
isinstance(act_v, list)
                and all(isinstance(x, tuple) for x in act_v)
and len(act_v) == len(exp_v)
and all(list(x) == y for x, y in zip(act_v, exp_v))
):
continue
"""
Allow relative paths in expected metadata: just check that the end of the
actual path matches the expected path
"""
if not (
isinstance(act_v, str)
and isinstance(exp_v, str)
and act_v[-len(exp_v) :] == exp_v
):
return False
return True

class BaseImageUnitTest(unittest.TestCase):
ref_img_dir = os.path.join(TEST_URI, "image", "dfdc_expected_output")

    def test_import(self) -> None:
try:
from augly import image as imaugs
except ImportError:
self.fail("imaugs failed to import")
self.assertTrue(dir(imaugs), "Image directory does not exist")

    @classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.config = ImageAugConfig(input_file_index=0)
img_path, img_file = cls.config.get_input_path()
cls.local_img_path = pathmgr.get_local_path(img_path)
cls.img = Image.open(cls.local_img_path)
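
    # Runs `aug_function` twice, once on the image file path (writing the
    # result to a temporary file) and once on the already-loaded PIL image,
    # then asserts that both outputs match the stored reference image.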
def evaluate_function(self, aug_function: Callable[..., Image.Image], **kwargs):
ref = self.get_ref_image(aug_function.__name__)
with tempfile.NamedTemporaryFile(suffix=".png") as tmpfile:
aug_function(self.local_img_path, output_path=tmpfile.name, **kwargs)
file_dst = Image.open(tmpfile.name)
pil_dst = aug_function(self.img, **kwargs)
self.assertTrue(
are_equal_images(pil_dst, ref), "Expected and outputted images do not match"
)
self.assertTrue(
are_equal_images(file_dst, ref),
"Expected and outputted images do not match",
)
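
    # Runs a transform-class instance on the test image while collecting
    # metadata and bounding boxes, then checks the output image (and,
    # optionally, its mode) plus the collected metadata against the stored
    # expectations for `fname`.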
def evaluate_class(
self,
transform_class: Callable[..., Image.Image],
fname: str,
metadata_exclude_keys: Optional[List[str]] = None,
check_mode: bool = True,
):
metadata = []
bboxes, bbox_format = [(0.5, 0.5, 0.25, 0.75)], "yolo"
ref = self.get_ref_image(fname)
dst = transform_class(
self.img, metadata=metadata, bboxes=bboxes, bbox_format=bbox_format
)
if check_mode:
self.assertTrue(
self.img.mode == dst.mode,
"Expected and outputted image modes do not match",
)
self.assertTrue(
are_equal_metadata(metadata, self.metadata[fname], metadata_exclude_keys),
"Expected and outputted metadata do not match",
)
self.assertTrue(
are_equal_images(dst, ref), "Expected and outputted images do not match"
)

    def get_ref_image(self, fname: str) -> Image.Image:
ref_img_name = f"test_{fname}.png"
ref_local_path = pathmgr.get_local_path(
os.path.join(self.ref_img_dir, ref_img_name)
)
return Image.open(ref_local_path)
|
AugLy-main
|
augly/tests/image_tests/base_unit_test.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import random
import unittest
from augly import image as imaugs
from augly.tests.image_tests.base_unit_test import BaseImageUnitTest
from augly.utils import EMOJI_PATH, IMAGE_METADATA_PATH, IMG_MASK_PATH
from PIL import Image
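
# Class-based image API tests: each test delegates to
# BaseImageUnitTest.evaluate_class with expected metadata loaded from
# IMAGE_METADATA_PATH. The Random* transforms (and Compose) call
# random.seed(1) first so their sampled parameters, and therefore the
# reference outputs, stay fixed across runs.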
class TransformsImageUnitTest(BaseImageUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(IMAGE_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_ApplyLambda(self):
self.evaluate_class(imaugs.ApplyLambda(), fname="apply_lambda")
def test_ApplyPILFilter(self):
self.evaluate_class(imaugs.ApplyPILFilter(), fname="apply_pil_filter")
def test_Blur(self):
self.evaluate_class(imaugs.Blur(), fname="blur")
def test_Brightness(self):
self.evaluate_class(imaugs.Brightness(), fname="brightness")
def test_ChangeAspectRatio(self):
self.evaluate_class(imaugs.ChangeAspectRatio(), fname="change_aspect_ratio")
def test_ClipImageSize(self):
self.evaluate_class(
imaugs.ClipImageSize(max_resolution=1500000), fname="clip_image_size"
)
def test_ColorJitter(self):
self.evaluate_class(imaugs.ColorJitter(), fname="color_jitter")
def test_Compose(self):
random.seed(1)
self.evaluate_class(
imaugs.Compose(
[
imaugs.Blur(),
imaugs.ColorJitter(saturation_factor=1.5),
imaugs.OneOf(
[
imaugs.OverlayOntoScreenshot(),
imaugs.OverlayEmoji(),
imaugs.OverlayText(),
]
),
]
),
fname="compose",
)
def test_Contrast(self):
self.evaluate_class(imaugs.Contrast(), fname="contrast")
def test_ConvertColor(self):
self.evaluate_class(
imaugs.ConvertColor(mode="L"),
fname="convert_color",
check_mode=False,
)
def test_Crop(self):
self.evaluate_class(imaugs.Crop(), fname="crop")
def test_EncodingQuality(self):
self.evaluate_class(
imaugs.EncodingQuality(quality=30), fname="encoding_quality"
)
def test_Grayscale(self):
self.evaluate_class(imaugs.Grayscale(), fname="grayscale")
def test_HFlip(self):
self.evaluate_class(imaugs.HFlip(), fname="hflip")
def test_MaskedComposite(self):
self.evaluate_class(
imaugs.MaskedComposite(
mask=IMG_MASK_PATH,
transform_function=imaugs.Brightness(factor=0.1),
),
fname="masked_composite",
)
@unittest.skip("Failing on some envs, will fix")
def test_MemeFormat(self):
self.evaluate_class(imaugs.MemeFormat(), fname="meme_format")
def test_Opacity(self):
self.evaluate_class(imaugs.Opacity(), fname="opacity")
def test_OverlayEmoji(self):
self.evaluate_class(imaugs.OverlayEmoji(), fname="overlay_emoji")
def test_OverlayImage(self):
self.evaluate_class(
imaugs.OverlayImage(overlay=EMOJI_PATH, overlay_size=0.15, y_pos=0.8),
fname="overlay_image",
)
def test_OverlayOntoBackgroundImage(self):
self.evaluate_class(
imaugs.OverlayOntoBackgroundImage(
background_image=EMOJI_PATH, overlay_size=0.5, scale_bg=True
),
fname="overlay_onto_background_image",
)
def test_OverlayOntoScreenshot(self):
self.evaluate_class(
imaugs.OverlayOntoScreenshot(resize_src_to_match_template=False),
fname="overlay_onto_screenshot",
metadata_exclude_keys=[
"dst_bboxes",
"dst_height",
"dst_width",
"intensity",
"template_filepath",
],
)
def test_OverlayStripes(self):
self.evaluate_class(imaugs.OverlayStripes(), fname="overlay_stripes")
@unittest.skip("Failing on some envs, will fix")
def test_OverlayText(self):
text_indices = [5, 3, 1, 2, 1000, 221]
self.evaluate_class(imaugs.OverlayText(text=text_indices), fname="overlay_text")
def test_Pad(self):
self.evaluate_class(imaugs.Pad(), fname="pad")
def test_PadSquare(self):
self.evaluate_class(imaugs.PadSquare(), fname="pad_square")
def test_PerspectiveTransform(self):
self.evaluate_class(
imaugs.PerspectiveTransform(sigma=100.0), fname="perspective_transform"
)
def test_Pixelization(self):
self.evaluate_class(imaugs.Pixelization(), fname="pixelization")
def test_RandomAspectRatio(self):
random.seed(1)
self.evaluate_class(imaugs.RandomAspectRatio(), fname="RandomAspectRatio")
def test_RandomBlur(self):
random.seed(1)
self.evaluate_class(imaugs.RandomBlur(), fname="RandomBlur")
def test_RandomBrightness(self):
random.seed(1)
self.evaluate_class(imaugs.RandomBrightness(), fname="RandomBrightness")
@unittest.skip("Failing on some envs, will fix")
def test_RandomEmojiOverlay(self):
random.seed(1)
self.evaluate_class(
imaugs.RandomEmojiOverlay(emoji_size=(0.15, 0.3)),
fname="RandomEmojiOverlay",
)
def test_RandomNoise(self):
self.evaluate_class(imaugs.RandomNoise(), fname="random_noise")
def test_RandomPixelization(self):
random.seed(1)
self.evaluate_class(imaugs.RandomPixelization(), fname="RandomPixelization")
def test_RandomRotation(self):
random.seed(1)
self.evaluate_class(imaugs.RandomRotation(), fname="RandomRotation")
def test_Resize(self):
self.evaluate_class(imaugs.Resize(resample=Image.BICUBIC), fname="resize")
def test_Rotate(self):
self.evaluate_class(imaugs.Rotate(), fname="rotate")
def test_Saturation(self):
self.evaluate_class(imaugs.Saturation(factor=0.5), fname="saturation")
def test_Scale(self):
self.evaluate_class(imaugs.Scale(), fname="scale")
def test_Sharpen(self):
self.evaluate_class(imaugs.Sharpen(factor=2.0), fname="sharpen")
def test_ShufflePixels(self):
self.evaluate_class(imaugs.ShufflePixels(factor=0.5), fname="shuffle_pixels")
def test_Skew(self):
self.evaluate_class(imaugs.Skew(), fname="skew")
def test_VFlip(self):
self.evaluate_class(imaugs.VFlip(), fname="vflip")

if __name__ == "__main__":
unittest.main()
|
AugLy-main
|
augly/tests/image_tests/transforms_unit_test.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
from augly import audio as audaugs
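
# Each audio *_intensity helper returns a scalar score for how strongly the
# corresponding augmentation perturbs the signal; judging from the expected
# values below, the scale runs from 0.0 (no-op) to 100.0 (maximal change).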
class IntensityAudioUnitTest(unittest.TestCase):
def test_add_background_noise_intensity(self):
intensity = audaugs.add_background_noise_intensity(
metadata={}, snr_level_db=10.0
)
self.assertAlmostEqual(intensity, 90.90909091)
def test_apply_lambda_intensity(self):
intensity = audaugs.apply_lambda_intensity(
metadata={}, aug_function=lambda x, y: (x, y)
)
self.assertAlmostEqual(intensity, 100.0)
def test_change_volume_intensity(self):
intensity = audaugs.change_volume_intensity(metadata={}, volume_db=10.0)
self.assertAlmostEqual(intensity, 9.090909091)
def test_clicks_intensity(self):
intensity = audaugs.clicks_intensity(
metadata={}, seconds_between_clicks=0.5, snr_level_db=1.0
)
self.assertAlmostEqual(intensity, 98.26515152)
def test_clip_intensity(self):
intensity = audaugs.clip_intensity(metadata={}, duration_factor=0.75)
self.assertAlmostEqual(intensity, 25.0)
def test_harmonic_intensity(self):
intensity = audaugs.harmonic_intensity(metadata={})
self.assertAlmostEqual(intensity, 100.0)
def test_high_pass_filter_intensity(self):
intensity = audaugs.high_pass_filter_intensity(metadata={}, cutoff_hz=3000.0)
self.assertAlmostEqual(intensity, 15.0)
def test_insert_in_background_intensity(self):
intensity = audaugs.insert_in_background_intensity(
metadata={"src_duration": 10.0, "dst_duration": 15.0}
)
self.assertAlmostEqual(intensity, 33.3333333)
def test_invert_channels_intensity(self):
intensity = audaugs.invert_channels_intensity(metadata={"src_num_channels": 2})
self.assertAlmostEqual(intensity, 100.0)
def test_loop_intensity(self):
intensity = audaugs.loop_intensity(metadata={}, n=1)
self.assertAlmostEqual(intensity, 1.0)
def test_low_pass_filter_intensity(self):
intensity = audaugs.low_pass_filter_intensity(metadata={}, cutoff_hz=500.0)
self.assertAlmostEqual(intensity, 97.5)
def test_normalize_intensity(self):
intensity = audaugs.normalize_intensity(metadata={}, norm=np.inf)
self.assertAlmostEqual(intensity, 100.0)
def test_peaking_equalizer_intensity(self):
intensity = audaugs.peaking_equalizer_intensity(q=1.0, gain_db=-20.0)
self.assertAlmostEqual(intensity, 17.786561264822133)
def test_percussive_intensity(self):
intensity = audaugs.percussive_intensity(metadata={})
self.assertAlmostEqual(intensity, 100.0)
def test_pitch_shift_intensity(self):
intensity = audaugs.pitch_shift_intensity(metadata={}, n_steps=2.0)
self.assertAlmostEqual(intensity, 2.380952381)
def test_reverb_intensity(self):
intensity = audaugs.reverb_intensity(
metadata={}, reverberance=75.0, wet_only=False, room_scale=100.0
)
self.assertAlmostEqual(intensity, 75.0)
def test_speed_intensity(self):
intensity = audaugs.speed_intensity(metadata={}, factor=2.0)
self.assertAlmostEqual(intensity, 20.0)
def test_tempo_intensity(self):
intensity = audaugs.tempo_intensity(metadata={}, factor=0.5)
self.assertAlmostEqual(intensity, 20.0)
def test_time_stretch_intensity(self):
intensity = audaugs.time_stretch_intensity(metadata={}, factor=1.5)
self.assertAlmostEqual(intensity, 15.0)
def test_to_mono_intensity(self):
intensity = audaugs.to_mono_intensity(metadata={"src_num_channels": 1})
self.assertAlmostEqual(intensity, 0.0)
|
AugLy-main
|
augly/tests/audio_tests/intensity_unit_test.py
|
AugLy-main
|
augly/tests/audio_tests/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from augly import audio as audaugs
from augly.tests.audio_tests.base_unit_test import BaseAudioUnitTest
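
# Function-based audio API tests: each test delegates to
# BaseAudioUnitTest.evaluate_function, presumably the audio counterpart of the
# image BaseImageUnitTest helper, which compares outputs against stored
# reference files.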
class FunctionalAudioUnitTest(BaseAudioUnitTest):
def test_add_background_noise(self):
self.evaluate_function(
audaugs.add_background_noise, background_audio=None, snr_level_db=10.0
)
def test_apply_lambda(self):
self.evaluate_function(audaugs.apply_lambda)
def test_change_volume(self):
self.evaluate_function(audaugs.change_volume, volume_db=10.0)
def test_clicks(self):
self.evaluate_function(audaugs.clicks, seconds_between_clicks=0.5)
def test_clip(self):
self.evaluate_function(audaugs.clip, offset_factor=0.25, duration_factor=0.5)
def test_harmonic(self):
self.evaluate_function(audaugs.harmonic, kernel_size=31, power=2.0, margin=1.0)
def test_high_pass_filter(self):
self.evaluate_function(audaugs.high_pass_filter, cutoff_hz=3000)
def test_insert_in_background(self):
self.evaluate_function(audaugs.insert_in_background, offset_factor=0.3)
def test_invert_channels(self):
self.evaluate_function(audaugs.invert_channels)
def test_loop(self):
self.evaluate_function(audaugs.loop, n=1)
def test_low_pass_filter(self):
self.evaluate_function(audaugs.low_pass_filter, cutoff_hz=500)
def test_normalize(self):
self.evaluate_function(audaugs.normalize)
def test_peaking_equalizer(self):
self.evaluate_function(audaugs.peaking_equalizer, gain_db=-20.0)
def test_percussive(self):
self.evaluate_function(
audaugs.percussive, kernel_size=31, power=2.0, margin=1.0
)
def test_pitch_shift(self):
self.evaluate_function(audaugs.pitch_shift, n_steps=4)
def test_reverb(self):
self.evaluate_function(audaugs.reverb, reverberance=100.0)
def test_speed(self):
self.evaluate_function(audaugs.speed, factor=3.0)
def test_tempo(self):
self.evaluate_function(audaugs.tempo, factor=2.0)
def test_time_stretch(self):
self.evaluate_function(audaugs.time_stretch, rate=1.5)
def test_to_mono(self):
self.evaluate_function(audaugs.to_mono)
def test_fft_convolve(self):
self.evaluate_function(audaugs.fft_convolve)

if __name__ == "__main__":
unittest.main()
|
AugLy-main
|
augly/tests/audio_tests/functional_unit_test.py
|