python_code (stringlengths 0–679k) | repo_name (stringlengths 9–41) | file_path (stringlengths 6–149)
---|---|---
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
import torch
import torch.nn as nn
from timm.models.layers import DropPath, trunc_normal_
import math
from packnet_sfm.networks.DEST.simplified_attention import OverlapPatchEmbed, Mlp, Attention_MaxPool
class Attention_Joint_MaxPool(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Conv1d(dim, dim, 1, bias=qkv_bias)
self.k = nn.Conv1d(dim, dim, 1, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv1d(dim, dim, 1)
self.proj_drop = nn.Dropout(proj_drop)
self.norm2 = nn.BatchNorm1d(self.dim)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.BatchNorm1d(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv1d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
    def forward(self, x, y, H, W):
        # x: reference features, y: source features, both shaped (B, C, N) with N = H * W.
        # Only x is referenced below; y is unused in this implementation.
        B, C, N = x.shape
        q = self.q(x)
        q = q.reshape(B, self.num_heads, C // self.num_heads, N)
        q = q.permute(0, 1, 3, 2)
        if self.sr_ratio > 1:
            # spatially reduce the key sequence before attention
            x_ = x.reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1)
            x_ = self.norm(x_)
            k = self.k(x_).reshape(B, self.num_heads, C // self.num_heads, -1)
        else:
            k = self.k(x).reshape(B, self.num_heads, C // self.num_heads, -1)
        # value is the spatial mean of each channel, shared across heads: (B, num_heads, C)
        v = torch.mean(x, 2, True).repeat(1, 1, self.num_heads).transpose(-2, -1)
        attn = (q @ k) * self.scale
        # max-pool over the key dimension instead of a softmax: (B, num_heads, N)
        attn, _ = torch.max(attn, -1)
        out = (attn.transpose(-2, -1) @ v)
        out = out.transpose(-2, -1)
        out = self.proj(out)
        return out
class JointBlock(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.ReLU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm0 = nn.BatchNorm1d(dim)
self.norm1_ref = nn.BatchNorm1d(dim)
self.norm1_src = nn.BatchNorm1d(dim)
self.norm2 = nn.BatchNorm1d(dim)
self.attn_joint = Attention_Joint_MaxPool(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, ref_feat, src_feat, H, W):
src_feat = src_feat + self.drop_path(self.attn_joint(self.norm1_ref(ref_feat), self.norm1_src(src_feat), H, W))
src_feat = src_feat + self.drop_path(self.mlp(self.norm2(src_feat), H, W))
return src_feat
class SimplifiedJointTransformer(nn.Module):
def __init__(self, img_size=(224,224), patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.embed_dims = embed_dims
self.sr_ratios = sr_ratios
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=(img_size[0] // 4, img_size[1] // 4), patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=(img_size[0] // 8, img_size[1] // 8), patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=(img_size[0] // 16, img_size[1] // 16), patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([JointBlock(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = nn.BatchNorm1d(self.patch_embed1.N)
cur += depths[0]
self.block2 = nn.ModuleList([JointBlock(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = nn.BatchNorm1d(self.patch_embed2.N)
cur += depths[1]
self.block3 = nn.ModuleList([JointBlock(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = nn.BatchNorm1d(self.patch_embed3.N)
cur += depths[2]
self.block4 = nn.ModuleList([JointBlock(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = nn.BatchNorm1d(self.patch_embed4.N)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def freeze_patch_emb(self):
self.patch_embed1.requires_grad = False
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
        # this class stores per-stage widths in self.embed_dims; use the last stage for the head
        self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, ref_feat, x):
B = x.shape[0]
# stage 1
x, H, W = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
            if i > len(ref_feat['1']) - 1: i = -1
x = blk(ref_feat['1'][i], x, H, W)
x = self.norm1(x.transpose(-2, -1)).transpose(-2, -1)
x = x.reshape(B, -1, H, W).contiguous()
# stage 2
x, H, W = self.patch_embed2(x)
for i, blk in enumerate(self.block2):
            if i > len(ref_feat['2']) - 1: i = -1
x = blk(ref_feat['2'][i], x, H, W)
x = self.norm2(x.transpose(-2, -1)).transpose(-2, -1)
x = x.reshape(B, -1, H, W).contiguous()
# stage 3
x, H, W = self.patch_embed3(x)
for i, blk in enumerate(self.block3):
            if i > len(ref_feat['3']) - 1: i = -1
x = blk(ref_feat['3'][i], x, H, W)
x = self.norm3(x.transpose(-2, -1)).transpose(-2, -1)
x = x.reshape(B, -1, H, W).contiguous()
# stage 4
x, H, W = self.patch_embed4(x)
for i, blk in enumerate(self.block4):
            if i > len(ref_feat['4']) - 1: i = -1
x = blk(ref_feat['4'][i], x, H, W)
x = self.norm4(x.transpose(-2, -1)).transpose(-2, -1)
x = x.reshape(B, -1, H, W).contiguous()
return x
def forward(self, ref_feat, x):
return self.forward_features(ref_feat, x)
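# A minimal shape-check sketch (not in the original file), assuming torch is available:
# it runs Attention_Joint_MaxPool on random tensors to illustrate the (B, C, N) layout
# the joint attention expects. All sizes below are arbitrary.
if __name__ == '__main__':
    B, C, H, W = 2, 64, 12, 40            # batch, channels, feature-map height/width
    x = torch.randn(B, C, H * W)          # reference features, flattened to (B, C, N)
    y = torch.randn(B, C, H * W)          # source features (same layout; unused by the attention)
    attn = Attention_Joint_MaxPool(dim=C, num_heads=8, sr_ratio=1)
    print(attn(x, y, H, W).shape)         # expected: torch.Size([2, 64, 480])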
| DL4AGX-master | DEST/networks/DEST/simplified_joint_attention.py |
| DL4AGX-master | DEST/networks/DEST/__init__.py |
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
from functools import partial
import torch
from torch import nn
from packnet_sfm.networks.DEST.simplified_attention import SimplifiedTransformer as SimpTR
from packnet_sfm.networks.DEST.simplified_joint_attention import SimplifiedJointTransformer as SimpTR_Joint
def exists(val):
return val is not None
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
class DEST_Encoder_Decoder(nn.Module):
def __init__(
self,
*,
img_size=(192, 640),
dims=(32, 64, 160, 256),
heads=(1, 2, 4, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(2, 2, 2, 2),
channels=3,
decoder_dim=128,
num_classes=64,
semseg=False
):
super().__init__()
dims, heads, ff_expansion, reduction_ratio, num_layers = map(partial(cast_tuple, depth = 4), (dims, heads, ff_expansion, reduction_ratio, num_layers))
assert all([*map(lambda t: len(t) == 4, (dims, heads, ff_expansion, reduction_ratio, num_layers))]), 'only four stages are allowed, all keyword arguments must be either a single value or a tuple of 4 values'
self.dest_encoder = SimpTR(
img_size=img_size, in_chans=channels, num_classes=num_classes,
embed_dims=dims, num_heads=heads, mlp_ratios=ff_expansion, qkv_bias=True, qk_scale=None, drop_rate=0,
drop_path_rate=0.1, attn_drop_rate=0., norm_layer=nn.LayerNorm, depths=num_layers, sr_ratios=reduction_ratio)
self.dims = dims
self.fuse_conv1 = nn.Sequential(nn.Conv2d(dims[-1], dims[-1], 1), nn.ReLU(inplace=True))
self.fuse_conv2 = nn.Sequential(nn.Conv2d(dims[-2], dims[-2], 1), nn.ReLU(inplace=True))
self.fuse_conv3 = nn.Sequential(nn.Conv2d(dims[-3], dims[-3], 1), nn.ReLU(inplace=True))
self.fuse_conv4 = nn.Sequential(nn.Conv2d(dims[-4], dims[-4], 1), nn.ReLU(inplace=True))
self.upsample = nn.ModuleList([nn.Sequential(nn.Upsample(scale_factor=2, mode='nearest'))]*len(dims))
self.fused_1 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(dims[-1], dims[-1], 3), nn.ReLU(inplace=True))
self.fused_2 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(dims[-2] + dims[-1], dims[-2], 3), nn.ReLU(inplace=True))
self.fused_3 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(dims[-3] + dims[-2], dims[-3], 3), nn.ReLU(inplace=True))
self.fused_4 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(dims[-4] + dims[-3], dims[-4], 3), nn.ReLU(inplace=True))
self.fused_5 = nn.Sequential(nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(dims[-4], dims[-4], 1),
nn.ReLU(True))
self.semseg = semseg
def dest_decoder(self, lay_out):
fused_1 = self.fuse_conv1(lay_out[-1])
fused_1 = self.upsample[-1](fused_1)
fused_1 = self.fused_1(fused_1)
fused_2 = torch.cat([fused_1, self.fuse_conv2(lay_out[-2])], 1)
fused_2 = self.upsample[-2](fused_2)
fused_2 = self.fused_2(fused_2)
fused_3 = torch.cat([fused_2, self.fuse_conv3(lay_out[-3])], 1)
fused_3 = self.upsample[-3](fused_3)
fused_3 = self.fused_3(fused_3)
fused_4 = torch.cat([fused_3, self.fuse_conv4(lay_out[-4])], 1)
fused_4 = self.upsample[-4](fused_4)
fused_4 = self.fused_4(fused_4)
if self.semseg:
return fused_4
fused_5 = self.fused_5(fused_4)
return fused_5, fused_4, fused_3, fused_2
def forward(self, x):
layer_outputs, ref_feat = self.dest_encoder(x)
out = self.dest_decoder(layer_outputs)
return out, layer_outputs, ref_feat
def DEST_Pose(
img_size=(192, 640),
dims = (32, 64, 160, 256),
heads = (1, 2, 5, 8),
ff_expansion = (8, 8, 8, 8),
reduction_ratio = (8, 4, 2, 1),
num_layers = (2, 2, 2, 2),
channels=3,
num_classes=512,
connectivity=True):
dims, heads, ff_expansion, reduction_ratio, num_layers = map(partial(cast_tuple, depth = 4), (dims, heads, ff_expansion, reduction_ratio, num_layers))
assert all([*map(lambda t: len(t) == 4, (dims, heads, ff_expansion, reduction_ratio, num_layers))]), 'only four stages are allowed, all keyword arguments must be either a single value or a tuple of 4 values'
    if connectivity:
model = SimpTR_Joint(
img_size=img_size, in_chans=channels, num_classes=num_classes,
embed_dims=dims, num_heads=heads, mlp_ratios=ff_expansion, qkv_bias=True, qk_scale=None, drop_rate=0.,
drop_path_rate=0.1, attn_drop_rate= 0., norm_layer=nn.LayerNorm, depths=num_layers, sr_ratios=reduction_ratio)
else:
model = SimpTR(
img_size=img_size, in_chans=channels, num_classes=num_classes,
embed_dims=dims, num_heads=heads, mlp_ratios=ff_expansion, qkv_bias=True, qk_scale=None, drop_rate=0.,
drop_path_rate=0.1, attn_drop_rate= 0., norm_layer=nn.LayerNorm, depths=num_layers, sr_ratios=reduction_ratio)
return num_classes, model
def SimpleTR_B0(img_size=(192, 640), num_out_ch=64, semseg=False):
model = DEST_Encoder_Decoder(
img_size=img_size,
dims=(32, 64, 160, 256),
heads=(1, 2, 5, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(2, 2, 2, 2),
channels=3,
decoder_dim=256,
num_classes=num_out_ch,
semseg=semseg)
return num_out_ch, model
def SimpleTR_B1(img_size=(192, 640), num_out_ch=256, semseg=False):
model = DEST_Encoder_Decoder(
img_size=img_size,
dims=(64, 128, 250, 320),
heads=(1, 2, 5, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(2, 2, 2, 2),
channels=3,
decoder_dim=num_out_ch,
num_classes=num_out_ch,
semseg=semseg)
return num_out_ch, model
def SimpleTR_B2(img_size=(192, 640), num_out_ch=256, semseg=False):
model = DEST_Encoder_Decoder(
img_size=img_size,
dims=(64, 128, 250, 320),
heads=(1, 2, 5, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(3, 3, 6, 3),
channels=3,
decoder_dim=num_out_ch,
num_classes=num_out_ch,
semseg=semseg)
return num_out_ch, model
def SimpleTR_B3(img_size=(192, 640), num_out_ch=256, semseg=False):
model = DEST_Encoder_Decoder(
img_size=img_size,
dims=(64, 128, 250, 320),
heads=(1, 2, 5, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(3, 6, 8, 3),
channels=3,
decoder_dim=512,
num_classes=256,
semseg=semseg)
return num_out_ch, model
def SimpleTR_B4(img_size=(192, 640), num_out_ch=512, semseg=False):
model = DEST_Encoder_Decoder(
img_size=img_size,
dims=(64, 128, 250, 320),
heads=(1, 2, 5, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(3, 8, 12, 5),
channels=3,
decoder_dim=num_out_ch,
num_classes=num_out_ch,
semseg=semseg)
return num_out_ch, model
def SimpleTR_B5(img_size=(192, 640), num_out_ch=512, semseg=False):
model = DEST_Encoder_Decoder(
img_size=img_size,
dims=(64, 128, 250, 320),
heads=(1, 2, 5, 8),
ff_expansion=(8, 8, 4, 4),
reduction_ratio=(8, 4, 2, 1),
num_layers=(3, 10, 16, 5),
channels=3,
decoder_dim=num_out_ch,
num_classes=num_out_ch,
semseg=semseg)
return num_out_ch, model
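# Minimal smoke-test sketch (not in the original file): builds the B0 encoder-decoder
# and runs a dummy forward pass. It assumes the packnet_sfm SimplifiedTransformer
# import above resolves and that its encoder returns per-stage 4D feature maps,
# as the decoder expects.
if __name__ == '__main__':
    num_ch, net = SimpleTR_B0(img_size=(192, 640), num_out_ch=64)
    out, layer_outputs, ref_feat = net(torch.randn(1, 3, 192, 640))
    print([o.shape for o in out])             # decoder feature maps at several scales
    print([f.shape for f in layer_outputs])   # encoder stage outputs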
| DL4AGX-master | DEST/networks/DEST/DEST_EncDec.py |
#!/usr/bin/env python3
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/convert_to_trt.py
# Description: Script to convert a pb file to a trt engine (with uff in the middle)
#####################################################################################################
import sys
import os
import ctypes
import argparse
import glob
import numpy as np
import tensorrt as trt
from PIL import Image
# Utility functions
import utils.inference as inference_utils # TRT/TF inference wrappers
import utils.model as model_utils # UFF conversion
# Model used for inference
# MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
# MODEL_NAME = 'ssd_mobilenet_v1_coco_2018_01_28'
# MODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29'
# MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'
# Precision command line argument -> TRT Engine datatype
TRT_PRECISION_TO_DATATYPE = {
# 8: trt.DataType.INT8,
16: trt.DataType.HALF,
32: trt.DataType.FLOAT
}
TRT_PRECISION_TO_LABEL = {
# 8: 'INT8',
16: 'HALF',
32: 'FLOAT'
}
def main(args):
# Loading FlattenConcat plugin library using CDLL has a side
# effect of loading FlattenConcat plugin into internal TensorRT
    # PluginRegistry data structure. This will be needed when parsing the
    # network into UFF, since some operations will need to use this plugin.
try:
ctypes.CDLL(args.flatten_concat)
except FileNotFoundError:
print("Error: {}\n{}".format("Could not find {}".format(args.flatten_concat),
"Make sure you have compiled FlattenConcat custom plugin layer"))
sys.exit(1)
model_path = args.model
model_filename = os.path.basename(model_path)
uff_path = os.path.join(args.output_dir, model_filename[:model_filename.rfind('.')] + '.uff')
trt_engine_path = os.path.join(
args.output_dir,
model_filename[:model_filename.rfind('.')] + '{}.trt'.format(TRT_PRECISION_TO_LABEL[args.precision]))
model_utils.model_to_uff(
model_path,
uff_path,
n_classes=args.n_classes + 1, # +1 for background
input_dims=args.input_dims,
feature_dims=args.feature_dims)
# TODO: create calibrator here!
inference_utils.TRTInference(trt_engine_path,
uff_path,
TRT_PRECISION_TO_DATATYPE[args.precision],
input_shape=args.input_dims,
batch_size=args.max_batch_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert a TF pb file to a TRT engine.')
parser.add_argument('-p',
'--precision',
type=int,
choices=[32, 16],
default=32,
help='desired TensorRT float precision to build an engine with')
parser.add_argument('-m', '--model', help='model file')
parser.add_argument('-o', '--output_dir', help='output directory')
parser.add_argument('-fc', '--flatten_concat', help='path of built FlattenConcat plugin')
# parser.add_argument('-voc', '--voc_dir', default=None,
# help='VOC2007 root directory (for calibration)')
parser.add_argument('-b', '--max_batch_size', type=int, default=64, help='max TensorRT engine batch size')
parser.add_argument('-c', '--n_classes', type=int, default=90, help='number of classes')
parser.add_argument('-d',
'--input_dims',
type=int,
default=[3, 300, 300],
nargs=3,
help='channel, height, and width of input')
parser.add_argument(
'-f',
'--feature_dims',
type=int,
default=[19, 10, 5, 3, 2, 1],
nargs='+',
help=
'feature extractor dimensions (inspect training graph and look for spatial dimension of the "BoxPredictor_*/BoxEncodingPredictor/BiasAdd" nodes)'
)
# Parse arguments passed
args = parser.parse_args()
main(args)
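# Example invocation (comment only; workspace paths are hypothetical). It converts a
# frozen SSD .pb to UFF and builds an FP16 engine next to it:
#   python3 convert_to_trt.py \
#       -m workspace/models/ssd_inception_v2_coco_2017_11_17/frozen_inference_graph.pb \
#       -o workspace/engines -fc build/libflattenconcat.so \
#       -p 16 -c 90 -d 3 300 300 -f 19 10 5 3 2 1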
| DL4AGX-master | MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/convert_to_trt.py |
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/paths.py
# Description: Path management singleton class
#####################################################################################################
import os
import sys
import tensorrt as trt
class Paths(object):
def __init__(self):
self._SAMPLE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
self._FLATTEN_CONCAT_PLUGIN_PATH = os.path.join(self._SAMPLE_ROOT, 'build', 'libflattenconcat.so')
self._WORKSPACE_DIR_PATH = os.path.join(self._SAMPLE_ROOT, 'workspace')
self._VOC_DIR_PATH = \
os.path.join(self._SAMPLE_ROOT, 'VOCdevkit', 'VOC2007')
# User configurable paths
def set_workspace_dir_path(self, workspace_dir):
self._WORKSPACE_DIR_PATH = workspace_dir
def get_workspace_dir_path(self):
return self._WORKSPACE_DIR_PATH
def set_flatten_concat_plugin_path(self, plugin_path):
self._FLATTEN_CONCAT_PLUGIN_PATH = plugin_path
def get_flatten_concat_plugin_path(self):
return self._FLATTEN_CONCAT_PLUGIN_PATH
def set_voc_dir_path(self, voc_dir_path):
self._VOC_DIR_PATH = voc_dir_path
def get_voc_dir_path(self):
return self._VOC_DIR_PATH
# Fixed paths
def get_sample_root(self):
return self._SAMPLE_ROOT
def get_models_dir_path(self):
return os.path.join(self.get_workspace_dir_path(), 'models')
def get_engines_dir_path(self):
return os.path.join(self.get_workspace_dir_path(), 'engines')
def get_engine_path(self, model_name, inference_type=trt.DataType.FLOAT, max_batch_size=1):
inference_type_to_str = {
trt.DataType.FLOAT: 'FLOAT',
trt.DataType.HALF: 'HALF',
trt.DataType.INT32: 'INT32',
trt.DataType.INT8: 'INT8'
}
return os.path.join(self.get_engines_dir_path(), inference_type_to_str[inference_type],
'{}_{}.buf'.format(model_name, max_batch_size))
def get_voc_annotation_cache_path(self):
return os.path.join(self.get_workspace_dir_path(), 'annotations_cache')
def get_voc_image_set_path(self):
return os.path.join(self.get_voc_dir_path(), 'ImageSets', 'Main', 'test.txt')
def get_voc_annotation_path(self):
return os.path.join(self.get_voc_dir_path(), 'annotations', '{}.xml')
# return os.path.join(self.get_voc_dir_path(), 'Annotations', '{}.xml')
def get_voc_ppm_img_path(self):
# return os.path.join(self.get_voc_dir_path(), 'PPMImages512x512', '{}.ppm')
return os.path.join(self.get_voc_dir_path(), 'PPMImages300x300', '{}.ppm')
# return os.path.join(self.get_voc_dir_path(), 'PPMImages', '{}.ppm')
def get_voc_jpg_img_path(self):
return os.path.join(self.get_voc_dir_path(), 'JPEGImages', '{}.jpg')
def get_voc_tensorflow_model_detections_path(self, model_name):
return os.path.join(self.get_workspace_dir_path(), 'results', model_name, 'tensorflow')
def get_voc_tensorrt_model_detections_path(self, model_name, trt_engine_datatype=trt.DataType.FLOAT):
trt_results_path = \
os.path.join(self.get_workspace_dir_path(), 'results', model_name, 'tensorrt')
if trt_engine_datatype == trt.DataType.HALF:
return os.path.join(trt_results_path, 'HALF')
else:
return os.path.join(trt_results_path, 'FLOAT')
def get_voc_model_detections_path(self, model_name, backend='tensorrt', use_fp16=False):
if backend != 'tensorrt':
return self.get_voc_tensorflow_model_detections_path(model_name)
else:
            # map the boolean flag to the trt.DataType the helper expects
            return self.get_voc_tensorrt_model_detections_path(
                model_name, trt.DataType.HALF if use_fp16 else trt.DataType.FLOAT)
def get_model_url(self, model_name):
return 'http://download.tensorflow.org/models/object_detection/{}.tar.gz'.format(model_name)
def get_model_dir_path(self, model_name):
return os.path.join(self.get_models_dir_path(), model_name)
def get_model_pb_path(self, model_name):
return os.path.join(self.get_model_dir_path(model_name), 'frozen_inference_graph.pb')
def get_model_uff_path(self, model_name):
return os.path.join(self.get_model_dir_path(model_name), 'frozen_inference_graph.uff')
# Paths correctness verifier
def verify_all_paths(self, should_verify_voc=False):
error = False
if should_verify_voc:
error = self._verify_voc_paths()
if not os.path.exists(self.get_workspace_dir_path()):
error = True
if error:
print("An error occured when running the script.")
sys.exit(1)
def _verify_voc_paths(self):
error = False
voc_dir = self.get_voc_dir_path()
voc_image_list = self.get_voc_image_set_path()
# 1) Check if directory and image list file are present
if not os.path.exists(voc_dir) or \
not os.path.exists(voc_image_list):
self._print_incorrect_voc_error(voc_dir)
error = True
# 2) Check if all images listed in image list are present
with open(voc_image_list, 'r') as f:
image_numbers = f.readlines()
image_numbers = [line.strip() for line in image_numbers]
if not self._verify_voc(image_numbers):
self._print_incorrect_voc_error(voc_dir)
error = True
return error
def _verify_voc(self, voc_image_list):
voc_image_path = self.get_voc_jpg_img_path()
for img_number in voc_image_list:
img = voc_image_path.format(img_number)
if not os.path.exists(img):
return False
return True
# Error printers
def _print_incorrect_voc_error(self, voc_dir):
print("Error: {}\n{}\n{}".format("Incomplete VOC dataset detected (voc_dir: {})".format(voc_dir),
"Try redownloading VOC or check if --voc_dir is set up correctly",
"For more details, check README.md"))
PATHS = Paths()
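# Usage sketch (not in the original file): scripts point the PATHS singleton at a
# workspace and derive every other path from it. The workspace directory below is a
# hypothetical placeholder; the model name is one referenced elsewhere in this sample.
if __name__ == '__main__':
    PATHS.set_workspace_dir_path('/tmp/ssd_workspace')
    print(PATHS.get_models_dir_path())
    print(PATHS.get_engine_path('ssd_inception_v2_coco_2017_11_17',
                                trt.DataType.HALF, max_batch_size=8))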
| DL4AGX-master | MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/paths.py |
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/__init__.py
#####################################################################################################
| DL4AGX-master | MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/__init__.py |
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/model.py
# Description: Model download and UFF conversion utils
#####################################################################################################
import os
import sys
import tarfile
import requests
import tensorflow as tf
import tensorrt as trt
import graphsurgeon as gs
import uff
# from utils.paths import PATHS
from utils.paths import PATHS
# UFF conversion functionality
def build_nms_node(
name="NMS",
op="NMS_TRT",
backgroundLabelId=0,
confSigmoid=True,
confidenceThreshold=1e-8,
isNormalized=True,
topK=100,
keepTopK=100,
nmsThreshold=0.6,
numClasses=91, # +1 for background
scoreConverter='SIGMOID',
shareLocation=True,
varianceEncodedInTarget=False,
**kw_args):
return gs.create_plugin_node(name,
op=op,
backgroundLabelId=backgroundLabelId,
confSigmoid=confSigmoid,
confidenceThreshold=confidenceThreshold,
isNormalized=isNormalized,
topK=topK,
keepTopK=keepTopK,
nmsThreshold=nmsThreshold,
numClasses=numClasses,
scoreConverter=scoreConverter,
shareLocation=shareLocation,
varianceEncodedInTarget=varianceEncodedInTarget,
**kw_args)
def build_grid_anchor_node(name="GridAnchor",
op='GridAnchor_TRT',
aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
featureMapShapes=[19, 10, 5, 3, 2, 1],
maxSize=0.95,
minSize=0.2,
numLayers=6,
variance=[0.1, 0.1, 0.2, 0.2],
**kw_args):
return gs.create_plugin_node(name,
op=op,
aspectRatios=aspectRatios,
featureMapShapes=featureMapShapes,
maxSize=maxSize,
minSize=minSize,
numLayers=numLayers,
variance=variance,
**kw_args)
# This class contains converted (UFF) model metadata
class ModelData(object):
# Name of input node
INPUT_NAME = "Input"
# Name of output node
OUTPUT_NAME = "NMS"
def ssd_unsupported_nodes_to_plugin_nodes(ssd_graph, n_classes, input_dims, feature_dims):
"""Makes ssd_graph TensorRT comparible using graphsurgeon.
This function takes ssd_graph, which contains graphsurgeon
DynamicGraph data structure. This structure describes frozen Tensorflow
graph, that can be modified using graphsurgeon (by deleting, adding,
replacing certain nodes). The graph is modified by removing
Tensorflow operations that are not supported by TensorRT's UffParser
and replacing them with custom layer plugin nodes.
Note: This specific implementation works only for
ssd_inception_v2_coco_2017_11_17 network.
Args:
ssd_graph (gs.DynamicGraph): graph to convert
Returns:
gs.DynamicGraph: UffParser compatible SSD graph
"""
# Remove assert nodes
all_assert_nodes = ssd_graph.find_nodes_by_op("Assert")
# Remove those nodes from the graph.
ssd_graph.remove(all_assert_nodes, remove_exclusive_dependencies=True)
# Find all identity nodes.
all_identity_nodes = ssd_graph.find_nodes_by_op("Identity")
    # Remove them from the graph by forwarding their inputs to their consumers.
ssd_graph.forward_inputs(all_identity_nodes)
# Create TRT plugin nodes to replace unsupported ops in Tensorflow graph
channels = input_dims[0]
height = input_dims[1]
width = input_dims[2]
nodes = ssd_graph.node_map
node_names = ssd_graph.node_map.keys()
break_now = False
class_predictor_label = 'concat_1'
box_loc_label = 'concat'
for inode in nodes['concat'].input:
if break_now:
break
for jnode in nodes[inode].input:
if break_now:
break
if 'ClassPredictor' in jnode:
class_predictor_label = 'concat'
box_loc_label = 'concat_1'
break_now = True
concat_namespace = "Concatenate"
include_anchors = False
for k in node_names:
if "MultipleGridAnchorGenerator" in k:
include_anchors = True
if "MultipleGridAnchorGenerator/Concatenate" in k:
concat_namespace = "MultipleGridAnchorGenerator/Concatenate"
# Now we need to collapse a few namespaces.
if include_anchors:
Concat = gs.create_node("concat_priorbox", op="ConcatV2", dtype=tf.float32, axis=2)
Input = gs.create_plugin_node(ModelData.INPUT_NAME,
op="Placeholder",
dtype=tf.float32,
shape=[1, channels, height, width])
FlattenConcat_box_conf = gs.create_plugin_node(
"concat_box_conf",
op="FlattenConcat_TRT",
dtype=tf.float32,
)
FlattenConcat_box_loc = gs.create_plugin_node(
"concat_box_loc",
op="FlattenConcat_TRT",
dtype=tf.float32,
)
NMS = build_nms_node(numClasses=n_classes)
# Create a mapping of namespace names -> plugin nodes.
namespace_plugin_map = {
"Postprocessor": NMS,
"Preprocessor": Input,
"ToFloat": Input,
"image_tensor": Input,
box_loc_label: FlattenConcat_box_loc,
class_predictor_label: FlattenConcat_box_conf
}
# # Now create a new graph by collapsing namespaces
# ssd_graph.collapse_namespaces(namespace_plugin_map)
# # Determine the parameter for the GridAnchors
# # print(ssd_graph.as_graph_def().node)
# # print(tf.import_graph_def(ssd_graph.as_graph_def()))
# for node in ssd_graph.as_graph_def().node:
# if node.name == "BoxPredictor_2/BoxEncodingPredictor/BiasAdd":
# print(type(node))
# print(node)
# # print([dict(nodes[x].attr) for x in ssd_graph.node_map.keys() if 'BoxEncodingPredictor/BiasAdd' in x])
# # print(ssd_graph.find_nodes_by_name("BoxPredictor_2/BoxEncodingPredictor/BiasAdd"))
# exit()
if include_anchors:
GridAnchor = build_grid_anchor_node(featureMapShapes=feature_dims)
namespace_plugin_map[concat_namespace] = Concat
namespace_plugin_map["MultipleGridAnchorGenerator"] = GridAnchor
# Now create a new graph by collapsing namespaces
ssd_graph.collapse_namespaces(namespace_plugin_map)
# Remove the outputs, so we just have a single output node (NMS).
# If remove_exclusive_dependencies is True, the whole graph will be removed!
ssd_graph.remove(ssd_graph.graph_outputs, remove_exclusive_dependencies=False)
# add in grid anchors for SSDLite
if not include_anchors:
Const = gs.create_node("Const", op="Const", dtype=tf.float32, value=[128, 128])
GridAnchor = build_grid_anchor_node(inputs=[Const], featureMapShapes=feature_dims)
Concat = gs.create_node("concat_priorbox", inputs=[GridAnchor], op="ConcatV2", dtype=tf.float32, axis=2)
ssd_graph.append(Const)
ssd_graph.append(GridAnchor)
ssd_graph.append(Concat)
NMS = build_nms_node(inputs=list(NMS.input) + ["concat_priorbox"], numClasses=n_classes)
namespace_plugin_map = {"NMS": NMS}
ssd_graph.collapse_namespaces(namespace_plugin_map)
# For exported graphs, we need to remove the squeeze node between concat plugin and the NMS plugin.
# Downloaded graphs don't need this step. Refer to convert_ssd_v1.py
all_squeeze_nodes = ssd_graph.find_nodes_by_name("Squeeze")
    # Remove them from the graph by forwarding their inputs to their consumers.
ssd_graph.forward_inputs(all_squeeze_nodes)
# clean up NMS and Input nodes
actualInputOrder = []
for node in ssd_graph._internal_graphdef.node:
if node.name == "NMS":
if ModelData.INPUT_NAME in node.input:
node.input.remove(ModelData.INPUT_NAME)
for input_name in node.input:
if "loc" in input_name:
actualInputOrder.append(0)
elif "conf" in input_name:
actualInputOrder.append(1)
elif "priorbox" in input_name:
actualInputOrder.append(2)
elif node.name == ModelData.INPUT_NAME:
if "image_tensor:0" in node.input:
node.input.remove("image_tensor:0")
    # NOTE: since the actual order of the NMS node's inputs differs between versions, reinsert the NMS TRT op with the detected input order
NMS = build_nms_node(inputOrder=actualInputOrder, numClasses=n_classes)
namespace_plugin_map = {"NMS": NMS}
ssd_graph.collapse_namespaces(namespace_plugin_map)
return ssd_graph
def model_to_uff(model_path, output_uff_path, n_classes, input_dims, feature_dims, silent=False):
"""Takes frozen .pb graph, converts it to .uff and saves it to file.
Args:
model_path (str): .pb model path
output_uff_path (str): .uff path where the UFF file will be saved
silent (bool): if True, writes progress messages to stdout
"""
dynamic_graph = gs.DynamicGraph(model_path)
dynamic_graph = ssd_unsupported_nodes_to_plugin_nodes(dynamic_graph, n_classes, input_dims, feature_dims)
dynamic_graph.write_tensorboard(os.path.join(os.path.dirname(output_uff_path), 'trt_tensorboard'))
uff.from_tensorflow(dynamic_graph.as_graph_def(), [ModelData.OUTPUT_NAME],
output_filename=output_uff_path,
text=True)
# Model download functionality
def maybe_print(should_print, print_arg):
"""Prints message if supplied boolean flag is true.
Args:
should_print (bool): if True, will print print_arg to stdout
print_arg (str): message to print to stdout
"""
if should_print:
print(print_arg)
def maybe_mkdir(dir_path):
"""Makes directory if it doesn't exist.
Args:
dir_path (str): directory path
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
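# Usage sketch (not in the original file): converting a frozen SSD graph to UFF with
# the helper above. The file names are hypothetical placeholders, and the FlattenConcat
# plugin must already be loaded (see convert_to_trt.py); the numeric values mirror the
# defaults used by that script.
if __name__ == '__main__':
    model_to_uff(
        model_path='frozen_inference_graph.pb',
        output_uff_path='frozen_inference_graph.uff',
        n_classes=91,                      # 90 COCO classes + 1 background
        input_dims=[3, 300, 300],
        feature_dims=[19, 10, 5, 3, 2, 1])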
| DL4AGX-master | MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/model.py |
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/engine.py
# Description: Utility functions for building/saving/loading TensorRT Engines
#####################################################################################################
import sys
import os
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from utils.model import ModelData
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
This function is similair to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
TensorRT plugins doesn't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# Current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
# when using lower precision [e.g. NMS output would not be np.float32
# anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def build_engine(uff_model_path,
trt_logger,
trt_engine_datatype=trt.DataType.FLOAT,
batch_size=1,
input_shape=[3, 300, 300],
silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, trt.UffParser() as parser:
builder.max_workspace_size = 1 << 30
if trt_engine_datatype == trt.DataType.HALF:
builder.fp16_mode = True
builder.max_batch_size = batch_size
parser.register_input(ModelData.INPUT_NAME, input_shape)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
return builder.build_cuda_engine(network)
def save_engine(engine, engine_dest_path):
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
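# Usage sketch (not in the original file): build an engine from a UFF file, cache it,
# and reload it. The file names are hypothetical placeholders; TRTInference in
# utils/inference.py drives these helpers in practice.
if __name__ == '__main__':
    import pycuda.autoinit  # noqa: F401 -- creates the CUDA context allocate_buffers needs
    logger = trt.Logger(trt.Logger.WARNING)
    engine = build_engine('frozen_inference_graph.uff', logger,
                          trt_engine_datatype=trt.DataType.HALF,
                          batch_size=1, input_shape=[3, 300, 300])
    save_engine(engine, 'ssd_fp16.buf')
    engine = load_engine(trt.Runtime(logger), 'ssd_fp16.buf')
    inputs, outputs, bindings, stream = allocate_buffers(engine)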
| DL4AGX-master | MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/engine.py |
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/inference.py
# Description: Utility functions for performing image inference
#####################################################################################################
import os
import time
import tensorrt as trt
# import tensorflow as tf
from PIL import Image
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import utils.engine as engine_utils # TRT Engine creation/save/load utils
import utils.model as model_utils  # UFF conversion utils
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def GiB(val):
return val * 1 << 30
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
class TRTInference(object):
"""Manages TensorRT objects for model inference."""
def __init__(self,
trt_engine_path,
uff_model_path,
trt_engine_datatype=trt.DataType.FLOAT,
input_shape=[3, 300, 300],
batch_size=1):
"""Initializes TensorRT objects needed for model inference.
Args:
trt_engine_path (str): path where TensorRT engine should be stored
uff_model_path (str): path of .uff model
trt_engine_datatype (trt.DataType):
requested precision of TensorRT engine used for inference
batch_size (int): batch size for which engine
should be optimized for
"""
# We first load all custom plugins shipped with TensorRT,
# some of them will be needed during inference
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(TRT_LOGGER)
# TRT engine placeholder
self.trt_engine = None
self.input_shape = input_shape
# Display requested engine settings to stdout
print("TensorRT inference engine settings:")
print(" * Inference precision - {}".format(trt_engine_datatype))
print(" * Max batch size - {}\n".format(batch_size))
# If engine is not cached, we need to build it
if not os.path.exists(trt_engine_path):
            # This function uses the supplied .uff file
            # together with the UffParser to build the TensorRT
            # engine. For more details, check the implementation
self.trt_engine = engine_utils.build_engine(uff_model_path,
TRT_LOGGER,
trt_engine_datatype=trt_engine_datatype,
input_shape=self.input_shape,
batch_size=batch_size)
# Save the engine to file
engine_utils.save_engine(self.trt_engine, trt_engine_path)
        # If the engine was not built above, a cached engine file exists, so load it
if not self.trt_engine:
print("Loading cached TensorRT engine from {}".format(trt_engine_path))
self.trt_engine = engine_utils.load_engine(self.trt_runtime, trt_engine_path)
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = \
engine_utils.allocate_buffers(self.trt_engine)
# Execution context is needed for inference
self.context = self.trt_engine.create_execution_context()
# Allocate memory for multiple usage [e.g. multiple batch inference]
input_volume = trt.volume(input_shape)
self.numpy_array = np.zeros((self.trt_engine.max_batch_size, input_volume))
def infer(self, image_path):
"""Infers model on given image.
Args:
image_path (str): image to run object detection model on
"""
# Load image into CPU
img = self._load_img(image_path)
# Copy it into appropriate place into memory
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, img.ravel())
        # When inferring on a single image, we measure inference
# time to output it to the user
inference_start_time = time.time()
# Fetch output from the model
[detection_out, keepCount_out] = do_inference(self.context,
bindings=self.bindings,
inputs=self.inputs,
outputs=self.outputs,
stream=self.stream)
# Output inference time
print("TensorRT inference time: {} ms".format(int(round((time.time() - inference_start_time) * 1000))))
# And return results
return detection_out, keepCount_out
def infer_batch(self, image_paths):
"""Infers model on batch of same sized images resized to fit the model.
Args:
            image_paths (list of str): paths to the images that will be packed into a batch
                and fed into the model
"""
        # Verify that the supplied batch size is not too big
max_batch_size = self.trt_engine.max_batch_size
actual_batch_size = len(image_paths)
if actual_batch_size > max_batch_size:
raise ValueError("image_paths list bigger ({}) than engine max batch size ({})".format(
actual_batch_size, max_batch_size))
# Load all images to CPU...
imgs = self._load_imgs(image_paths)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, imgs.ravel())
# ...fetch model outputs...
[detection_out, keep_count_out] = do_inference(self.context,
bindings=self.bindings,
inputs=self.inputs,
outputs=self.outputs,
stream=self.stream,
batch_size=max_batch_size)
# ...and return results.
return detection_out, keep_count_out
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape((im_height, im_width, self.input_shape[0])).astype(np.uint8)
def _load_imgs(self, image_paths):
batch_size = self.trt_engine.max_batch_size
for idx, image_path in enumerate(image_paths):
img_np = self._load_img(image_path)
self.numpy_array[idx] = img_np
return self.numpy_array
def _load_img(self, image_path):
image = Image.open(image_path)
        # self.input_shape is [channels, height, width]
        model_input_height = self.input_shape[1]
        model_input_width = self.input_shape[2]
# Note: Bilinear interpolation used by Pillow is a little bit
# different than the one used by Tensorflow, so if network receives
# an image that is not 300x300, the network output may differ
# from the one output by Tensorflow
image_resized = image.resize(size=(model_input_width, model_input_height), resample=Image.BILINEAR)
img_np = self._load_image_into_numpy_array(image_resized)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
# Normalize to [-1.0, 1.0] interval (expected by model)
img_np = (2.0 / 255.0) * img_np - 1.0
img_np = img_np.ravel()
return img_np
# # This class is similar as TRTInference inference, but it manages Tensorflow
# class TensorflowInference(object):
# def __init__(self, pb_model_path):
# self.detection_graph = tf.Graph()
# with self.detection_graph.as_default():
# od_graph_def = tf.GraphDef()
# with tf.gfile.GFile(pb_model_path, 'rb') as fid:
# serialized_graph = fid.read()
# od_graph_def.ParseFromString(serialized_graph)
# tf.import_graph_def(od_graph_def, name='')
# self.sess = tf.Session(graph=self.detection_graph)
# def infer(self, image_path):
# img_np = self._load_img(image_path)
# return self._run_tensorflow_graph(np.expand_dims(img_np, axis=0))
# def infer_batch(self, image_paths):
# img_np = self._load_imgs(image_paths)
# return self._run_tensorflow_graph(img_np)
# def _run_tensorflow_graph(self, image_input):
# ops = self.detection_graph.get_operations()
# all_tensor_names = {output.name for op in ops for output in op.outputs}
# tensor_dict = {}
# for key in [
# 'num_detections', 'detection_boxes',
# 'detection_scores', 'detection_classes'
# ]:
# tensor_name = key + ':0'
# if tensor_name in all_tensor_names:
# tensor_dict[key] = self.detection_graph.get_tensor_by_name(
# tensor_name)
# image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# output_dict = self.sess.run(tensor_dict,
# feed_dict={image_tensor: image_input})
# # All outputs are float32 numpy arrays, so convert types as appropriate
# output_dict['num_detections'] = output_dict['num_detections'].astype(np.int32)
# output_dict['detection_classes'] = output_dict[
# 'detection_classes'].astype(np.uint8)
# return output_dict
# def _load_image_into_numpy_array(self, image):
# (im_width, im_height) = image.size
# return np.array(image).reshape(
# (im_height, im_width, self.input_shape[0])
# ).astype(np.uint8)
# def _load_imgs(self, image_paths):
# numpy_array = np.zeros((len(image_paths),) + (300, 300, 3))
# for idx, image_path in enumerate(image_paths):
# img_np = self._load_img(image_path)
# numpy_array[idx] = img_np
# return numpy_array
# def _load_img(self, image_path):
# img = Image.open(image_path)
# img_np = self._load_image_into_numpy_array(img)
# return img_np
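# Usage sketch (not in the original file): end-to-end single-image inference with the
# wrapper above. The engine/UFF/image paths are hypothetical placeholders; in this
# sample they are produced by convert_to_trt.py.
if __name__ == '__main__':
    trt_inference = TRTInference(trt_engine_path='ssd_FLOAT.trt',
                                 uff_model_path='frozen_inference_graph.uff',
                                 trt_engine_datatype=trt.DataType.FLOAT,
                                 input_shape=[3, 300, 300],
                                 batch_size=1)
    detection_out, keep_count_out = trt_inference.infer('dog.jpg')
    print(keep_count_out)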
| DL4AGX-master | MultiDeviceInferencePipeline/training/objectDetection/ssdConvertUFF/utils/inference.py |
######################################################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/training/laneSegmentation/pb2uff.py
# Description: Convert pb file to uff file
#####################################################################################################
import tensorrt as trt
import uff
import argparse
def main():
parser = argparse.ArgumentParser(description="Generate UFF file from protobuf file.")
parser.add_argument("-p",
"--pb_file_name",
type=str,
required=True,
help="""A protobuf file containing a frozen tensorflow graph""")
parser.add_argument("-u", "--uff_filename", type=str, required=True, help="""Output UFF file""")
parser.add_argument("-o", "--out_tensor_names", type=str, required=True, help="""Output Tensor names""")
args, unknown_args = parser.parse_known_args()
out_tensor_names = [args.out_tensor_names]
uff.from_tensorflow_frozen_model(args.pb_file_name,
out_tensor_names,
output_filename=args.uff_filename,
text=True,
quiet=False,
write_preprocessed=True,
debug_mode=False)
if __name__ == '__main__':
main()
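# Example invocation (comment only; the file names and output tensor name are
# hypothetical and depend on the frozen graph being converted):
#   python3 pb2uff.py -p frozen_graph.pb -u model.uff -o logits/semantic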
| DL4AGX-master | MultiDeviceInferencePipeline/training/laneSegmentation/pb2uff.py |
#############################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/MultiDeviceInferencePipeline/enginecreator/utils/coco_eval.py
# Description: Evaluate inference results on COCO
############################################################################
import os
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'cocoapi/PythonAPI'))
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import argparse
def parse_command_line():
"""
Parse command-line.
Returns:
Namespace with members for all parsed arguments.
"""
parser = argparse.ArgumentParser(description='Run evaluation on COCO dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-a',
'--annotation_file',
required=True,
type=str,
default='instances_val2017.json',
help='Full path to the annotation.json file.')
parser.add_argument('-r',
'--result_file',
required=True,
type=str,
default='COCO_val2017_float_eval_MV2-tmp2.json',
help='Full path to result.json file.')
return parser.parse_args()
def main():
args = parse_command_line()
annType = "bbox"
annFile = args.annotation_file
resFile = args.result_file
cocoGt = COCO(annFile)
cocoDt = cocoGt.loadRes(resFile)
# running evaluation
imgIds = sorted(cocoGt.getImgIds())
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if __name__ == '__main__':
main()
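# Example invocation (comment only), matching the default file names declared above:
#   python3 coco_eval.py -a instances_val2017.json -r COCO_val2017_float_eval_MV2-tmp2.json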
| DL4AGX-master | MultiDeviceInferencePipeline/enginecreator/utils/coco_eval.py |
#!/opt/ss/bin/python
# Copyright (c) 2011-2020, NVIDIA CORPORATION.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see
# <http://www.gnu.org/licenses/>.
from distutils.core import setup
setup(
name="collectd_plugin",
version="1.0.0",
description="Collectd Python Plugin",
author="NVIDIA CORPORATION",
author_email="dbishop@nvidia.com",
license="GPLv2",
packages=["collectd_plugin"],
)
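# Example installation (comment only): the standard distutils entry points apply, e.g.
#   python setup.py build
#   python setup.py install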
| swiftstack-collectd-plugin-main | setup.py |
# Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
import contextlib
import os
import json
import mock
import shutil
import inspect
import unittest
import tempfile
from six.moves.queue import Queue, Empty
from collectd_plugin import collectd_plugin
class TestRead(unittest.TestCase):
maxDiff = None
def setUp(self):
self.metrics_queue = Queue()
self.node_uuid = "0e1da3a6-6531-4a1b-951a-ad099d259753"
self.data = {
"metrics_queue": self.metrics_queue,
"node_uuid": self.node_uuid,
}
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@mock.patch.multiple(
"collectd_plugin.collectd_plugin",
time=mock.DEFAULT,
network_stats=mock.DEFAULT,
xfs_stats=mock.DEFAULT,
recon_stats=mock.DEFAULT,
replication_progress_stats=mock.DEFAULT,
)
def test_read_plumbing(
self, time, network_stats, xfs_stats, recon_stats, replication_progress_stats
):
stub_time = time.return_value = 1380210264.539025
self.assertEqual(1380210264.539025, collectd_plugin.time())
stub_time_int = int(stub_time)
network_stats.return_value = [
("net.stat.foobie", (stub_time, 123.394)),
("net.stat.barbie", (stub_time, 492)),
]
xfs_stats.return_value = [
("xfs.get.jiggy", (stub_time, 323)),
("xfs.with.it", (stub_time, 59343945)),
]
recon_stats.return_value = [
("swift.recon.party", (stub_time + 7, 4839)),
("swift.recon.time", (stub_time - 1, stub_time)),
]
replication_progress_stats.return_value = [
("replication.d1.accounts.primary", (stub_time, 1)),
("replication.d1.accounts.handoff", (stub_time, 2)),
("replication.d1.containers.primary", (stub_time, 3)),
("replication.d1.containers.handoff", (stub_time, 4)),
("replication.d1.objects.primary", (stub_time, 5)),
("replication.d1.objects.handoff", (stub_time, 6)),
]
collectd_plugin.read(self.data)
got_metrics = []
while True:
try:
got_metrics.append(self.metrics_queue.get_nowait())
except Empty:
break
# Whatever the collectors return is just used (i.e. the stub returns
# hi-res timestamps, so we expect hi-res timestamps even if the
# collectors will really be using the timestamp they're handed and
# they'll be handed an int() of the hi-res time).
self.assertListEqual(
[
[
("%s.net.stat.foobie" % self.node_uuid, (stub_time, 123.394)),
("%s.net.stat.barbie" % self.node_uuid, (stub_time, 492)),
("%s.xfs.get.jiggy" % self.node_uuid, (stub_time, 323)),
("%s.xfs.with.it" % self.node_uuid, (stub_time, 59343945)),
("%s.swift.recon.party" % self.node_uuid, (stub_time + 7, 4839)),
(
"%s.swift.recon.time" % self.node_uuid,
(stub_time - 1, stub_time),
),
(
"%s.replication.d1.accounts.primary" % self.node_uuid,
(stub_time, 1),
),
(
"%s.replication.d1.accounts.handoff" % self.node_uuid,
(stub_time, 2),
),
(
"%s.replication.d1.containers.primary" % self.node_uuid,
(stub_time, 3),
),
(
"%s.replication.d1.containers.handoff" % self.node_uuid,
(stub_time, 4),
),
(
"%s.replication.d1.objects.primary" % self.node_uuid,
(stub_time, 5),
),
(
"%s.replication.d1.objects.handoff" % self.node_uuid,
(stub_time, 6),
),
]
],
got_metrics,
)
# The hi-res time is int()'ed before being passed into the collectors
self.assertListEqual(
[mock.call(self.data, stub_time_int)], network_stats.mock_calls
)
self.assertListEqual(
[mock.call(self.data, stub_time_int)], xfs_stats.mock_calls
)
self.assertListEqual(
[mock.call(self.data, stub_time_int)], recon_stats.mock_calls
)
def test_recon_stats_default_cache_dir(self):
argspec = inspect.getargspec(collectd_plugin.recon_stats)
self.assertEqual(
(["data", "timestamp", "cache_dir"], None, None, ("/var/cache/swift",)),
argspec,
)
def test_recon_stats_old_swift(self):
with open(os.path.join(self.tempdir, "account.recon"), "wb") as afp:
afp.write(
json.dumps(
{
"replication_stats": {
"no_change": 12,
"attempted": 6,
"ts_repl": 3,
"remote_merge": 2,
"failure": 1,
"diff": 21,
"rsync": 19,
"success": 13,
"remove": 14,
"diff_capped": 7,
"start": 1380221451.92608,
"hashmatch": 5,
"empty": 4,
},
"replication_time": 0.0981740951538086,
"account_audits_since": 1380224610.071976,
"account_audits_passed": 12,
"account_audits_failed": 0,
"account_auditor_pass_completed": 0.010714054107666016,
}
).encode("utf-8")
)
with open(os.path.join(self.tempdir, "container.recon"), "wb") as cfp:
cfp.write(
json.dumps(
{
"replication_stats": {
"no_change": 16,
"attempted": 8,
"ts_repl": 4,
"remote_merge": 2,
"failure": 1,
"diff": 3,
"rsync": 6,
"success": 12,
"remove": 24,
"diff_capped": 48,
"start": 1380221432.501042,
"hashmatch": 7,
"empty": 5,
},
"container_updater_sweep": 0.25456881523132324,
"replication_time": 0.06824707984924316,
"container_audits_passed": 16,
"container_audits_failed": 0,
"container_audits_since": 1380225328.220907,
"container_auditor_pass_completed": 0.02747488021850586,
}
).encode("utf-8")
)
with open(os.path.join(self.tempdir, "object.recon"), "wb") as ofp:
ofp.write(
json.dumps(
{
"object_replication_time": 0.13569668531417847, # minutes!
"async_pending": 42,
"object_updater_sweep": 0.04616594314575195,
}
).encode("utf-8")
)
self.assertEqual(
{
"account": {
"replication_duration": 0.0981740951538086,
# start + replication_time
"replication_last": 1380221451.92608 + 0.098174095153808,
"auditor_duration": 0.010714054107666016,
},
"container": {
"replication_duration": 0.06824707984924316,
# start + replication_time
"replication_last": 1380221432.501042 + 0.06824707984924316,
"auditor_duration": 0.02747488021850586,
"updater_duration": 0.25456881523132324,
},
"object": {
"async_pending": 42,
"replication_duration": 0.13569668531417847 * 60,
                    # Provide the latest time at which the last full replication
                    # run could possibly have completed.
"replication_last": 1380228784 - (0.13569668531417847 * 60),
"updater_duration": 0.04616594314575195,
},
},
collectd_plugin.normalized_recon_data(1380228784, cache_dir=self.tempdir),
)
self.assertEqual(
[
# NOTE: replication_duration/replication_last are now collected
# using a fingerprint message, not through carbon/whisper.
("recon.account.auditor_duration", (1380228784, 0.010714054107666016)),
("recon.container.auditor_duration", (1380228784, 0.02747488021850586)),
("recon.container.updater_duration", (1380228784, 0.25456881523132324)),
("recon.object.async_pending", (1380228784, 42)),
("recon.object.updater_duration", (1380228784, 0.04616594314575195)),
],
collectd_plugin.recon_stats(self.data, 1380228784, cache_dir=self.tempdir),
)
def test_recon_stats_newer_swift(self):
with open(os.path.join(self.tempdir, "account.recon"), "wb") as afp:
afp.write(
json.dumps(
{
"replication_stats": {
"no_change": 12,
"attempted": 6,
"ts_repl": 3,
"remote_merge": 2,
"failure": 1,
"diff": 21,
"rsync": 19,
"success": 13,
"remove": 14,
"diff_capped": 7,
"start": 1380221451.92608,
"hashmatch": 5,
"empty": 4,
},
"replication_time": 0.0981740951538086, # seconds
"replication_last": 1380233365.809194,
"account_audits_since": 1380228210.269836,
"account_audits_passed": 12,
"account_audits_failed": 0,
"account_auditor_pass_completed": 0.006899118423461914,
}
).encode("utf-8")
)
with open(os.path.join(self.tempdir, "container.recon"), "wb") as cfp:
cfp.write(
json.dumps(
{
"replication_stats": {
"no_change": 16,
"attempted": 8,
"ts_repl": 4,
"remote_merge": 2,
"failure": 1,
"diff": 3,
"rsync": 6,
"success": 12,
"remove": 24,
"diff_capped": 48,
"start": 1380221432.501042,
"hashmatch": 7,
"empty": 5,
},
"container_updater_sweep": 0.25456881523132324,
"replication_time": 0.06824707984924316, # seconds
"replication_last": 1380233362.596543, # last completion
"container_audits_passed": 16,
"container_audits_failed": 0,
"container_audits_since": 1380225328.220907,
"container_auditor_pass_completed": 0.02747488021850586,
}
).encode("utf-8")
)
with open(os.path.join(self.tempdir, "object.recon"), "wb") as ofp:
ofp.write(
json.dumps(
{
"object_replication_time": 0.13569668531417847, # minutes!
"object_replication_last": 1380233375.494914,
"async_pending": 42,
"object_updater_sweep": 0.04616594314575195, # seconds
# We don't bother to collect object auditor stats because
# they seem a bit unreliable. For instance, "start_time" is
# actually only the time since stats were last output, not the
# start of the current audit sweep. And AFAICT, stats won't
# even get output unless the total sweep time is > 1 hr.
"object_auditor_stats_ALL": {
"audit_time": 73.19667983055115,
"bytes_processed": 12545977,
"errors": 34,
"passes": 1470,
"quarantined": 17,
"start_time": 1380557981.751367,
},
}
).encode("utf-8")
)
self.assertEqual(
{
"account": {
"replication_duration": 0.0981740951538086,
# from replication_last, not replication_stats.start
"replication_last": 1380233365.809194,
"auditor_duration": 0.006899118423461914,
},
"container": {
"replication_duration": 0.06824707984924316,
# from replication_last, not replication_stats.start
"replication_last": 1380233362.596543,
"auditor_duration": 0.02747488021850586,
"updater_duration": 0.25456881523132324,
},
"object": {
"async_pending": 42,
"replication_duration": 0.13569668531417847 * 60,
"replication_last": 1380233375.494914,
"updater_duration": 0.04616594314575195,
},
},
collectd_plugin.normalized_recon_data(1380228784, cache_dir=self.tempdir),
)
self.assertEqual(
[
# NOTE: replication_duration/replication_last are now collected
# using a fingerprint message, not through carbon/whisper.
("recon.account.auditor_duration", (1380228784, 0.006899118423461914)),
("recon.container.auditor_duration", (1380228784, 0.02747488021850586)),
("recon.container.updater_duration", (1380228784, 0.25456881523132324)),
("recon.object.async_pending", (1380228784, 42)),
("recon.object.updater_duration", (1380228784, 0.04616594314575195)),
],
collectd_plugin.recon_stats(self.data, 1380228784, cache_dir=self.tempdir),
)
def test_recon_stats_ec_swift(self):
with open(os.path.join(self.tempdir, "object.recon"), "wb") as ofp:
ofp.write(
json.dumps(
{
"object_replication_time": 0.13569668531417847, # minutes!
"object_replication_last": 1380233375.494914,
"object_reconstruction_time": 0.28092991511027016, # minutes!
"object_reconstruction_last": 1481064466.128906,
"async_pending": 42,
"object_updater_sweep": 0.04616594314575195, # seconds
# We don't bother to collect object auditor stats because
# they seem a bit unreliable. For instance, "start_time" is
# actually only the time since stats were last output, not the
# start of the current audit sweep. And AFAICT, stats won't
# even get output unless the total sweep time is > 1 hr.
"object_auditor_stats_ALL": {
"audit_time": 73.19667983055115,
"bytes_processed": 12545977,
"errors": 34,
"passes": 1470,
"quarantined": 17,
"start_time": 1380557981.751367,
},
}
).encode("utf-8")
)
self.assertEqual(
{
"account": {},
"container": {},
"object": {
"async_pending": 42,
"replication_duration": 0.13569668531417847 * 60,
"replication_last": 1380233375.494914,
"reconstruction_duration": 0.28092991511027016 * 60,
"reconstruction_last": 1481064466.128906,
"updater_duration": 0.04616594314575195,
},
},
collectd_plugin.normalized_recon_data(1380228784, cache_dir=self.tempdir),
)
self.assertEqual(
[
# NOTE: replication_duration/replication_last are now collected
# using a fingerprint message, not through carbon/whisper.
# reconstruction_duration/reconstruction_last were *never*
# collected through carbon/whisper
("recon.object.async_pending", (1380228784, 42)),
("recon.object.updater_duration", (1380228784, 0.04616594314575195)),
],
collectd_plugin.recon_stats(self.data, 1380228784, cache_dir=self.tempdir),
)
def test_recon_stats_no_files(self):
self.assertEqual(
[],
collectd_plugin.recon_stats(self.data, 1380228784, cache_dir=self.tempdir),
)
def test_recon_stats_bad_json(self):
with open(os.path.join(self.tempdir, "account.recon"), "wb") as afp:
afp.write("slap-happy".encode("utf-8"))
with open(os.path.join(self.tempdir, "container.recon"), "wb") as cfp:
cfp.write("slim-shady".encode("utf-8"))
with open(os.path.join(self.tempdir, "object.recon"), "wb") as ofp:
ofp.write("slip-slop".encode("utf-8"))
self.assertEqual(
[],
collectd_plugin.recon_stats(self.data, 1380228784, cache_dir=self.tempdir),
)
def test_recon_stats_no_keys(self):
with open(os.path.join(self.tempdir, "account.recon"), "wb") as afp:
afp.write(json.dumps({}).encode("utf-8"))
with open(os.path.join(self.tempdir, "container.recon"), "wb") as cfp:
cfp.write(json.dumps({}).encode("utf-8"))
with open(os.path.join(self.tempdir, "object.recon"), "wb") as ofp:
ofp.write(json.dumps({}).encode("utf-8"))
self.assertEqual(
[],
collectd_plugin.recon_stats(self.data, 1380228784, cache_dir=self.tempdir),
)
@contextlib.contextmanager
def _patch_stats_file(self, stub=None):
self.stats_file = os.path.join(self.tempdir, "replication_progress.json")
if stub is not None:
with open(self.stats_file, "w") as f:
f.write(stub)
with mock.patch(
"collectd_plugin.collectd_plugin.REPLICATION_PROGRESS_STATS_FILE", self.stats_file
):
yield
def test_read_replication_progress(self):
# test no file
with self._patch_stats_file():
stats = collectd_plugin.read_replication_progress()
self.assertEqual(stats, {})
self.assertFalse(os.path.exists(self.stats_file))
# test empty file
with self._patch_stats_file(stub=""):
stats = collectd_plugin.read_replication_progress()
self.assertEqual(stats, {})
self.assertEqual(open(self.stats_file).read(), "")
# test with stub data
stub = {
"d1": {
"accounts": {"primary": 1, "handoff": 2},
"containers": {"primary": 3, "handoff": 4},
"objects": {"primary": 5, "handoff": 6},
},
"d2": {
"accounts": {"primary": 7, "handoff": 8},
"containers": {"primary": 9, "handoff": 10},
"objects": {"primary": 11, "handoff": 12},
},
}
# test with stub data
with self._patch_stats_file(stub=json.dumps(stub)):
stats = collectd_plugin.read_replication_progress()
self.assertEqual(stats, stub)
def test_replication_progress_stats(self):
d = {}
t = 1462999369
# test no file
with self._patch_stats_file():
metrics = collectd_plugin.replication_progress_stats(d, t)
self.assertEqual(metrics, [])
# test empty file
with self._patch_stats_file(stub=""):
metrics = collectd_plugin.replication_progress_stats(d, t)
self.assertEqual(metrics, [])
# test stub data
stub = {
"d1": {
"accounts": {"primary": 1, "handoff": 2},
"containers": {"primary": 3, "handoff": 4},
"objects": {"primary": 5, "handoff": 6},
},
"d2": {
"accounts": {"primary": 7, "handoff": 8},
"containers": {"primary": 9, "handoff": 10},
"objects": {"primary": 11, "handoff": 12},
},
"d3": {},
}
with self._patch_stats_file(stub=json.dumps(stub)):
metrics = collectd_plugin.replication_progress_stats(d, t)
expected = [
("replication.d1.accounts.primary", (t, 1)),
("replication.d1.accounts.handoff", (t, 2)),
("replication.d1.containers.primary", (t, 3)),
("replication.d1.containers.handoff", (t, 4)),
("replication.d1.objects.primary", (t, 5)),
("replication.d1.objects.handoff", (t, 6)),
("replication.d2.accounts.primary", (t, 7)),
("replication.d2.accounts.handoff", (t, 8)),
("replication.d2.containers.primary", (t, 9)),
("replication.d2.containers.handoff", (t, 10)),
("replication.d2.objects.primary", (t, 11)),
("replication.d2.objects.handoff", (t, 12)),
("replication.ALL.accounts.primary", (t, 8)),
("replication.ALL.accounts.handoff", (t, 10)),
("replication.ALL.containers.primary", (t, 12)),
("replication.ALL.containers.handoff", (t, 14)),
("replication.ALL.objects.primary", (t, 16)),
("replication.ALL.objects.handoff", (t, 18)),
]
self.assertEqual(set(metrics), set(expected))
|
swiftstack-collectd-plugin-main
|
tests/test_collectd_plugin.py
|
# Copyright (c) 2011-2020, NVIDIA CORPORATION.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see
# <http://www.gnu.org/licenses/>.
from collections import defaultdict
from datetime import datetime
import errno
import json
import os
import re
from six.moves.queue import Queue, Empty
from six.moves import cPickle as pickle
from six.moves import range
from six.moves.urllib.parse import urlsplit, urlunsplit
import socket
import struct
import sys
import threading
from time import time, sleep
import traceback
running_inside_collectd = False
try:
import collectd
running_inside_collectd = True
except ImportError:
pass
# Work around a threading bug present in (at least) 2.7.3
# This hack appears safe for 2.7.6 (which has this fixed).
# See https://bugs.python.org/msg160297
threading._DummyThread._Thread__stop = lambda x: 42
def escape_metric_segment(segment):
"""
Escape a string that needs to be a single Whisper metric segment.
"""
return segment.replace(".", "_")
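# Example (illustrative): escape_metric_segment("10.0.0.1") == "10_0_0_1",
# keeping a dotted value such as an IP address as a single Whisper path segment.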
# The second argument here is just to work around flexmock's
# deficiencies.
def resolve_hostname_in_url(url, resolve=socket.gethostbyname):
parsed = urlsplit(url)
hostname = parsed.hostname
if _looks_like_ipv4(hostname):
ip = hostname
elif _looks_like_ipv6(hostname):
ip = "[%s]" % hostname
else:
ip = resolve(hostname)
if parsed.port is not None:
new_netloc = "%s:%s" % (ip, parsed.port)
else:
new_netloc = ip
return urlunsplit(
(parsed.scheme, new_netloc, parsed.path, parsed.query, parsed.fragment)
)
def _looks_like_ipv4(maybe_ip):
try:
socket.inet_pton(socket.AF_INET, maybe_ip)
return True
except socket.error:
return False
def _looks_like_ipv6(maybe_ip):
try:
socket.inet_pton(socket.AF_INET6, maybe_ip)
return True
except socket.error:
return False
db = {}
PING_TIMEOUT = 15 # seconds
LINGER_PERIOD = 1800 # half an hour
WRITE_BATCH_THRESHOLD = 2 # no metrics for X seconds? send batch.
def _log(msg, *args):
# Poor-man's logger which just goes to stderr
# (special-case escaping for messages with no args that contain percent)
if not args:
msg = msg.replace("%", "%%")
msg = "[%%s] %s\n" % (msg,)
msg_args = [datetime.now().isoformat(" ")]
msg_args.extend(args)
sys.stderr.write(msg % tuple(msg_args))
def _seq_chunker(seq, size):
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
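# Example (illustrative): list(_seq_chunker([1, 2, 3, 4, 5], 2)) yields
# [[1, 2], [3, 4], [5]] -- consecutive slices of at most `size` items.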
def init():
# Populate db with types.db data (DSes)
types_db = open("/opt/ss/share/collectd/types.db")
line = types_db.readline()
while line:
matches = re.findall(r"(?:^(\w+)|\s+(\w+):\S+,?)", line)
if len(matches) > 1:
match_list = []
for match in matches[1:]:
match_list.append(match[1])
db[matches[0][0]] = match_list
line = types_db.readline()
types_db.close()
def config(config_obj, data):
"""Store configuration data like the zmq socket endpoint."""
try:
for sub_obj in config_obj.children:
data[sub_obj.key] = sub_obj.values[0]
data["statsd_flush_interval"] = int(data["statsd_flush_interval"])
if "statsd_pct_thresholds" in data:
data["statsd_pct_thresholds"] = [
float(s) for s in data["statsd_pct_thresholds"].split()
]
elif "statsd_pct_threshold" in data:
data["statsd_pct_thresholds"] = [float(data["statsd_pct_threshold"])]
# We're piggy-backing initialization in config(), which is mixing
# semantics, but init() doesn't have access to our opaque callback
# "data" dict.
data["metrics_queue"] = Queue()
data["threads"] = []
if data.get("monitor_endpoint"):
if data.get("ping_endpoint"):
pinger = threading.Thread(
name="collectd pinger",
target=zmq_pinger_thread,
args=(data["ping_endpoint"], data["node_uuid"]),
)
pinger.daemon = True # must set before .start()
pinger.start()
data["threads"].append(pinger)
writer = threading.Thread(
target=zmq_writer_thread,
args=(
data["metrics_queue"],
data["monitor_endpoint"],
data["node_uuid"],
),
)
writer.daemon = True # must set before .start()
writer.start()
data["threads"].append(writer)
elif data.get("carbon_host") and data.get("carbon_port"):
writer = threading.Thread(
name="collectd carbon writer",
target=carbon_writer_thread,
args=(data["metrics_queue"], data["carbon_host"], data["carbon_port"]),
)
writer.daemon = True
writer.start()
data["threads"].append(writer)
if "statsd_bind_ip" in data:
statsd = threading.Thread(
name="statsd",
target=statsd_thread,
args=(
data["metrics_queue"],
data["statsd_bind_ip"],
data["statsd_port"],
data["statsd_pct_thresholds"],
data["statsd_flush_interval"],
data["node_uuid"],
data.get("statsd_forward_host"),
data.get("statsd_forward_port"),
data.get("statsd_forward_prefix_hostname"),
),
)
statsd.daemon = True
statsd.start()
data["threads"].append(statsd)
except Exception as e:
_log("Error in config: %r", e)
os._exit(2)
def statsd_thread(
metrics_queue,
bind_ip,
port,
pct_thresholds,
flush_interval_seconds,
node_uuid,
statsd_forward_host,
statsd_forward_port,
statsd_forward_prefix_hostname,
):
from pystatsd import Server
if statsd_forward_host and statsd_forward_port:
_log(
"Forwarding StatsD stats to %s:%s", statsd_forward_host, statsd_forward_port
)
statsd_forward_prefix = ""
if statsd_forward_prefix_hostname:
statsd_forward_prefix = socket.gethostname()
_log("Forwarding StatsD stats with hostname prefix %r", statsd_forward_prefix)
if data.get("monitor_endpoint") or (
data.get("carbon_host") and data.get("carbon_port")):
transport = "graphite_queue"
else:
# force our hacked-up pystatsd Server to use TransportNop
transport = "graphite"
server = Server(
pct_thresholds=pct_thresholds,
transport=transport,
queue=metrics_queue,
flush_interval=flush_interval_seconds,
counters_prefix=node_uuid,
timers_prefix=node_uuid,
statsd_forward_host=statsd_forward_host,
statsd_forward_port=statsd_forward_port,
statsd_forward_prefix=statsd_forward_prefix,
)
server.serve(hostname=bind_ip, port=int(port))
def zmq_pinger_thread(ping_endpoint, node_uuid):
zmq = 1 # appease pyflakes which can't understand the following import
exec("import zmq")
context = zmq.Context.instance()
while True:
endpoint = resolve_hostname_in_url(ping_endpoint)
ping_socket = context.socket(zmq.REQ)
ping_socket.setsockopt(zmq.LINGER, 0)
ping_socket.connect(endpoint)
ping_poller = zmq.Poller()
ping_poller.register(ping_socket, zmq.POLLIN)
ping_socket.send("PING:%s" % (node_uuid,))
# wait for a PONG response
socks = dict(ping_poller.poll(1000 * PING_TIMEOUT))
if socks.get(ping_socket) == zmq.POLLIN:
# loop to soak any extra PONGs just in case
while socks.get(ping_socket) == zmq.POLLIN:
pong = ping_socket.recv()
if pong != "PONG":
_log("Received unexpected PING response: %r", pong)
socks = dict(ping_poller.poll(0))
else:
# Failed to receive a PONG within timeout
_log("Failed to receive a PONG; exiting!")
os._exit(3)
ping_socket.close()
sleep(10)
def zmq_writer_thread(metrics_queue, monitor_endpoint, node_uuid):
zmq = 1 # appease pyflakes which can't understand the following import
exec("import zmq")
context = zmq.Context.instance()
sock = None
while True:
metrics = metrics_queue.get() # blocking
# Don't let more than 5x WRITE_BATCH_THRESHOLD seconds go by without a
# flush
start = time()
        while time() - start < 5 * WRITE_BATCH_THRESHOLD:
try:
additional_metrics = metrics_queue.get(timeout=WRITE_BATCH_THRESHOLD)
metrics.extend(additional_metrics)
except Empty:
break
if sock:
sock.close()
endpoint = resolve_hostname_in_url(monitor_endpoint)
sock = context.socket(zmq.PUSH)
# Setting a linger time is only meant as a brake on memory
# usage in weird network situations.
#
# In normal good times, pings and monitoring data both work,
# so data doesn't sit around for more than a couple RTTs
# unacknowledged, and this setting does nothing.
#
# In normal bad times, pings and monitoring data both fail, so
# after a failed ping, we call os._exit() and our process goes
# kaput immediately, so the monitoring data doesn't accumulate
# in memory.
#
# However, on bizarro-net, pings get through, but monitoring
# data doesn't. In that case, up to $LINGER_PERIOD worth of
# monitoring data will queue up, in memory, waiting to be
# shipped out. If we didn't set the linger time for the
# monitoring data, then since the default linger time is 0
# (i.e. keep data forever), that would be a memory leak.
sock.setsockopt(zmq.LINGER, LINGER_PERIOD * 1000)
sock.connect(endpoint)
# Send out at most 150 metrics in a single batch
for metrics_chunk in _seq_chunker(metrics, 150):
pickled = pickle.dumps(metrics_chunk)
payload = (
struct.pack("!LL", 8 + len(node_uuid) + len(pickled), len(node_uuid))
+ node_uuid
+ pickled
)
sock.send(payload)
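# Wire format of the payload built above (for reference): a 4-byte big-endian
# total length (the 8 header bytes plus uuid and pickle lengths), a 4-byte
# big-endian uuid length, the node_uuid bytes, then the pickled chunk of
# (metric, (time, value)) tuples.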
def carbon_writer_thread(metrics_queue, carbon_host, carbon_port):
carbon_receiver = (carbon_host, int(carbon_port))
carbon_sock = None
while True:
try:
if not carbon_sock:
_log("Connecting to carbon at %s:%d", *carbon_receiver)
carbon_sock = socket.create_connection(carbon_receiver)
_log("Connected to carbon at %s:%d", *carbon_receiver)
metrics = metrics_queue.get() # blocking
for metric in metrics:
m_name = metric[0]
datapoints = metric[1:]
for datapoint in datapoints:
m_time = datapoint[0]
m_val = datapoint[1]
carbon_sock.sendall("%s %s %s\n" % (m_name, m_val, m_time))
except socket.error as sockerr:
_log("Something happened to my carbon socket: %r", sockerr)
carbon_sock = None
sleep(0.05) # don't peg the CPU if carbon is down
def write(values, data):
"""Pass collected data to the writer thread. Also catch thread death."""
if not data.get("monitor_endpoint"):
return
if not all(t.isAlive() for t in data["threads"]):
# In general, suicide is not the answer. Here, we make an exception.
os._exit(4)
# Pickled datapoints look like this:
# [
# (<metric_string>, (<time>, <value>))
# ]
# so the fact that they wait around a while does not affect their
# correctness.
data["metrics_queue"].put(
[
# We cast to float since we saw some metrics with values like '0' (a
# one-character string). It only broke the metric diversion code,
# which has been fixed, but we should make sure we're sending only
# float values out of here, too.
(metric, (values.time, float(v)))
for metric, v in split_values(values, data)
]
)
def read(data):
"""
Collect some metrics and send 'em directly to the writer thread.
The node_uuid is prepended to metric names generated by the actual
collection function(s).
Pickled datapoints look like this:
[
(<metric_string>, (<time>, <value>))
]
so the fact that they wait around a while does not affect their
correctness.
"""
if not data.get("monitor_endpoint"):
return
t = int(time())
metrics = []
try:
# Don't let otherwise-untrapped exceptions (code bugs) in the
# individual collectors prevent us from sending metrics we did manage
# to get.
metrics.extend(network_stats(data, t))
metrics.extend(xfs_stats(data, t))
metrics.extend(recon_stats(data, t))
metrics.extend(replication_progress_stats(data, t))
except Exception:
_log(traceback.format_exc())
# Prepend the node_uuid once, to make the actual collector function(s)
# simpler.
if data["node_uuid"]:
metrics = [(data["node_uuid"] + "." + m, tv) for m, tv in metrics]
data["metrics_queue"].put(metrics)
def normalized_sharding_data(cache_dir="/var/cache/swift"):
container_file = os.path.join(cache_dir, "container.recon")
try:
with open(container_file, "r") as fp:
data = json.load(fp)
except (OSError, IOError, ValueError):
# Missing file, bad perms, JSON decode error...
return {}
sharding_info = data.get("sharding_stats", {}).get("sharding", {})
# If present, sharding_candidates should be of the form
# {
# "found": 1234,
# "top": [
# {"account": "AUTH_foo",
# "container": "c",
# "file_size": 1234,
# "meta_timestamp": "1522952237.71391"
# "node_index": 0,
# "object_count": 1049,
# "path": "/srv/node/d2/containers/.../hash.db",
# "root": "AUTH_a/c2",
# },
# ...
# ]
# }
# where 'top' is limited by recon_candidates_limit. shrinking_candidates
# doesn't actually exist yet, but presumably will be somewhat similar.
#
# sharding_in_progress, meanwhile, should be of the form
# {
# "all": [
# {"account": "AUTH_foo",
# "container": "c",
# "file_size": 1234,
# "meta_timestamp": "1522952237.71391"
# "node_index": 0,
# "object_count": 1049,
# "path": "/srv/node/d2/containers/.../hash.db",
# "root": "AUTH_a/c2",
# "error": null or string,
# "state": "sharded" (or whatever),
# "db_state": "sharding" (or whatever),
# "found": x,
# "cleaved": y,
# "active": z,
# },
# ...
# ]
# }
return {
k: sharding_info.get(k, {})
for k in (
"sharding_candidates",
"sharding_in_progress",
"shrinking_candidates",
)
}
def normalized_recon_data(now, cache_dir="/var/cache/swift"):
account_file = os.path.join(cache_dir, "account.recon")
container_file = os.path.join(cache_dir, "container.recon")
object_file = os.path.join(cache_dir, "object.recon")
data = {"account": {}, "container": {}, "object": {}}
if os.path.exists(account_file):
try:
account_stats = json.loads(open(account_file).read())
if "replication_time" in account_stats:
data["account"]["replication_duration"] = account_stats[
"replication_time"
]
if "replication_last" in account_stats:
data["account"]["replication_last"] = account_stats["replication_last"]
elif (
"replication_stats" in account_stats
and "replication_time" in account_stats
):
if "start" in account_stats["replication_stats"]:
data["account"]["replication_last"] = (
account_stats["replication_stats"]["start"]
+ account_stats["replication_time"]
)
if "account_auditor_pass_completed" in account_stats:
data["account"]["auditor_duration"] = account_stats[
"account_auditor_pass_completed"
]
except (ValueError, OSError):
pass # JSON decode error
if os.path.exists(container_file):
try:
container_stats = json.loads(open(container_file).read())
if "replication_time" in container_stats:
data["container"]["replication_duration"] = container_stats[
"replication_time"
]
if "replication_last" in container_stats:
data["container"]["replication_last"] = container_stats[
"replication_last"
]
elif (
"replication_stats" in container_stats
and "replication_time" in container_stats
):
if "start" in container_stats["replication_stats"]:
data["container"]["replication_last"] = (
container_stats["replication_stats"]["start"]
+ container_stats["replication_time"]
)
if "sharding_time" in container_stats:
data["container"]["sharding_duration"] = container_stats[
"sharding_time"
]
if "sharding_last" in container_stats:
data["container"]["sharding_last"] = container_stats["sharding_last"]
if "container_auditor_pass_completed" in container_stats:
data["container"]["auditor_duration"] = container_stats[
"container_auditor_pass_completed"
]
if "container_updater_sweep" in container_stats:
data["container"]["updater_duration"] = container_stats[
"container_updater_sweep"
]
except (ValueError, OSError):
pass # JSON decode error
if os.path.exists(object_file):
try:
with open(object_file) as fp:
object_stats = json.load(fp)
if "async_pending" in object_stats:
data["object"]["async_pending"] = object_stats["async_pending"]
if "object_replication_time" in object_stats:
# normalize to seconds
data["object"]["replication_duration"] = (
object_stats["object_replication_time"] * 60
)
if "object_replication_last" in object_stats:
data["object"]["replication_last"] = object_stats[
"object_replication_last"
]
elif "object_replication_time" in object_stats:
data["object"]["replication_last"] = (
now - object_stats["object_replication_time"] * 60
)
if "object_reconstruction_time" in object_stats:
# normalize to seconds
data["object"]["reconstruction_duration"] = (
object_stats["object_reconstruction_time"] * 60
)
if "object_reconstruction_last" in object_stats:
data["object"]["reconstruction_last"] = object_stats[
"object_reconstruction_last"
]
if "object_updater_sweep" in object_stats:
data["object"]["updater_duration"] = object_stats[
"object_updater_sweep"
]
except (ValueError, OSError):
pass # JSON decode error
return data
def recon_stats(data, timestamp, cache_dir="/var/cache/swift"):
metrics = []
recon_data = normalized_recon_data(timestamp, cache_dir)
account_stats = recon_data["account"]
# NOTE: replication_duration/replication_last no longer tracked using
# Whisper metrics.
if "auditor_duration" in account_stats:
metrics.append(
(
"recon.account.auditor_duration",
(timestamp, account_stats["auditor_duration"]),
)
)
container_stats = recon_data["container"]
# NOTE: replication_duration/replication_last no longer tracked using
# Whisper metrics.
if "auditor_duration" in container_stats:
metrics.append(
(
"recon.container.auditor_duration",
(timestamp, container_stats["auditor_duration"]),
)
)
if "updater_duration" in container_stats:
metrics.append(
(
"recon.container.updater_duration",
(timestamp, container_stats["updater_duration"]),
)
)
object_stats = recon_data["object"]
if "async_pending" in object_stats:
metrics.append(
("recon.object.async_pending", (timestamp, object_stats["async_pending"]))
)
# NOTE: replication_duration/replication_last no longer tracked using
# Whisper metrics.
if "updater_duration" in object_stats:
metrics.append(
(
"recon.object.updater_duration",
(timestamp, object_stats["updater_duration"]),
)
)
return metrics
REPLICATION_PROGRESS_STATS_FILE = "/opt/ss/var/lib/replication_progress.json"
def read_replication_progress():
"""
Read the stats file from disk.
"""
try:
with open(REPLICATION_PROGRESS_STATS_FILE) as f:
return json.load(f)
except (OSError, IOError) as e:
if e.errno not in (errno.ENOENT,):
raise
except ValueError:
pass
return {}
def replication_progress_stats(data, timestamp):
metrics = []
stats = read_replication_progress()
aggregate = defaultdict(lambda: defaultdict(int))
for device, type_stats in stats.items():
for type_, stats in type_stats.items():
prefix = "replication.%s.%s." % (device, type_)
for key, value in stats.items():
metrics.append((prefix + key, (timestamp, value)))
aggregate[type_][key] += value
for type_, stats in aggregate.items():
prefix = "replication.ALL.%s." % type_
for key, value in stats.items():
metrics.append((prefix + key, (timestamp, value)))
return metrics
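# Example (illustrative): with stats {"d1": {"accounts": {"primary": 1}},
# "d2": {"accounts": {"primary": 7}}}, the function emits
# replication.d1.accounts.primary, replication.d2.accounts.primary, and an
# aggregated replication.ALL.accounts.primary with value 8.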
def network_stats(data, timestamp):
"""Collect some network stats and return them in the format:
[(<metric_name>, (timestamp, <value>))]
"""
stats_file = "/proc/net/snmp"
parsed = {}
if os.path.exists(stats_file):
try:
with open(stats_file, "rb") as stats_fh:
line_type, labels = None, None
for line in stats_fh.readlines():
parts = line.split()
if parts[1].isdigit() and parts[0] == line_type + ":":
parsed[line_type] = dict(zip(labels, parts[1:]))
elif not parts[1].isdigit():
line_type = parts[0].split(":")[0]
labels = parts[1:]
except Exception:
_log(traceback.format_exc())
return []
else:
return []
return [
("net.udp.InErrors", (timestamp, parsed["Udp"]["InErrors"])),
("net.udp.RcvbufErrors", (timestamp, parsed["Udp"]["RcvbufErrors"])),
("net.tcp.AttemptFails", (timestamp, parsed["Tcp"]["AttemptFails"])),
("net.tcp.RetransSegs", (timestamp, parsed["Tcp"]["RetransSegs"])),
("net.ip.InHdrErrors", (timestamp, parsed["Ip"]["InHdrErrors"])),
("net.ip.FragFails", (timestamp, parsed["Ip"]["FragFails"])),
("net.ip.FragCreates", (timestamp, parsed["Ip"]["FragCreates"])),
]
def xfs_stats(data, timestamp):
"""
Collect some xfs stats and return them in the format:
[(<metric_name>, (timestamp, <value>))]
See http://xfs.org/index.php/Runtime_Stats
"""
stats_file = "/proc/fs/xfs/stat"
parsed = {}
if os.path.exists(stats_file):
try:
with open(stats_file, "rb") as stats_fh:
for line in stats_fh.readlines():
label, stats = line.split(" ", 1)
parsed[label] = map(int, stats.split())
except Exception:
_log(traceback.format_exc())
return []
else:
return []
stats = []
potential_stats = {
("dir", "dir_ops"): [
# This is a count of the number of file name directory lookups in
# XFS filesystems. It counts only those lookups which miss in the
# operating system's directory name lookup cache and must search
# the real directory structure for the name in question. The count
# is incremented once for each level of a pathname search that
# results in a directory lookup.
("lookup", 0),
# This is the number of times the XFS directory getdents operation
# was performed. The getdents operation is used by programs to read
# the contents of directories in a file system independent fashion.
# This count corresponds exactly to the number of times the
# getdents(2) system call was successfully used on an XFS
# directory.
("getdents", 3),
],
("ig", "inode_ops"): [
# This is the number of times the operating system looked for an
# XFS inode in the inode cache and found it. The closer this count
# is to the ig_attempts count the better the inode cache is
# performing.
("ig_found", 1),
# This is the number of times the operating system looked for an
# XFS inode in the inode cache and the inode was not there. The
# further this count is from the ig_attempts count the better.
("ig_missed", 3),
# This is the number of times the operating system recycled an XFS
# inode from the inode cache in order to use the memory for that
# inode for another purpose.
("ig_reclaims", 5),
],
("log", "log"): [
# This variable counts the number of log buffer writes going to the
# physical log partitions of all XFS filesystems.
("writes", 0),
# This variable counts (in 512-byte units) the information being
# written to the physical log partitions of all XFS filesystems.
("blocks", 1),
# This variable keeps track of times when a logged transaction can
# not get any log buffer space. When this occurs, all of the
# internal log buffers are busy flushing their data to the physical
# on-disk log.
("noiclogs", 2),
# The number of times the in-core log is forced to disk. It is
# equivalent to the number of successful calls to the function
# xfs_log_force().
("force", 3),
],
("xstrat", "xstrat"): [
# This is the number of buffers flushed out by the XFS flushing
# daemons which are written to contiguous space on disk. This one
# is GOOD.
("quick", 0),
# This is the number of buffers flushed out by the XFS flushing
# daemons which are written to non-contiguous space on disk. This
# one is BAD.
("split", 1),
],
("xpc", "xpc"): [
# This is a count of bytes of file data flushed out by the XFS
# flushing daemons. 64-bit counter.
("xstrat_bytes", 0),
# This is a count of bytes written via write(2) system calls to
# files in XFS file systems. It can be used in conjunction with the
# write_calls count to calculate the average size of the write
# operations to files in XFS file systems.
("write_bytes", 1),
# This is a count of bytes read via read(2) system calls to files
# in XFS file systems. It can be used in conjunction with the
# read_calls count to calculate the average size of the read
# operations to files in XFS file systems.
("read_bytes", 2),
],
("rw", "rw"): [
# This is the number of write(2) system calls made to files in XFS
# file systems.
("write_calls", 0),
# This is the number of read(2) system calls made to files in XFS
# file systems.
("read_calls", 1),
],
}
    for parsed_key, metric_group in potential_stats:
if parsed_key in parsed:
parsed_len = len(parsed[parsed_key])
for metric_key, parsed_idx in potential_stats[(parsed_key, metric_group)]:
if parsed_idx < parsed_len:
stats.append(
(
"xfs.%s.%s" % (metric_group, metric_key),
(timestamp, parsed[parsed_key][parsed_idx]),
)
)
return stats
def pascal_to_snake(pascal_str):
first_char = True
snake_str = ""
for char in pascal_str:
if char.isupper():
if first_char:
first_char = False
snake_str += char.lower()
else:
snake_str += "_%s" % char.lower()
else:
snake_str += char
return snake_str
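# Example (illustrative): pascal_to_snake("RcvbufErrors") == "rcvbuf_errors";
# the first capital is lowered without a leading underscore.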
def split_values(values, data):
return_list = []
if len(values.values) == 1:
# simple case
metric = _metric(values, data)
if metric:
return_list.append((metric, values.values[0]))
else:
for ds, value in zip(db[values.type], values.values):
metric = _metric(values, data, ds)
if metric:
return_list.append((metric, value))
return return_list
def _metric(values, data, ds=None):
"""
Generate a carbon metric string something along the lines of:
<node_uuid>.<plugin>.<plugin_instance>.<type>.<type_instance>.<ds>
Don't include metric parts which are empty-string. Also suppress
duplicates between plugin & plugin_instance and between type and plugin.
This is also where filtering can take place, as a false-y return will
suppress the sending of the metric.
"""
if _should_filter(values, data, ds):
return None
# Compensate for the Aggregation plugin's introduced one-interval lag.
# When Aggregation emits a data point, the value it has is *actually* for
# one "interval" ago.
if values.plugin == "aggregation":
values.time -= data["statsd_flush_interval"]
metric = ".".join(
escape_metric_segment(part)
for part in (
values.plugin,
(values.plugin_instance if values.plugin_instance != values.plugin else ""),
(values.type if values.type != values.plugin else ""),
values.type_instance,
ds,
)
if part
)
# HACK: we don't want this escaped, as it lets us use things like
# "ssman.g01" as hostnames to get all our metrics prefixed
if data["node_uuid"]:
metric = data["node_uuid"] + "." + metric
return metric
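# Example (illustrative): for plugin="interface", plugin_instance="eth0",
# type="if_octets", type_instance="" and ds="rx", the metric becomes
# "<node_uuid>.interface.eth0.if_octets.rx" (empty parts are skipped and
# segments that duplicate the plugin name are collapsed).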
FILTER = dict(
cpu=[dict(plugin="cpu")], # Filter all cpu metrics; we aggregate 'em
memcached=[
dict(type="ps_cputime"),
dict(type="ps_count"),
dict(type="memcached_command", type_instance="flush"),
dict(type="memcached_command", type_instance="touch"),
dict(type="percent", type_instance="incr_hitratio"),
dict(type="percent", type_instance="decr_hitratio"),
dict(type="memcached_octets"),
],
df=[
dict(plugin_instance="root", type_instance="used"),
dict(type_instance="reserved"),
],
load=[dict(ds="longterm")],
processes=[
dict(type="ps_stacksize"),
dict(type="ps_pagefaults"),
dict(type="ps_data"),
dict(type="ps_code"),
dict(type="ps_state"),
# For fd count, our patched collectd also sends them as the old name
# "ps_fd_count", so here we filter out the new name; this prevents us
# having to migrate whisper data or combine 2 metric names.
dict(type="file_handles"),
# These just measure system calls doing "I/O" which could be terminals
# or sockets--which isn't as interesting to me... so we filter them.
dict(type="io_ops"),
dict(type="io_octets"),
],
ipvs=[dict(type="if_packets"), dict(type="if_octets")],
swap=[
dict(type="swap", type_instance="free"),
dict(type="swap", type_instance="cached"),
],
openvpn=[dict(type_instance="overhead"), dict(plugin_instance="UNDEF")],
interface=[dict(plugin_instance="tun0")],
)
def _should_filter(values, data, ds):
for f in FILTER.get(values.plugin, []):
should_filter = True
for k in f.keys():
if k == "ds":
if ds != f[k]:
should_filter = False
else:
if getattr(values, k) != f[k]:
should_filter = False
if should_filter:
return True
return False
# Only hook in to collectd code if running inside collectd. Otherwise it's
# really difficult to test the code in here.
if running_inside_collectd:
data = {}
collectd.register_init(init)
collectd.register_config(config, data)
collectd.register_write(write, data)
collectd.register_read(read, data=data)
|
swiftstack-collectd-plugin-main
|
collectd_plugin/collectd_plugin.py
|
# Copyright (c) 2011-2020, NVIDIA CORPORATION.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see
# <http://www.gnu.org/licenses/>.
|
swiftstack-collectd-plugin-main
|
collectd_plugin/__init__.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
abs_path = os.path.dirname(os.path.realpath(__file__))
library_dirs = [abs_path]
extra_libraries = ['wavenet_infer']
extra_includes = [abs_path]
setup(name='nv_wavenet_ext',
ext_modules=[CUDAExtension(name='nv_wavenet_ext',
sources=['wavenet_infer_wrapper.cpp'],
library_dirs=library_dirs,
runtime_library_dirs=library_dirs,
libraries=extra_libraries,
include_dirs=extra_includes)],
cmdclass={'build_ext': BuildExtension})
|
nv-wavenet-master
|
pytorch/build.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
"""
Tests that the NV-WaveNet class is producing audio
"""
import torch
from scipy.io.wavfile import write
import nv_wavenet
import utils
if __name__ == '__main__':
model = torch.load("model.pt")
wavenet = nv_wavenet.NVWaveNet(**model)
cond_input = torch.load("cond_input.pt")
samples = wavenet.infer(cond_input, nv_wavenet.Impl.PERSISTENT)[0]
audio = utils.mu_law_decode_numpy(samples.cpu().numpy(), 256)
audio = utils.MAX_WAV_VALUE * audio
wavdata = audio.astype('int16')
write('audio.wav',16000, wavdata)
|
nv-wavenet-master
|
pytorch/nv_wavenet_test.py
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import wavenet
import math
class Conv(torch.nn.Module):
"""
A convolution with the option to be causal and use xavier initialization
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
dilation=1, bias=True, w_init_gain='linear', is_causal=False):
super(Conv, self).__init__()
self.is_causal = is_causal
self.kernel_size = kernel_size
self.dilation = dilation
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
dilation=dilation, bias=bias)
torch.nn.init.xavier_uniform(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
if self.is_causal:
padding = (int((self.kernel_size - 1) * (self.dilation)), 0)
signal = torch.nn.functional.pad(signal, padding)
return self.conv(signal)
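# Causal padding example (illustrative): with kernel_size=2 and dilation=4,
# (kernel_size - 1) * dilation = 4 zeros are prepended (left side only), so
# the output keeps the input length and position t never sees inputs after t.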
class WaveNet(torch.nn.Module):
def __init__(self, n_in_channels, n_layers, max_dilation,
n_residual_channels, n_skip_channels, n_out_channels,
n_cond_channels, upsamp_window, upsamp_stride):
super(WaveNet, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_cond_channels,
n_cond_channels,
upsamp_window,
upsamp_stride)
self.n_layers = n_layers
self.max_dilation = max_dilation
self.n_residual_channels = n_residual_channels
self.n_out_channels = n_out_channels
self.cond_layers = Conv(n_cond_channels, 2*n_residual_channels*n_layers,
w_init_gain='tanh')
self.dilate_layers = torch.nn.ModuleList()
self.res_layers = torch.nn.ModuleList()
self.skip_layers = torch.nn.ModuleList()
self.embed = torch.nn.Embedding(n_in_channels,
n_residual_channels)
self.conv_out = Conv(n_skip_channels, n_out_channels,
bias=False, w_init_gain='relu')
self.conv_end = Conv(n_out_channels, n_out_channels,
bias=False, w_init_gain='linear')
loop_factor = math.floor(math.log2(max_dilation)) + 1
for i in range(n_layers):
dilation = 2 ** (i % loop_factor)
# Kernel size is 2 in nv-wavenet
in_layer = Conv(n_residual_channels, 2*n_residual_channels,
kernel_size=2, dilation=dilation,
w_init_gain='tanh', is_causal=True)
self.dilate_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_layer = Conv(n_residual_channels, n_residual_channels,
w_init_gain='linear')
self.res_layers.append(res_layer)
skip_layer = Conv(n_residual_channels, n_skip_channels,
w_init_gain='relu')
self.skip_layers.append(skip_layer)
def forward(self, forward_input):
features = forward_input[0]
forward_input = forward_input[1]
cond_input = self.upsample(features)
assert(cond_input.size(2) >= forward_input.size(1))
if cond_input.size(2) > forward_input.size(1):
cond_input = cond_input[:, :, :forward_input.size(1)]
forward_input = self.embed(forward_input.long())
forward_input = forward_input.transpose(1, 2)
cond_acts = self.cond_layers(cond_input)
cond_acts = cond_acts.view(cond_acts.size(0), self.n_layers, -1, cond_acts.size(2))
for i in range(self.n_layers):
in_act = self.dilate_layers[i](forward_input)
in_act = in_act + cond_acts[:,i,:,:]
t_act = torch.nn.functional.tanh(in_act[:, :self.n_residual_channels, :])
s_act = torch.nn.functional.sigmoid(in_act[:, self.n_residual_channels:, :])
acts = t_act * s_act
if i < len(self.res_layers):
res_acts = self.res_layers[i](acts)
forward_input = res_acts + forward_input
if i == 0:
output = self.skip_layers[i](acts)
else:
output = self.skip_layers[i](acts) + output
output = torch.nn.functional.relu(output, True)
output = self.conv_out(output)
output = torch.nn.functional.relu(output, True)
output = self.conv_end(output)
# Remove last probabilities because they've seen all the data
last = output[:, :, -1]
last = last.unsqueeze(2)
output = output[:, :, :-1]
# Replace probability for first value with 0's because we don't know
first = last * 0.0
output = torch.cat((first, output), dim=2)
return output
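    # After the shift above, column t of the returned logits was computed only
    # from samples earlier than t, so it can be scored against sample t without
    # the network having seen its own target.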
def export_weights(self):
"""
Returns a dictionary with tensors ready for nv_wavenet wrapper
"""
model = {}
        # We're not using a convolution to start, so this does nothing
model["embedding_prev"] = torch.cuda.FloatTensor(self.n_out_channels,
self.n_residual_channels).fill_(0.0)
model["embedding_curr"] = self.embed.weight.data
model["conv_out_weight"] = self.conv_out.conv.weight.data
model["conv_end_weight"] = self.conv_end.conv.weight.data
dilate_weights = []
dilate_biases = []
for layer in self.dilate_layers:
dilate_weights.append(layer.conv.weight.data)
dilate_biases.append(layer.conv.bias.data)
model["dilate_weights"] = dilate_weights
model["dilate_biases"] = dilate_biases
model["max_dilation"] = self.max_dilation
res_weights = []
res_biases = []
for layer in self.res_layers:
res_weights.append(layer.conv.weight.data)
res_biases.append(layer.conv.bias.data)
model["res_weights"] = res_weights
model["res_biases"] = res_biases
skip_weights = []
skip_biases = []
for layer in self.skip_layers:
skip_weights.append(layer.conv.weight.data)
skip_biases.append(layer.conv.bias.data)
model["skip_weights"] = skip_weights
model["skip_biases"] = skip_biases
model["use_embed_tanh"] = False
return model
def get_cond_input(self, features):
"""
Takes in features and gets the 2*R x batch x # layers x samples tensor
"""
        # TODO(rcosta): trim conv artifacts. Maybe pad spec to kernel multiple
cond_input = self.upsample(features)
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
cond_input = cond_input[:, :, :-time_cutoff]
cond_input = self.cond_layers(cond_input).data
cond_input = cond_input.view(cond_input.size(0), self.n_layers, -1, cond_input.size(2))
# This makes the data channels x batch x num_layers x samples
cond_input = cond_input.permute(2,0,1,3)
return cond_input
|
nv-wavenet-master
|
pytorch/wavenet.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import sys
import time
import subprocess
import argparse
import torch
import torch.distributed as dist
from torch.nn.modules import Module
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= num_gpus
return rt
def init_distributed(rank, num_gpus, group_name, dist_backend, dist_url):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(dist_backend, init_method=dist_url,
world_size=num_gpus, rank=rank,
group_name=group_name)
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
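# Round-trip sketch (illustrative): for tensors a of shape (2, 3) and b of
# shape (5,), _flatten_dense_tensors([a, b]) is an 11-element buffer, and
# _unflatten_dense_tensors(flat, [a, b]) returns views shaped (2, 3) and (5,)
# holding the original values.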
def apply_gradient_allreduce(module):
"""
    Modifies the existing model to do gradient allreduce without changing its
    class, so parameters don't need to be accessed through a wrapping "module"
    attribute.
"""
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
dir(param)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
def main(config, stdout_dir, args_str):
args_list = ['train.py']
args_list += args_str.split(' ') if len(args_str) > 0 else []
args_list.append('--config={}'.format(config))
num_gpus = torch.cuda.device_count()
args_list.append('--num_gpus={}'.format(num_gpus))
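    # Note (editor's clarification): the '--num_gpus' entry appended above is
    # overwritten with '--rank={i}' inside the worker loop below; train.py
    # determines the GPU count on its own.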
args_list.append("--group_name=group_{}".format(time.strftime("%Y_%m_%d-%H%M%S")))
#args_list.append(json_path)
if not os.path.isdir(stdout_dir):
os.makedirs(stdout_dir)
os.chmod(stdout_dir, 0o775)
workers = []
for i in range(num_gpus):
args_list[-2] = '--rank={}'.format(i)
stdout = None if i == 0 else open(
os.path.join(stdout_dir, "GPU_{}.log".format(i)), "w")
print(args_list)
p = subprocess.Popen([str(sys.executable)]+args_list, stdout=stdout)
workers.append(p)
for p in workers:
p.wait()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True,
help='JSON file for configuration')
parser.add_argument('-s', '--stdout_dir', type=str, default=".",
                        help='directory to save stdout logs')
parser.add_argument(
'-a', '--args_str', type=str, default='',
help='double quoted string with space separated key value pairs')
args = parser.parse_args()
main(args.config, args.stdout_dir, args.args_str)
|
nv-wavenet-master
|
pytorch/distributed.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import torch
import numpy as np
from scipy.io.wavfile import read
MAX_WAV_VALUE = 32768.0
def load_wav_to_torch(full_path):
"""
    Loads wav data into a torch array
"""
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def files_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
def mu_law_decode_numpy(x, mu_quantization=256):
assert(np.max(x) <= mu_quantization)
assert(np.min(x) >= 0)
mu = mu_quantization - 1.
# Map values back to [-1, 1].
signal = 2 * (x / mu) - 1
# Perform inverse of mu-law transformation.
magnitude = (1 / mu) * ((1 + mu)**np.abs(signal) - 1)
return np.sign(signal) * magnitude
def mu_law_decode(x, mu_quantization=256):
assert(torch.max(x) <= mu_quantization)
assert(torch.min(x) >= 0)
x = x.float()
mu = mu_quantization - 1.
# Map values back to [-1, 1].
signal = 2 * (x / mu) - 1
# Perform inverse of mu-law transformation.
magnitude = (1 / mu) * ((1 + mu)**torch.abs(signal) - 1)
return torch.sign(signal) * magnitude
def mu_law_encode(x, mu_quantization=256):
assert(torch.max(x) <= 1.0)
assert(torch.min(x) >= -1.0)
mu = mu_quantization - 1.
scaling = np.log1p(mu)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / scaling
encoding = ((x_mu + 1) / 2 * mu + 0.5).long()
return encoding
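# Editor's illustration (hedged sketch, not part of the original file): a mu-law
# encode/decode round trip. Codes land in [0, mu_quantization - 1]; decoding only
# approximates the input because of quantization, so a loose tolerance is used.
def _example_mu_law_roundtrip():
    x = torch.linspace(-1.0, 1.0, steps=8)
    codes = mu_law_encode(x, mu_quantization=256)     # integer codes in [0, 255]
    decoded = mu_law_decode(codes, mu_quantization=256)
    assert codes.min() >= 0 and codes.max() <= 255
    assert torch.max(torch.abs(decoded - x)) < 5e-2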
|
nv-wavenet-master
|
pytorch/utils.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import nv_wavenet_ext
def interleave_lists(a, b, c, d, e, f, g):
return [x for t in zip(a, b, c, d, e, f, g) for x in t]
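# Editor's illustration (hedged sketch, not part of the original file): the helper
# above groups the seven lists element-wise, i.e. [a0, b0, ..., g0, a1, b1, ..., g1].
def _example_interleave_lists():
    out = interleave_lists([1, 2], [3, 4], [5, 6], [7, 8],
                           [9, 10], [11, 12], [13, 14])
    assert out == [1, 3, 5, 7, 9, 11, 13, 2, 4, 6, 8, 10, 12, 14]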
def column_major(x):
"""
PyTorch Tensors are row major, so this just returns a contiguous transpose
"""
    assert(x.is_contiguous())
if len(x.size()) == 1:
return x
if len(x.size()) == 3:
assert(x.size(2)==1)
x = torch.squeeze(x)
if len(x.size())==2:
return torch.t(x).contiguous()
if len(x.size())==4:
return x.permute(3,2,1,0).contiguous()
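# Editor's illustration (hedged sketch, not part of the original file): for a 2D
# weight, column_major() is simply a contiguous transpose.
def _example_column_major():
    w = torch.arange(6.0).view(2, 3)
    wc = column_major(w)
    assert wc.is_contiguous() and wc.size() == (3, 2)
    assert torch.equal(wc, w.t())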
def enum(**enums):
return type('Enum', (), enums)
Impl = enum(AUTO=0, SINGLE_BLOCK=1, DUAL_BLOCK=2, PERSISTENT=3)
class NVWaveNet:
def __init__(self, embedding_prev,
embedding_curr,
conv_out_weight,
conv_end_weight,
dilate_weights,
dilate_biases,
max_dilation,
res_weights,
res_biases,
skip_weights,
skip_biases,
use_embed_tanh):
self.R = nv_wavenet_ext.num_res_channels()
self.S = nv_wavenet_ext.num_skip_channels()
self.A = nv_wavenet_ext.num_out_channels()
self.max_dilation = max_dilation
self.use_embed_tanh = use_embed_tanh
assert embedding_prev.size() == (self.A, self.R), \
("embedding_prev: {} doesn't match compiled"
" nv-wavenet size: {}").format(embedding_prev.size(),
(self.A, self.R))
self.embedding_prev = column_major(torch.t(embedding_prev))
assert embedding_curr.size() == (self.A, self.R), \
("embedding_curr: {} doesn't match compiled"
" nv-wavenet size: {}").format(embedding_curr.size(),
(self.A, self.R))
self.embedding_curr = column_major(torch.t(embedding_curr))
assert conv_out_weight.size()[:2] == (self.A, self.S), \
("conv_out_weight: {} doesn't match compiled"
" nv-wavenet size: {}").format(conv_out_weight.size()[:2],
(self.A, self.S))
self.conv_out = column_major(conv_out_weight)
assert conv_end_weight.size()[:2] == (self.A, self.A), \
("conv_end_weight: {} doesn't match compiled"
" nv-wavenet size: {}").format(conv_end_weight.size()[:2],
(self.A, self.A))
self.conv_end = column_major(conv_end_weight)
dilate_weights_prev = []
dilate_weights_curr = []
for weight in dilate_weights:
assert weight.size(2) == 2, \
"nv-wavenet only supports kernel_size 2"
assert weight.size()[:2] == (2*self.R, self.R), \
("dilated weight: {} doesn't match compiled"
" nv-wavenet size: {}").format(weight.size()[:2],
(2*self.R, self.R))
Wprev = column_major(weight[:,:,0])
Wcurr = column_major(weight[:,:,1])
dilate_weights_prev.append(Wprev)
dilate_weights_curr.append(Wcurr)
for bias in dilate_biases:
assert(bias.size(0) == 2*self.R)
for weight in res_weights:
assert weight.size()[:2] == (self.R, self.R), \
("residual weight: {} doesn't match compiled"
" nv-wavenet size: {}").format(weight.size()[:2],
(self.R, self.R))
for bias in res_biases:
assert(bias.size(0) == self.R), \
("residual bias: {} doesn't match compiled"
" nv-wavenet size: {}").format(bias.size(0), self.R)
for weight in skip_weights:
assert weight.size()[:2] == (self.S, self.R), \
("skip weight: {} doesn't match compiled"
" nv-wavenet size: {}").format(weight.size()[:2],
(self.S, self.R))
for bias in skip_biases:
assert(bias.size(0) == self.S), \
("skip bias: {} doesn't match compiled"
" nv-wavenet size: {}").format(bias.size(0), self.S)
dilate_biases = [column_major(bias) for bias in dilate_biases]
res_weights = [column_major(weight) for weight in res_weights]
res_biases = [column_major(bias) for bias in res_biases]
skip_weights = [column_major(weight) for weight in skip_weights]
skip_biases = [column_major(bias) for bias in skip_biases]
# There's an extra residual layer that's not used
res_weights.append(torch.zeros(self.R,self.R))
res_biases.append(torch.zeros(self.R))
assert(len(res_biases)==len(skip_biases) and
len(res_biases)==len(dilate_biases) and
len(res_weights)==len(skip_weights) and
len(res_weights)==len(dilate_weights)), \
"""Number of layers is inconsistent for different parameter types.
The list sizes should be the same for skip weights/biases and
dilate weights/biases. Additionally the residual weights/biases
lists should be one shorter. But their sizes are:
len(dilate_weights) = {}
                len(dilate_biases) = {}
len(skip_weights) = {}
len(skip_biases) = {}
len(res_weights) = {}
len(res_biases) = {}""".format(len(dilate_weights),
len(dilate_biases),
len(skip_weights),
len(skip_biases),
len(res_weights)-1,
len(res_biases)-1)
self.num_layers = len(res_biases)
self.layers = interleave_lists(dilate_weights_prev,
dilate_weights_curr,
dilate_biases,
res_weights,
res_biases,
skip_weights,
skip_biases)
def infer(self, cond_input, implementation):
# cond_input is channels x batch x num_layers x samples
assert(cond_input.size()[0:3:2] == (2*self.R, self.num_layers)), \
"""Inputs are channels x batch x num_layers x samples.
Channels and num_layers should be sizes: {}
But input is: {}""".format((2*self.R, self.num_layers),
cond_input.size()[0:3:2])
batch_size = cond_input.size(1)
sample_count = cond_input.size(3)
cond_input = column_major(cond_input)
samples = torch.cuda.IntTensor(batch_size, sample_count)
nv_wavenet_ext.infer(samples,
sample_count,
batch_size,
self.embedding_prev,
self.embedding_curr,
self.conv_out,
self.conv_end,
cond_input,
self.num_layers,
self.use_embed_tanh,
self.max_dilation,
implementation,
self.layers)
return samples
|
nv-wavenet-master
|
pytorch/nv_wavenet.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import json
import os
import time
import torch
#=====START: ADDED FOR DISTRIBUTED======
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from torch.utils.data.distributed import DistributedSampler
#=====END: ADDED FOR DISTRIBUTED======
from torch.utils.data import DataLoader
from wavenet import WaveNet
from mel2samp_onehot import Mel2SampOnehot
from utils import to_gpu
class CrossEntropyLoss(torch.nn.Module):
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.num_classes = wavenet_config["n_out_channels"]
def forward(self, inputs, targets):
"""
inputs are batch by num_classes by sample
targets are batch by sample
torch CrossEntropyLoss needs
input = batch * samples by num_classes
targets = batch * samples
"""
targets = targets.view(-1)
inputs = inputs.transpose(1, 2)
inputs = inputs.contiguous()
inputs = inputs.view(-1, self.num_classes)
return torch.nn.CrossEntropyLoss()(inputs, targets)
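# Editor's illustration (hedged sketch, not part of the original file): the reshaping
# performed in CrossEntropyLoss.forward above, spelled out on small dummy shapes.
def _example_cross_entropy_shapes():
    batch, num_classes, samples = 2, 256, 5
    inputs = torch.randn(batch, num_classes, samples)          # network output
    targets = torch.randint(0, num_classes, (batch, samples))  # class indices
    flat_inputs = inputs.transpose(1, 2).contiguous().view(-1, num_classes)
    flat_targets = targets.view(-1)
    assert flat_inputs.size() == (batch * samples, num_classes)
    loss = torch.nn.CrossEntropyLoss()(flat_inputs, flat_targets)
    assert loss.dim() == 0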
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
iteration = checkpoint_dict['iteration']
optimizer.load_state_dict(checkpoint_dict['optimizer'])
model_for_loading = checkpoint_dict['model']
model.load_state_dict(model_for_loading.state_dict())
print("Loaded checkpoint '{}' (iteration {})" .format(
checkpoint_path, iteration))
return model, optimizer, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
model_for_saving = WaveNet(**wavenet_config).cuda()
model_for_saving.load_state_dict(model.state_dict())
torch.save({'model': model_for_saving,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def train(num_gpus, rank, group_name, output_directory, epochs, learning_rate,
iters_per_checkpoint, batch_size, seed, checkpoint_path):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
init_distributed(rank, num_gpus, group_name, **dist_config)
#=====END: ADDED FOR DISTRIBUTED======
criterion = CrossEntropyLoss()
model = WaveNet(**wavenet_config).cuda()
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
model = apply_gradient_allreduce(model)
#=====END: ADDED FOR DISTRIBUTED======
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Load checkpoint if one exists
iteration = 0
if checkpoint_path != "":
model, optimizer, iteration = load_checkpoint(checkpoint_path, model,
optimizer)
iteration += 1 # next iteration is iteration + 1
trainset = Mel2SampOnehot(**data_config)
# =====START: ADDED FOR DISTRIBUTED======
train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None
# =====END: ADDED FOR DISTRIBUTED======
train_loader = DataLoader(trainset, num_workers=1, shuffle=False,
sampler=train_sampler,
batch_size=batch_size,
pin_memory=False,
drop_last=True)
# Get shared output_directory ready
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
print("output directory", output_directory)
model.train()
epoch_offset = max(0, int(iteration / len(train_loader)))
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
model.zero_grad()
x, y = batch
x = to_gpu(x).float()
y = to_gpu(y)
x = (x, y) # auto-regressive takes outputs as inputs
y_pred = model(x)
loss = criterion(y_pred, y)
if num_gpus > 1:
reduced_loss = reduce_tensor(loss.data, num_gpus)[0]
else:
reduced_loss = loss.data[0]
loss.backward()
optimizer.step()
print("{}:\t{:.9f}".format(iteration, reduced_loss))
if (iteration % iters_per_checkpoint == 0):
if rank == 0:
checkpoint_path = "{}/wavenet_{}".format(
output_directory, iteration)
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-r', '--rank', type=int, default=0,
help='rank of process for distributed')
parser.add_argument('-g', '--group_name', type=str, default='',
help='name of group for distributed')
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
config = json.loads(data)
train_config = config["train_config"]
global data_config
data_config = config["data_config"]
global dist_config
dist_config = config["dist_config"]
global wavenet_config
wavenet_config = config["wavenet_config"]
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
if args.group_name == '':
print("WARNING: Multiple GPUs detected but no distributed group set")
print("Only running 1 GPU. Use distributed.py for multiple GPUs")
num_gpus = 1
if num_gpus == 1 and args.rank != 0:
raise Exception("Doing single GPU training on rank > 0")
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
train(num_gpus, args.rank, args.group_name, **train_config)
|
nv-wavenet-master
|
pytorch/train.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
from scipy.io.wavfile import write
import torch
import nv_wavenet
import utils
def chunker(seq, size):
"""
https://stackoverflow.com/a/434328
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
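# Editor's illustration (hedged sketch, not part of the original file): chunker()
# yields successive slices of at most `size` elements.
def _example_chunker():
    assert list(chunker([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]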
def main(mel_files, model_filename, output_dir, batch_size, implementation):
mel_files = utils.files_to_list(mel_files)
model = torch.load(model_filename)['model']
wavenet = nv_wavenet.NVWaveNet(**(model.export_weights()))
for files in chunker(mel_files, batch_size):
mels = []
for file_path in files:
print(file_path)
mel = torch.load(file_path)
mel = utils.to_gpu(mel)
mels.append(torch.unsqueeze(mel, 0))
cond_input = model.get_cond_input(torch.cat(mels, 0))
audio_data = wavenet.infer(cond_input, implementation)
for i, file_path in enumerate(files):
file_name = os.path.splitext(os.path.basename(file_path))[0]
audio = utils.mu_law_decode_numpy(audio_data[i,:].cpu().numpy(), wavenet.A)
audio = utils.MAX_WAV_VALUE * audio
wavdata = audio.astype('int16')
write("{}/{}.wav".format(output_dir, file_name),
16000, wavdata)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--filelist_path", required=True)
parser.add_argument('-c', "--checkpoint_path", required=True)
parser.add_argument('-o', "--output_dir", required=True)
    parser.add_argument('-b', "--batch_size", type=int, default=1)
parser.add_argument('-i', "--implementation", type=str, default="persistent",
help="""Which implementation of NV-WaveNet to use.
                        Takes values of auto, single, dual, or persistent""")
args = parser.parse_args()
if args.implementation == "auto":
implementation = nv_wavenet.Impl.AUTO
elif args.implementation == "single":
implementation = nv_wavenet.Impl.SINGLE_BLOCK
elif args.implementation == "dual":
implementation = nv_wavenet.Impl.DUAL_BLOCK
elif args.implementation == "persistent":
implementation = nv_wavenet.Impl.PERSISTENT
else:
raise ValueError("implementation must be one of auto, single, dual, or persistent")
main(args.filelist_path, args.checkpoint_path, args.output_dir, args.batch_size, implementation)
|
nv-wavenet-master
|
pytorch/inference.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
"""
Generating pairs of mel-spectrograms and original audio
"""
import argparse
import json
import os
import random
import torch
import torch.utils.data
import sys
import utils
# We're using the audio processing from TacoTron2 to make sure it matches
sys.path.insert(0, 'tacotron2')
from tacotron2.layers import TacotronSTFT
class Mel2SampOnehot(torch.utils.data.Dataset):
"""
This is the main class that calculates the spectrogram and returns the
spectrogram, audio pair.
"""
def __init__(self, training_files, segment_length, mu_quantization,
filter_length, hop_length, win_length, sampling_rate):
audio_files = utils.files_to_list(training_files)
self.audio_files = audio_files
random.seed(1234)
random.shuffle(self.audio_files)
self.stft = TacotronSTFT(filter_length=filter_length,
hop_length=hop_length,
win_length=win_length,
sampling_rate=sampling_rate)
self.segment_length = segment_length
self.mu_quantization = mu_quantization
self.sampling_rate = sampling_rate
def get_mel(self, audio):
audio_norm = audio / utils.MAX_WAV_VALUE
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
return melspec
def __getitem__(self, index):
# Read audio
filename = self.audio_files[index]
audio, sampling_rate = utils.load_wav_to_torch(filename)
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_length - audio.size(0)), 'constant').data
mel = self.get_mel(audio)
audio = utils.mu_law_encode(audio / utils.MAX_WAV_VALUE, self.mu_quantization)
return (mel, audio)
def __len__(self):
return len(self.audio_files)
if __name__ == "__main__":
"""
Turns audio files into mel-spectrogram representations for inference
Uses the data portion of the config for audio processing parameters,
but ignores training files and segment lengths.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-a', "--audio_list", required=True, type=str,
help='File containing list of wavefiles')
parser.add_argument('-o', "--output_dir", required=True, type=str,
help='Directory to put Mel-Spectrogram Tensors')
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
args = parser.parse_args()
filepaths = utils.files_to_list(args.audio_list)
# Make directory if it doesn't exist
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
os.chmod(args.output_dir, 0o775)
# Parse config. Only using data processing
with open(args.config) as f:
data = f.read()
config = json.loads(data)
data_config = config["data_config"]
mel_factory = Mel2SampOnehot(**data_config)
for filepath in filepaths:
audio, sampling_rate = utils.load_wav_to_torch(filepath)
assert(sampling_rate == mel_factory.sampling_rate)
melspectrogram = mel_factory.get_mel(audio)
filename = os.path.basename(filepath)
new_filepath = args.output_dir + '/' + filename + '.pt'
print(new_filepath)
torch.save(melspectrogram, new_filepath)
|
nv-wavenet-master
|
pytorch/mel2samp_onehot.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
"""
Tests that the NV-WaveNet class is producing audio
"""
import torch
from scipy.io.wavfile import write
import nv_wavenet
from wavenet import WaveNet
import utils
import json
if __name__ == '__main__':
config = json.loads(open('config.json').read())
wavenet_config = config["wavenet_config"]
model = WaveNet(**wavenet_config).cuda()
weights = model.export_weights()
wavenet = nv_wavenet.NVWaveNet(**weights)
num_samples = 10*1000
batch_size = config['train_config']['batch_size']
cond_input = torch.zeros([2 * wavenet_config['n_residual_channels'], batch_size, wavenet_config['n_layers'], num_samples]).cuda()
samples = wavenet.infer(cond_input, nv_wavenet.Impl.PERSISTENT)[0]
audio = utils.mu_law_decode_numpy(samples.cpu().numpy(), 256)
audio = utils.MAX_WAV_VALUE * audio
wavdata = audio.astype('int16')
write('audio.wav',16000, wavdata)
|
nv-wavenet-master
|
pytorch/integration_test.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from setuptools import setup
classifiers = ['Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Jetson.GPIO',
version = '2.1.3',
author = 'NVIDIA',
author_email = 'linux-tegra-bugs@nvidia.com',
description = 'A module to control Jetson GPIO channels',
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
license = 'MIT',
keywords = 'Jetson GPIO',
url = 'https://github.com/NVIDIA/jetson-gpio',
classifiers = classifiers,
package_dir = {'': 'lib/python/'},
packages = ['Jetson', 'Jetson.GPIO', 'RPi', 'RPi.GPIO'],
package_data = {'Jetson.GPIO': ['99-gpio.rules',]},
include_package_data = True,
)
|
jetson-gpio-master
|
setup.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import threading
import time
import warnings
import RPi.GPIO as GPIO
# If a board has PWM support, the PWM tests expect 'out_a' to be PWM-capable.
pin_datas = {
'JETSON_ORIN_NANO': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 15 as mux function PWM:
# busybox devmem 0x02440020 32 0x400
# Set BOARD pin 33 as mux function PWM:
# busybox devmem 0x02434040 32 0x401
# Board mode pins
'out_a': 33,
'in_a': 19,
'out_b': 11,
'in_b': 13,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'GPIO09',
'tegra_soc_pin': 'GP167',
'all_pwms': (15, 33),
},
'JETSON_ORIN_NX': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 15 as mux function PWM:
# busybox devmem 0x02440020 32 0x400
# Set BOARD pin 33 as mux function PWM:
# busybox devmem 0x02434040 32 0x401
# Board mode pins
'out_a': 33,
'in_a': 19,
'out_b': 11,
'in_b': 13,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'GPIO09',
'tegra_soc_pin': 'GP167',
'all_pwms': (15, 33),
},
'JETSON_ORIN': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 15 as mux function PWM:
# busybox devmem 0x02440020 32 0x400
# Set BOARD pin 18 as mux function PWM:
# busybox devmem 0x02434040 32 0x401
# Board mode pins
'out_a': 18,
'in_a': 19,
'out_b': 11,
'in_b': 13,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'MCLK05',
'tegra_soc_pin': 'GP66',
'all_pwms': (15, 18),
},
'JETSON_XAVIER': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 18 as mux function PWM:
# busybox devmem 0x2434090 32 0x401
# Board mode pins
'out_a': 18,
'in_a': 19,
'out_b': 21,
'in_b': 22,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'MCLK05',
'tegra_soc_pin': 'SOC_GPIO42',
'all_pwms': (13, 15, 18),
},
'JETSON_TX2': {
# Board mode pins
'out_a': 18,
'in_a': 19,
'out_b': 21,
'in_b': 22,
'unimplemented_pins': (26,),
# Other pin modes:
'cvm_pin': 'AUDIO_MCLK',
'tegra_soc_pin': 'AUD_MCLK',
},
'JETSON_TX1': {
# Board mode pins
'out_a': 18,
'in_a': 19,
'out_b': 21,
'in_b': 22,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'AUDIO_MCLK',
'tegra_soc_pin': 'AUD_MCLK',
},
'JETSON_NANO': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 32 as mux function PWM (set bits 1:0 to 1 not 3):
# sudo busybox devmem 0x700031fc 32 0x45
# Set BOARD pin 32 as SFIO (clear bit 0):
# sudo busybox devmem 0x6000d504 32 0x2
# Board mode pins
'out_a': 32,
'in_a': 31,
'out_b': 29,
'in_b': 26,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'GPIO9',
'tegra_soc_pin': 'AUD_MCLK',
'all_pwms': (32, 33),
},
'JETSON_NX': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 32 as mux function PWM (func 1):
# busybox devmem 0x2430040 32 0x401
# Set BOARD pin 33 as mux function PWM (func 2):
# busybox devmem 0x2440020 32 0x402
# Board mode pins
'out_a': 32,
'in_a': 31,
'out_b': 29,
'in_b': 26,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'GPIO09',
'tegra_soc_pin': 'AUD_MCLK',
'all_pwms': (15, 32, 33),
},
'CLARA_AGX_XAVIER': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 18 as mux function PWM:
# busybox devmem 0x2434090 32 0x401
# Board mode pins
'out_a': 18,
'in_a': 19,
'out_b': 21,
'in_b': 22,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'MCLK05',
'tegra_soc_pin': 'SOC_GPIO42',
'all_pwms': (15, 18),
},
'JETSON_TX2_NX': {
# Pre-test configuration, if boot-time pinmux doesn't set up PWM pins:
# Set BOARD pin 33 as mux function PWM (func 1):
# busybox devmem 0x0c3010a8 32 0x401
# Set BOARD pin 32 as mux function PWM (func 2):
# busybox devmem 0x0c301080 32 0x401
# Board mode pins
'out_a': 32,
'in_a': 31,
'out_b': 29,
'in_b': 26,
'unimplemented_pins': (),
# Other pin modes:
'cvm_pin': 'GPIO09',
'tegra_soc_pin': 'AUD_MCLK',
'all_pwms': (32, 33),
},
}
pin_data = pin_datas.get(GPIO.model)
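# Note (editor's clarification): the loopback tests below assume that 'out_a' is
# physically wired to 'in_a' and 'out_b' to 'in_b' on the expansion header, so
# that driving an output pin is observable on the corresponding input pin.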
# Board mode
all_board_pins = (7, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29, 31,
32, 33, 35, 36, 37, 38, 40,)
bcm_pin = 4
tests = []
def test(f):
tests.append(f)
return f
def pwmtest(f):
if pin_data.get('all_pwms', None):
tests.append(f)
return f
# Tests of:
# def setwarnings(state):
@test
def test_warnings_off():
GPIO.setwarnings(False)
with warnings.catch_warnings(record=True) as w:
# cleanup() warns if no GPIOs were set up
GPIO.cleanup()
if len(w):
raise Exception("Unexpected warning occured")
@test
def test_warnings_on():
GPIO.setwarnings(True)
with warnings.catch_warnings(record=True) as w:
# cleanup() warns if no GPIOs were set up
GPIO.cleanup()
if not len(w):
raise Exception("Expected warning did not occur")
# Tests of:
# def setmode(mode):
# def getmode():
# def setup(channels, direction, pull_up_down=PUD_OFF, initial=None):
@test
def test_setup_one_board():
GPIO.setmode(GPIO.BOARD)
assert GPIO.getmode() == GPIO.BOARD
GPIO.setup(pin_data['in_a'], GPIO.IN)
GPIO.cleanup()
assert GPIO.getmode() is None
@test
def test_setup_one_bcm():
GPIO.setmode(GPIO.BCM)
assert GPIO.getmode() == GPIO.BCM
GPIO.setup(bcm_pin, GPIO.IN)
GPIO.cleanup()
assert GPIO.getmode() is None
@test
def test_setup_one_cvm():
GPIO.setmode(GPIO.CVM)
assert GPIO.getmode() == GPIO.CVM
GPIO.setup(pin_data['cvm_pin'], GPIO.IN)
GPIO.cleanup()
assert GPIO.getmode() is None
@test
def test_setup_one_tegra_soc():
GPIO.setmode(GPIO.TEGRA_SOC)
assert GPIO.getmode() == GPIO.TEGRA_SOC
GPIO.setup(pin_data['tegra_soc_pin'], GPIO.IN)
GPIO.cleanup()
assert GPIO.getmode() is None
@test
def test_setup_twice():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(pin_data['in_a'], GPIO.IN)
GPIO.setup(pin_data['in_a'], GPIO.IN)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.HIGH)
GPIO.cleanup()
@test
def test_setup_one_out_no_init():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT)
GPIO.cleanup()
@test
def test_setup_one_out_high():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
GPIO.cleanup()
@test
def test_setup_one_out_low():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.LOW)
GPIO.cleanup()
@test
def test_setup_many_out_no_init():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['out_a'], pin_data['out_b']), GPIO.OUT)
GPIO.cleanup()
@test
def test_setup_many_out_one_init():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['out_a'], pin_data['out_b']), GPIO.OUT,
initial=GPIO.HIGH)
GPIO.cleanup()
@test
def test_setup_many_out_many_init():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['out_a'], pin_data['out_b']), GPIO.OUT,
initial=(GPIO.HIGH, GPIO.HIGH))
GPIO.cleanup()
@test
def test_setup_one_in():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['in_a'], GPIO.IN)
GPIO.cleanup()
@test
def test_setup_one_in_pull():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['in_a'], GPIO.IN, GPIO.PUD_OFF)
GPIO.cleanup()
@test
def test_setup_many_in():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['in_a'], pin_data['in_b']), GPIO.IN)
GPIO.cleanup()
@test
def test_setup_all():
GPIO.setmode(GPIO.BOARD)
for pin in all_board_pins:
if pin in pin_data['unimplemented_pins']:
continue
GPIO.setup(pin, GPIO.IN)
GPIO.cleanup()
# Tests of:
# def cleanup(channel=None):
# def getmode():
@test
def test_cleanup_one():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['in_a'], GPIO.IN)
GPIO.cleanup(pin_data['in_a'])
assert GPIO.getmode() == GPIO.BOARD
GPIO.cleanup()
assert GPIO.getmode() is None
@test
def test_cleanup_many():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['in_a'], pin_data['in_b']), GPIO.IN)
GPIO.cleanup((pin_data['in_a'], pin_data['in_b']))
assert GPIO.getmode() == GPIO.BOARD
GPIO.cleanup()
assert GPIO.getmode() is None
@test
def test_cleanup_all():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['in_a'], pin_data['in_b']), GPIO.IN)
GPIO.cleanup()
assert GPIO.getmode() is None
# Tests of:
# def input(channel):
@test
def test_input():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['in_a'], GPIO.IN)
GPIO.input(pin_data['in_a'])
GPIO.cleanup()
# Tests of:
# def output(channels, values):
@test
def test_output_one():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT)
GPIO.output(pin_data['out_a'], GPIO.HIGH)
GPIO.output(pin_data['out_a'], GPIO.LOW)
GPIO.cleanup()
@test
def test_output_many_one_value():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['out_a'], pin_data['out_b']), GPIO.OUT)
GPIO.output((pin_data['out_a'], pin_data['out_b']), GPIO.HIGH)
GPIO.output((pin_data['out_a'], pin_data['out_b']), GPIO.LOW)
GPIO.cleanup()
@test
def test_output_many_many_value():
GPIO.setmode(GPIO.BOARD)
GPIO.setup((pin_data['out_a'], pin_data['out_b']), GPIO.OUT)
GPIO.output((pin_data['out_a'], pin_data['out_b']), (GPIO.HIGH, GPIO.LOW))
GPIO.output((pin_data['out_a'], pin_data['out_b']), (GPIO.LOW, GPIO.HIGH))
GPIO.cleanup()
# Tests of combined (looped back) output/input
@test
def test_out_in_init_high():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(pin_data['in_a'], GPIO.IN)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.HIGH)
GPIO.output(pin_data['out_a'], GPIO.LOW)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.LOW)
GPIO.output(pin_data['out_a'], GPIO.HIGH)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.HIGH)
GPIO.cleanup()
@test
def test_out_in_init_low():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(pin_data['in_a'], GPIO.IN)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.LOW)
GPIO.output(pin_data['out_a'], GPIO.HIGH)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.HIGH)
GPIO.output(pin_data['out_a'], GPIO.LOW)
val = GPIO.input(pin_data['in_a'])
assert(val == GPIO.LOW)
GPIO.cleanup()
# Tests of:
# def gpio_function(channel):
@test
def test_gpio_function_unexported():
GPIO.setmode(GPIO.BOARD)
val = GPIO.gpio_function(pin_data['in_a'])
assert val == GPIO.UNKNOWN
GPIO.cleanup()
@test
def test_gpio_function_in():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['in_a'], GPIO.IN)
val = GPIO.gpio_function(pin_data['in_a'])
assert val == GPIO.IN
GPIO.cleanup()
@test
def test_gpio_function_out():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT)
val = GPIO.gpio_function(pin_data['out_a'])
assert val == GPIO.OUT
GPIO.cleanup()
# Tests of:
# def wait_for_edge(channel, edge, bouncetime=None, timeout=None):
@test
def test_wait_for_edge_timeout():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(pin_data['in_a'], GPIO.IN)
val = GPIO.wait_for_edge(pin_data['in_a'], GPIO.BOTH, timeout=100)
assert val is None
GPIO.cleanup()
class DelayedSetChannel(threading.Thread):
def __init__(self, channel, value, delay):
super(DelayedSetChannel, self).__init__()
self.channel = channel
self.value = value
self.delay = delay
def run(self):
time.sleep(self.delay)
GPIO.output(self.channel, self.value)
@test
def test_wait_for_edge_rising():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(pin_data['in_a'], GPIO.IN)
dsc = DelayedSetChannel(pin_data['out_a'], GPIO.HIGH, 0.5)
dsc.start()
val = GPIO.wait_for_edge(pin_data['in_a'], GPIO.RISING, timeout=1000)
dsc.join()
assert val == pin_data['in_a']
GPIO.cleanup()
@test
def test_wait_for_edge_falling():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(pin_data['in_a'], GPIO.IN)
dsc = DelayedSetChannel(pin_data['out_a'], GPIO.LOW, 0.5)
dsc.start()
val = GPIO.wait_for_edge(pin_data['in_a'], GPIO.FALLING, timeout=1000)
dsc.join()
assert val == pin_data['in_a']
GPIO.cleanup()
# Tests of:
# def add_event_detect(channel, edge, callback=None, bouncetime=None):
# def event_detected(channel):
# def add_event_callback(channel, callback):
# def remove_event_detect(channel):
def _test_events(init, edge, tests, specify_callback, use_add_callback):
global event_callback_occurred
event_callback_occurred = False
    # Wait 0.5 seconds here because the default event-removal timeout is 0.5 seconds.
time.sleep(0.5)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=init)
GPIO.setup(pin_data['in_a'], GPIO.IN)
def callback(channel):
global event_callback_occurred
if channel != pin_data['in_a']:
return
event_callback_occurred = True
def get_saw_event():
global event_callback_occurred
if specify_callback or use_add_callback:
val = event_callback_occurred
event_callback_occurred = False
return val
else:
return GPIO.event_detected(pin_data['in_a'])
if specify_callback:
args = {'callback': callback, 'polltime': 0.2}
else:
args = {'polltime': 0.2}
    # After every pin state change, allow some time for the pin to settle.
    # Here we wait 0.2 seconds to make sure the pin state has stabilized
    # (the same reason applies to the waits after setting the output pin below).
time.sleep(0.2)
    # By default, the poll time is also 0.2 seconds. The poll time should be
    # large enough to keep the polling thread efficient, but small enough that
    # the thread responds to event removal as quickly as possible.
GPIO.add_event_detect(pin_data['in_a'], edge, **args)
if use_add_callback:
GPIO.add_event_callback(pin_data['in_a'], callback)
assert not get_saw_event()
for output, event_expected in tests:
GPIO.output(pin_data['out_a'], output)
time.sleep(0.2)
assert get_saw_event() == event_expected
assert not get_saw_event()
    # By default, the timeout for removal is also 0.5 seconds.
    # The removal timeout should always be longer than the polltime, ideally at
    # least twice as long. Thus, in this example, since the poll time is set to
    # 0.2, the timeout must be greater than 0.4.
GPIO.remove_event_detect(pin_data['in_a'], timeout=0.5)
GPIO.cleanup()
@test
def test_event_detected_falling():
_test_events(
GPIO.HIGH,
GPIO.FALLING,
(
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
),
False,
False
)
_test_events(
GPIO.LOW,
GPIO.FALLING,
(
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
),
True,
False
)
@test
def test_event_detected_rising():
_test_events(
GPIO.HIGH,
GPIO.RISING,
(
(GPIO.LOW, False),
(GPIO.HIGH, True),
(GPIO.LOW, False),
(GPIO.HIGH, True),
),
False,
False
)
_test_events(
GPIO.LOW,
GPIO.RISING,
(
(GPIO.HIGH, True),
(GPIO.LOW, False),
(GPIO.HIGH, True),
(GPIO.LOW, False),
),
True,
False
)
@test
def test_event_detected_both():
_test_events(
GPIO.HIGH,
GPIO.BOTH,
(
(GPIO.LOW, True),
(GPIO.HIGH, True),
(GPIO.LOW, True),
(GPIO.HIGH, True),
),
False,
False
)
_test_events(
GPIO.LOW,
GPIO.BOTH,
(
(GPIO.HIGH, True),
(GPIO.LOW, True),
(GPIO.HIGH, True),
(GPIO.LOW, True),
),
False,
True
)
# Tests of multiple:
# def add_event_callback(channel, callback):
def _test_callbacks(init, edge, tests, specify_callback, use_add_callback):
global event_callback_occurred
global event_callback_occurred_2
event_callback_occurred = False
event_callback_occurred_2 = False
    # Wait 0.5 seconds here because the default event-removal timeout is 0.5 seconds.
time.sleep(0.5)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=init)
GPIO.setup(pin_data['in_a'], GPIO.IN)
def callback(channel):
global event_callback_occurred
if channel != pin_data['in_a']:
return
event_callback_occurred = True
def callback2(channel):
global event_callback_occurred_2
if channel != pin_data['in_a']:
return
event_callback_occurred_2 = True
# return true if every event has been detected
def get_saw_event():
global event_callback_occurred
global event_callback_occurred_2
if specify_callback:
val = event_callback_occurred
event_callback_occurred = False
return val
elif use_add_callback:
val = event_callback_occurred and event_callback_occurred_2
event_callback_occurred = False
event_callback_occurred_2 = False
return val
else:
return GPIO.event_detected(pin_data['in_a'])
    # Return True if no event has been detected by any callback;
    # if any one callback fired, return False.
def not_get_saw_event():
global event_callback_occurred
global event_callback_occurred_2
if specify_callback:
val = event_callback_occurred
event_callback_occurred = False
return not val
elif use_add_callback:
val = event_callback_occurred or event_callback_occurred_2
event_callback_occurred = False
event_callback_occurred_2 = False
return not val
else:
return not GPIO.event_detected(pin_data['in_a'])
if specify_callback:
args = {'callback': callback, 'polltime': 0.2}
else:
args = {'polltime': 0.2}
time.sleep(0.2)
GPIO.add_event_detect(pin_data['in_a'], edge, **args)
if use_add_callback:
# adding double callback functions
GPIO.add_event_callback(pin_data['in_a'], callback)
GPIO.add_event_callback(pin_data['in_a'], callback2)
assert not_get_saw_event()
for output, event_expected in tests:
GPIO.output(pin_data['out_a'], output)
time.sleep(0.2)
assert get_saw_event() == event_expected
assert not_get_saw_event()
GPIO.remove_event_detect(pin_data['in_a'], timeout=0.5)
GPIO.cleanup()
@test
def test_event_callbacks():
_test_callbacks(
GPIO.HIGH,
GPIO.FALLING,
(
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
),
False,
True
)
_test_callbacks(
GPIO.LOW,
GPIO.FALLING,
(
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
),
True,
False
)
_test_callbacks(
GPIO.LOW,
GPIO.FALLING,
(
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
),
False,
True
)
# Tests of multiple events at a time:
def _test_multi_events(init_a, edge_a, tests_a, init_b, edge_b, tests_b, specify_callback):
event_dict = { 'series_a': {'in_pin_name': 'in_a',
'out_pin_name': 'out_a',
'event_callback_occurred': False,
'init': init_a,
'edge': edge_a,
'tests': tests_a},
'series_b': {'in_pin_name': 'in_b',
'out_pin_name': 'out_b',
'event_callback_occurred': False,
'init': init_b,
'edge': edge_b,
'tests': tests_b}}
test_number = len(tests_a)
# internal functions
def callback(channel):
for pin_series in event_dict.keys():
input_pin_name = event_dict[pin_series]['in_pin_name']
if channel == pin_data[input_pin_name]:
event_dict[pin_series]['event_callback_occurred'] = True
return
def get_saw_event(series_name):
if specify_callback:
val = event_dict[series_name]['event_callback_occurred']
event_dict[series_name]['event_callback_occurred'] = False
return val
else:
pin_name = event_dict[series_name]['in_pin_name']
return GPIO.event_detected(pin_data[pin_name])
# setup
time.sleep(0.5)
GPIO.setmode(GPIO.BOARD)
for pin_series in event_dict.keys():
input_pin_name = event_dict[pin_series]['in_pin_name']
output_pin_name = event_dict[pin_series]['out_pin_name']
GPIO.setup(pin_data[output_pin_name], GPIO.OUT, initial=event_dict[pin_series]['init'])
GPIO.setup(pin_data[input_pin_name], GPIO.IN)
if specify_callback:
args = {'callback': callback, 'polltime': 0.2}
else:
args = {'polltime': 0.2}
time.sleep(0.2)
GPIO.add_event_detect(pin_data[input_pin_name], event_dict[pin_series]['edge'], **args)
assert not get_saw_event(pin_series)
# test edges
index=0
while index < test_number:
for pin_series in event_dict.keys():
output_pin_name = event_dict[pin_series]['out_pin_name']
input_pin_name = event_dict[pin_series]['in_pin_name']
pin_tests = event_dict[pin_series]['tests']
output, event_expected = pin_tests[index]
GPIO.output(pin_data[output_pin_name], output)
time.sleep(0.2)
assert get_saw_event(pin_series) == event_expected
assert not get_saw_event(pin_series)
index += 1
# cleanup
for pin_series in event_dict.keys():
input_pin_name = event_dict[pin_series]['in_pin_name']
GPIO.remove_event_detect(pin_data[input_pin_name], timeout=0.5)
GPIO.cleanup()
@test
def test_multi_events_detected_diff_edge():
_test_multi_events(
# series a
GPIO.HIGH,
GPIO.FALLING,
(
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
),
# series b
GPIO.HIGH,
GPIO.RISING,
(
(GPIO.LOW, False),
(GPIO.HIGH, True),
(GPIO.LOW, False),
(GPIO.HIGH, True),
),
False
)
_test_multi_events(
GPIO.LOW,
GPIO.FALLING,
(
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
),
GPIO.LOW,
GPIO.RISING,
(
(GPIO.HIGH, True),
(GPIO.LOW, False),
(GPIO.HIGH, True),
(GPIO.LOW, False),
),
True
)
@test
def test_multi_events_detected_same_edge():
_test_multi_events(
# series a
GPIO.HIGH,
GPIO.FALLING,
(
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
),
# series b
GPIO.HIGH,
GPIO.FALLING,
(
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
),
False
)
_test_multi_events(
GPIO.LOW,
GPIO.FALLING,
(
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
),
GPIO.HIGH,
GPIO.FALLING,
(
(GPIO.LOW, True),
(GPIO.HIGH, False),
(GPIO.LOW, True),
(GPIO.HIGH, False),
),
True
)
# Tests of class PWM
@pwmtest
def test_pwm_multi_duty():
for pct in (25, 50, 75):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['in_a'], GPIO.IN)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin_data['out_a'], 500)
p.start(pct)
count = 0
for i in range(1000):
count += GPIO.input(pin_data['in_a'])
p.stop()
del p
min_ct = 10 * (pct - 5)
max_ct = 10 * (pct + 5)
assert min_ct <= count <= max_ct
GPIO.cleanup()
@pwmtest
def test_pwm_change_frequency():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin_data['out_a'], 500)
p.start(50)
p.ChangeFrequency(550)
p.stop()
del p
GPIO.cleanup()
@pwmtest
def test_pwm_change_duty_cycle():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin_data['out_a'], 500)
p.start(50)
p.ChangeDutyCycle(60)
p.stop()
del p
GPIO.cleanup()
@pwmtest
def test_pwm_cleanup_none():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin_data['out_a'], 500)
p.start(50)
GPIO.cleanup()
@pwmtest
def test_pwm_cleanup_stop():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin_data['out_a'], 500)
p.start(50)
p.stop()
GPIO.cleanup()
@pwmtest
def test_pwm_cleanup_del():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin_data['out_a'], GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin_data['out_a'], 500)
p.start(50)
del p
GPIO.cleanup()
@pwmtest
def test_pwm_create_all():
for pin in pin_data['all_pwms']:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(pin, 500)
p.start(50)
p.stop()
GPIO.cleanup()
# Main script
if __name__ == '__main__':
for test in tests:
print('Testing', test.__name__)
try:
test()
except:
# This isn't a finally block, since we don't want to repeat the
# cleanup() call that a successful test already made.
GPIO.cleanup()
raise
|
jetson-gpio-master
|
samples/test_all_apis.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
# Pin Definitions
input_pin = 18 # BCM pin 18, BOARD pin 12
def main():
prev_value = None
# Pin Setup:
GPIO.setmode(GPIO.BCM) # BCM pin-numbering scheme from Raspberry Pi
GPIO.setup(input_pin, GPIO.IN) # set pin as an input pin
print("Starting demo now! Press CTRL+C to exit")
try:
while True:
value = GPIO.input(input_pin)
if value != prev_value:
if value == GPIO.HIGH:
value_str = "HIGH"
else:
value_str = "LOW"
print("Value read from pin {} : {}".format(input_pin,
value_str))
prev_value = value
time.sleep(1)
finally:
GPIO.cleanup()
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/simple_input.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import sys
import RPi.GPIO as GPIO
pin_datas = {
'JETSON_XAVIER': {
'unimplemented': (),
},
'JETSON_TX2': {
'unimplemented': (26, ),
},
'JETSON_TX1': {
'unimplemented': (),
},
'JETSON_NANO': {
'unimplemented': (),
},
'JETSON_NX': {
'unimplemented': (),
},
'CLARA_AGX_XAVIER': {
'unimplemented': (),
},
'JETSON_TX2_NX': {
'unimplemented': (),
},
'JETSON_ORIN': {
'unimplemented': (),
},
'JETSON_ORIN_NX': {
'unimplemented': (),
},
'JETSON_ORIN_NANO': {
'unimplemented': (),
},
}
pin_data = pin_datas.get(GPIO.model)
all_pins = (7, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29, 31, 32, 33,
35, 36, 37, 38, 40,)
if len(sys.argv) > 1:
all_pins = map(int, sys.argv[1:])
for pin in all_pins:
if pin in pin_data['unimplemented']:
print("Pin %d unimplemented; skipping" % pin)
continue
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN)
value = GPIO.input(pin)
print("Pin %d input value %d" % (pin, value))
GPIO.cleanup()
|
jetson-gpio-master
|
samples/test_all_pins_input.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
output_pins = {
'JETSON_XAVIER': 18,
'JETSON_NANO': 33,
'JETSON_NX': 33,
'CLARA_AGX_XAVIER': 18,
'JETSON_TX2_NX': 32,
'JETSON_ORIN': 18,
'JETSON_ORIN_NX': 33,
'JETSON_ORIN_NANO': 33
}
output_pin = output_pins.get(GPIO.model, None)
if output_pin is None:
raise Exception('PWM not supported on this board')
def main():
# Pin Setup:
# Board pin-numbering scheme
GPIO.setmode(GPIO.BOARD)
# set pin as an output pin with optional initial state of HIGH
GPIO.setup(output_pin, GPIO.OUT, initial=GPIO.HIGH)
p = GPIO.PWM(output_pin, 50)
val = 25
incr = 5
p.start(val)
print("PWM running. Press CTRL+C to exit.")
try:
while True:
time.sleep(0.25)
if val >= 100:
incr = -incr
if val <= 0:
incr = -incr
val += incr
p.ChangeDutyCycle(val)
finally:
p.stop()
GPIO.cleanup()
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/simple_pwm.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
pin = 32
def main():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.OUT)
for i in range(5):
ts = 0.1
GPIO.output(pin, GPIO.HIGH)
time.sleep(ts)
GPIO.output(pin, GPIO.LOW)
time.sleep(ts)
GPIO.cleanup()
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/issue40-trigger.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
# Pin Definitions
output_pin = 18 # BCM pin 18, BOARD pin 12
def main():
# Pin Setup:
GPIO.setmode(GPIO.BCM) # BCM pin-numbering scheme from Raspberry Pi
# set pin as an output pin with optional initial state of HIGH
GPIO.setup(output_pin, GPIO.OUT, initial=GPIO.HIGH)
print("Starting demo now! Press CTRL+C to exit")
curr_value = GPIO.HIGH
try:
while True:
time.sleep(1)
# Toggle the output every second
print("Outputting {} to pin {}".format(curr_value, output_pin))
GPIO.output(output_pin, curr_value)
curr_value ^= GPIO.HIGH
finally:
GPIO.cleanup()
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/simple_out.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# EXAMPLE SETUP
# Connect a button to pin 18 and GND, with a pull-up resistor connecting the
# button to 3V3, and an LED connected to pin 12. The application performs the
# same function as button_led.py, but uses a blocking wait for the button
# press event instead of continuously polling the value of the pin, in order
# to reduce CPU usage.
import RPi.GPIO as GPIO
import time
# Pin Definitions:
led_pin = 12 # Board pin 12
but_pin = 18 # Board pin 18
def main():
# Pin Setup:
GPIO.setmode(GPIO.BOARD) # BOARD pin-numbering scheme
GPIO.setup(led_pin, GPIO.OUT) # LED pin set as output
GPIO.setup(but_pin, GPIO.IN) # button pin set as input
# Initial state for LEDs:
GPIO.output(led_pin, GPIO.LOW)
print("Starting demo now! Press CTRL+C to exit")
try:
while True:
print("Waiting for button event")
GPIO.wait_for_edge(but_pin, GPIO.FALLING)
# event received when button pressed
print("Button Pressed!")
GPIO.output(led_pin, GPIO.HIGH)
time.sleep(1)
GPIO.output(led_pin, GPIO.LOW)
finally:
GPIO.cleanup() # cleanup all GPIOs
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/button_event.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
# Pin Definitions:
led_pin = 12 # BOARD pin 12
but_pin = 18 # BOARD pin 18
def main():
prev_value = None
# Pin Setup:
GPIO.setmode(GPIO.BOARD) # BOARD pin-numbering scheme
GPIO.setup(led_pin, GPIO.OUT) # LED pin set as output
GPIO.setup(but_pin, GPIO.IN) # Button pin set as input
# Initial state for LEDs:
GPIO.output(led_pin, GPIO.LOW)
print("Starting demo now! Press CTRL+C to exit")
try:
while True:
curr_value = GPIO.input(but_pin)
if curr_value != prev_value:
GPIO.output(led_pin, not curr_value)
prev_value = curr_value
print("Outputting {} to Pin {}".format(curr_value, led_pin))
time.sleep(1)
finally:
GPIO.cleanup() # cleanup all GPIO
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/button_led.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import Jetson.GPIO as GPIO
print(GPIO.model)
|
jetson-gpio-master
|
samples/jetson_model.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
pin = 31
def on_falling(channel):
print("Callback.")
def main():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN)
    # The poll time defaults to 0.2 seconds; it is passed explicitly here for illustration
GPIO.add_event_detect(pin, GPIO.FALLING, callback=on_falling, bouncetime=300, polltime=0.2)
print("Starting demo now! Press CTRL+C to exit")
try:
while True:
print("Main loop...")
time.sleep(5)
finally:
GPIO.cleanup()
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/issue40.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import RPi.GPIO as GPIO
import time
# Pin Definitions:
led_pin_1 = 12
led_pin_2 = 13
but_pin = 18
# blink LED 2 quickly 5 times when button pressed
def blink(channel):
print("Blink LED 2")
for i in range(5):
GPIO.output(led_pin_2, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(led_pin_2, GPIO.LOW)
time.sleep(0.5)
def main():
# Pin Setup:
GPIO.setmode(GPIO.BOARD) # BOARD pin-numbering scheme
GPIO.setup([led_pin_1, led_pin_2], GPIO.OUT) # LED pins set as output
GPIO.setup(but_pin, GPIO.IN) # button pin set as input
# Initial state for LEDs:
GPIO.output(led_pin_1, GPIO.LOW)
GPIO.output(led_pin_2, GPIO.LOW)
    # The poll time defaults to 0.2 seconds; it is passed explicitly here for illustration
GPIO.add_event_detect(but_pin, GPIO.FALLING, callback=blink, bouncetime=10, polltime=0.2)
print("Starting demo now! Press CTRL+C to exit")
try:
while True:
# blink LED 1 slowly
GPIO.output(led_pin_1, GPIO.HIGH)
time.sleep(2)
GPIO.output(led_pin_1, GPIO.LOW)
time.sleep(2)
finally:
GPIO.cleanup() # cleanup all GPIOs
if __name__ == '__main__':
main()
|
jetson-gpio-master
|
samples/button_interrupt.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import sys
import time
import RPi.GPIO as GPIO
pin_datas = {
'JETSON_XAVIER': {
'unimplemented': (),
'input_only': (36, ),
},
'JETSON_TX2': {
'unimplemented': (26, ),
'input_only': (16, 36, ),
},
'JETSON_TX1': {
'unimplemented': (),
'input_only': (36, ),
},
'JETSON_NANO': {
'unimplemented': (),
'input_only': (),
},
'JETSON_NX': {
'unimplemented': (),
'input_only': (),
},
'CLARA_AGX_XAVIER': {
'unimplemented': (),
'input_only': (),
},
'JETSON_TX2_NX': {
'unimplemented': (),
'input_only': (),
},
'JETSON_ORIN': {
'unimplemented': (),
'input_only': (),
},
'JETSON_ORIN_NX': {
'unimplemented': (),
'input_only': (),
},
'JETSON_ORIN_NANO': {
'unimplemented': (),
'input_only': (),
},
}
pin_data = pin_datas.get(GPIO.model)
all_pins = (7, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29, 31, 32, 33,
35, 36, 37, 38, 40,)
if len(sys.argv) > 1:
all_pins = map(int, sys.argv[1:])
for pin in all_pins:
if pin in pin_data['unimplemented']:
print("Pin %d unimplemented; skipping" % pin)
continue
if pin in pin_data['input_only']:
print("Pin %d input-only; skipping" % pin)
continue
print("Testing pin %d as OUTPUT; CTRL-C to test next pin" % pin)
try:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.OUT)
while True:
GPIO.output(pin, GPIO.HIGH)
time.sleep(0.25)
GPIO.output(pin, GPIO.LOW)
time.sleep(0.25)
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
|
jetson-gpio-master
|
samples/test_all_pins.py
|
jetson-gpio-master
|
lib/python/RPi/__init__.py
|
|
from Jetson.GPIO import *
VERSION = '0.1.0'
|
jetson-gpio-master
|
lib/python/RPi/GPIO/__init__.py
|
jetson-gpio-master
|
lib/python/Jetson/__init__.py
|
|
# Copyright (c) 2012-2017 Ben Croston <ben@croston.org>.
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# @File name: gpio_event.py
# @Date:
# @Last modified by:
# @Last Modified time: 6/20/2023
# @Description: This file manages all functions needed for event detection,
# enabling users to add or remove an event in a blocking or non-blocking
# mode. It keeps a global event dictionary that supports looking up a
# channel's registered event.
# @Note: Ideas for multiple-channel detection
# 1. maxevents should not be specified when polling
# 2. Threads should not share an epoll instance
# Python2 has module thread. Renamed to _thread in Python3
try:
import thread
except:
import _thread as thread
import os
import warnings
import fcntl
import select
import ctypes
import time
from Jetson.GPIO import gpio_cdev as cdev
from datetime import datetime
try:
InterruptedError = InterruptedError
except:
InterruptedError = IOError
# Edge possibilities
NO_EDGE = 0
RISING_EDGE = 1
FALLING_EDGE = 2
BOTH_EDGE = 3
# Dictionary storing the epoll thread object
# Key: channel (pin number), Value: Epoll object
_epoll_fd_thread = {}
# epoll blocking wait object
_epoll_fd_blocking = None
# 2-layered dictionary of _Gpios objects.
# layer 1 key = chip name, layer 2 key = channel (pin number by mode)
# value = _Gpios object
_gpio_event_list = {}
# Dictionary for a thread to look up its intended running state
# Key: thread id, Value: true if the thread is supposed to be running, otherwise false
_thread_running_dict = {}
# lock object for thread
_mutex = thread.allocate_lock()
class _Gpios:
# @value_fd the file descriptor for the chip line
    # @initial_thread true if the thread has just started up (within the first loop)
    # @thread_added true if a monitoring thread has been added for this object/gpio
    # @thread_id the id of the thread being created to detect event
    # @bouncetime the time interval for debouncing
    # @callbacks a list of callback functions to be executed when an edge event happened
    # @lastcall the timestamp for counting debounce
    # @event_occurred true if an edge event occurred
def __init__(self, line_fd, bouncetime=None):
self.value_fd = line_fd
self.initial_thread = True
self.thread_added = False
self.thread_id = 0
self.thread_exited = False
self.bouncetime = bouncetime
self.callbacks = []
self.lastcall = 0
self.event_occurred = False
def __del__(self):
del self.callbacks
# @brief add an edge detection event
# The detection runs in a thread using a non-blocking I/O multiplexing approach.
# However, one pin on a chip (channel) only allows one edge detection event; a newly
# added event will be ignored if an event already exists.
# @param[in] chip_fd: the file descriptor of the chip
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[in] request: gpioevent_request struct that describes gpio event monitoring
# @param[in] bouncetime: the time interval for debouncing
# @param[in] poll_time: the max time to wait for an edge event
# @param[out] 0 on success, 1 if an event was already added, otherwise 2 if something fatal happened
def add_edge_detect(chip_fd, chip_name, channel, request, bouncetime, poll_time):
gpio_obj = None
res = gpio_event_added(chip_name, channel)
# event not added
if not res:
# open the line
try:
ioctl_ret = fcntl.ioctl(chip_fd, cdev.GPIO_GET_LINEEVENT_IOCTL, request)
except (OSError, IOError) as e:
raise cdev.GPIOError(e.errno, "Opening input line event handle: " + e.strerror)
else:
warnings.warn("Warning: event is already added, ignore new added event", RuntimeWarning)
return 1
    # Check if we successfully got the event handle from ioctl
    if ioctl_ret < 0:
        raise cdev.GPIOError(ioctl_ret, "Unable to get line event handle")
else:
gpio_obj = _Gpios(request.fd, bouncetime)
# create epoll object for fd if not already open
_mutex.acquire()
if channel not in _epoll_fd_thread:
_epoll_fd_thread[channel] = select.epoll()
if _epoll_fd_thread[channel] is None:
_mutex.release()
return 2
# add eventmask and fd to epoll object
try:
# eventmask: available for read and edge trigger
_epoll_fd_thread[channel].register(gpio_obj.value_fd, select.EPOLLIN | select.EPOLLET)
except IOError:
_mutex.release()
remove_edge_detect(chip_name, channel)
return 2
_mutex.release()
# create and start poll thread if not already running
try:
thread_id = thread.start_new_thread(_edge_handler, ("edge_handler_thread", request.fd, channel, poll_time))
gpio_obj.thread_id = thread_id
except:
remove_edge_detect(chip_name, channel)
warnings.warn("Unable to start thread", RuntimeWarning)
return 2
gpio_obj.thread_added = True
_add_gpio_event(chip_name, channel, gpio_obj)
return 0
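# A minimal usage sketch (not part of the library): it shows how a caller
# could combine chip_open_by_label/request_event from gpio_cdev with
# add_edge_detect and add_edge_callback. The chip label "tegra-gpio", the
# channel/line offset 12 and the timing values are placeholder assumptions,
# and for simplicity the same number is used as both the channel key and the
# line offset; the high-level Jetson.GPIO API normally performs these steps.
def _example_add_edge_detect_usage():
    chip_name = "tegra-gpio"   # assumed chip label
    channel = 12               # assumed pin number / line offset
    chip_fd = cdev.chip_open_by_label(chip_name)
    request = cdev.request_event(channel,
                                 cdev.GPIOEVENT_REQUEST_RISING_EDGE,
                                 "example-consumer")
    ret = add_edge_detect(chip_fd, chip_name, channel, request,
                          bouncetime=200, poll_time=0.2)
    if ret == 0:
        # Callbacks registered here are invoked with no arguments by the
        # monitoring thread whenever an edge event is accepted.
        def _on_edge():
            print("edge detected on channel", channel)
        add_edge_callback(chip_name, channel, _on_edge)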
# @brief Remove an edge event detection
# Not only will the event be unregistered, the corresponding thread will also be stopped.
# Suggestion for the timeout parameter: the value should be greater than the poll_time
# in add_edge_detect to be safe.
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[in] timeout: the maximum time to wait for the thread detecting channel to stop
def remove_edge_detect(chip_name, channel, timeout=0.3):
gpio_obj = gpio_event_added(chip_name, channel)
if gpio_obj is None:
warnings.warn("Event not found", RuntimeWarning)
return
    # Signal the thread to enter its exit state
_mutex.acquire()
thread_id = _gpio_event_list[chip_name][channel].thread_id
_thread_running_dict[thread_id] = False
# Wait till the thread exits
if _gpio_event_list[chip_name][channel].thread_added == True:
_mutex.release()
time.sleep(timeout)
_mutex.acquire()
if _gpio_event_list[chip_name][channel].thread_exited == False:
warnings.warn("Timeout in waiting event detection to be removed", RuntimeWarning)
# unregister the epoll file descriptor
if channel in _epoll_fd_thread and _epoll_fd_thread[channel] is not None:
_epoll_fd_thread[channel].unregister(_gpio_event_list[chip_name][channel].value_fd)
del _gpio_event_list[chip_name][channel]
_mutex.release()
# @brief Add a callback function for an event
# Note that if the event has not been added or set up, a warning
# will be shown and the action ignored
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[in] callback: a callback function
def add_edge_callback(chip_name, channel, callback):
gpio_obj = gpio_event_added(chip_name, channel)
if gpio_obj is None:
warnings.warn("Event not found", RuntimeWarning)
return
_mutex.acquire()
if not _gpio_event_list[chip_name][channel].thread_added:
_mutex.release()
warnings.warn("Please add the event before adding callback", RuntimeWarning)
return
_gpio_event_list[chip_name][channel].callbacks.append(callback)
_mutex.release()
# @brief Check if any edge event occurred
# If an edge event happened, the flag will be cleared for the next occurrence
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[out] true if an edge event occurred, otherwise false
def edge_event_detected(chip_name, channel):
gpio_obj = gpio_event_added(chip_name, channel)
if gpio_obj is None:
warnings.warn("Event not found", RuntimeWarning)
return False
_mutex.acquire()
    # Event has occurred
if _gpio_event_list[chip_name][channel].event_occurred:
_gpio_event_list[chip_name][channel].event_occurred = False
_mutex.release()
return True
_mutex.release()
return False
# @brief Check if any event is added to the channel in the chip controller
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[out] the gpio object if an event exists, otherwise None
def gpio_event_added(chip_name, channel):
_mutex.acquire()
if chip_name not in _gpio_event_list:
_mutex.release()
return None
if channel not in _gpio_event_list[chip_name]:
_mutex.release()
return None
gpio_obj = _gpio_event_list[chip_name][channel]
_mutex.release()
return gpio_obj
# @brief Add an event to the event list
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[in] gpio_obj: the gpio handle with related information of a channel's
# event
def _add_gpio_event(chip_name, channel, gpio_obj):
_mutex.acquire()
if chip_name not in _gpio_event_list:
_gpio_event_list[chip_name] = {}
if channel not in _gpio_event_list[chip_name]:
_gpio_event_list[chip_name][channel] = gpio_obj
_mutex.release()
# @brief Look up by chip name and channel to get the gpio object
# with related information of a channel's event
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[out] the gpio handle with related information of a channel's event,
# if such handle exist, otherwise return None
def _get_gpio_object(chip_name, channel):
gpio_obj = gpio_event_added(chip_name, channel)
if gpio_obj is None:
warnings.warn("Event not found", RuntimeWarning)
return None
_mutex.acquire()
gpio_obj = _gpio_event_list[chip_name][channel]
_mutex.release()
return gpio_obj
def _set_edge(gpio_name, edge):
raise RuntimeError("This function is deprecated")
# @brief Look up by file descriptor to get the keys (chip name and channel)
# of the gpio object registered for that descriptor
# @param[in] fd: the file descriptor of a channel/line
# @param[out] a tuple of the GPIO chip name and the pin number in
# specified mode, otherwise a tuple of None
def _get_gpio_obj_keys(fd):
_mutex.acquire()
for chip_name in _gpio_event_list:
for pin in _gpio_event_list[chip_name]:
if _gpio_event_list[chip_name][pin].value_fd == fd:
_mutex.release()
return (chip_name, pin)
_mutex.release()
return None, None
def _get_gpio_file_object(fileno):
raise RuntimeError("This function is deprecated")
def _set_thread_exit_state(fd):
chip_name, channel = _get_gpio_obj_keys(fd)
# Get the gpio object to do following updates
gpio_obj = _get_gpio_object(chip_name, channel)
    if gpio_obj is None:
        warnings.warn("Channel has been removed from detection before thread exits", RuntimeWarning)
return
# Set state
_mutex.acquire()
_gpio_event_list[chip_name][channel].thread_exited = True
_mutex.release()
# @brief A thread that catches GPIO events in a non-blocking mode.
# Exits upon error (e.g. fd non-existence, information discrepancy, or an
# unknown error)
# @param[in] thread_name: a functional name of the thread
# @param[in] fd: the file descriptor of a channel/line
# @param[in] channel: the pin number in specified mode (board or bcm)
# @param[in] poll_timeout: the maximum time set to wait for edge event (second)
def _edge_handler(thread_name, fileno, channel, poll_timeout):
thread_id = thread.get_ident()
# Mark the thread state as running
_mutex.acquire()
_thread_running_dict[thread_id] = True
# clean device buffer
epoll_obj = _epoll_fd_thread[channel]
_mutex.release()
# The timeout should be longer than the wait time between the events
precedent_events = epoll_obj.poll(timeout=0.5, maxevents=1)
if len(precedent_events) > 0:
_fd = precedent_events[0][0]
try:
data = os.read(_fd, ctypes.sizeof(cdev.gpioevent_data))
except OSError as e:
raise cdev.GPIOError(e.errno, "Reading GPIO event: " + e.strerror)
while _thread_running_dict[thread_id]:
try:
# poll for event
events = epoll_obj.poll(timeout=poll_timeout, maxevents=1)
# Timeout without any event
if len(events) == 0:
                # The timeout exists mainly to let the thread re-check its running
                # status, so by design no warning is shown on a timeout
continue
fd = events[0][0]
# Check if the returning fd is the one we are waiting for
if fd != fileno:
raise RuntimeError("File object not found after wait for GPIO %s" % channel)
#read the result out
try:
data = os.read(fd, ctypes.sizeof(cdev.gpioevent_data))
except OSError as e:
raise cdev.GPIOError(e.errno, "Reading GPIO event: " + e.strerror)
event_data = cdev.gpioevent_data.from_buffer_copy(data)
# event result
if (event_data.id != cdev.GPIOEVENT_REQUEST_RISING_EDGE and
event_data.id != cdev.GPIOEVENT_REQUEST_FALLING_EDGE):
warnings.warn("Unknown event caught", RuntimeWarning)
continue
# check key to make sure gpio object has not been deleted
# from main thread
chip_name, pin_num = _get_gpio_obj_keys(fd)
if channel != pin_num:
warnings.warn("Channel does not match with assigned file descriptor", RuntimeWarning)
_mutex.acquire()
_thread_running_dict[thread.get_ident()] = False
_mutex.release()
break
# Get the gpio object to do following updates
gpio_obj = _get_gpio_object(chip_name, pin_num)
if gpio_obj is None:
raise RuntimeError("GPIO object does not exists")
# debounce the input event for the specified bouncetime
time = datetime.now()
time = time.second * 1E6 + time.microsecond
if (gpio_obj.bouncetime is None or
(time - gpio_obj.lastcall >
gpio_obj.bouncetime * 1000) or
(gpio_obj.lastcall == 0) or gpio_obj.lastcall > time):
gpio_obj.lastcall = time
gpio_obj.event_occurred = True
#update to the original list
_mutex.acquire()
_gpio_event_list[chip_name][pin_num] = gpio_obj
_mutex.release()
# callback function
for cb_func in gpio_obj.callbacks:
cb_func()
# if interrupted by a signal, continue to start of the loop
except InterruptedError:
continue
except AttributeError:
break
finally:
if _mutex.locked():
_mutex.release()
_set_thread_exit_state(fileno)
thread.exit()
# This function waits for an edge event in blocking mode; the user must
# specify the file descriptor of the chip, the channel of the chip, the event handle,
# the time for debouncing in milliseconds, and the time limit to wait for the event.
# Return value: -2 for fatal errors, -1 if the edge is already being detected, 0 if a
# timeout occurred, and 1 if the event was valid
def blocking_wait_for_edge(chip_fd, chip_name, channel, request, bouncetime, timeout):
# check if gpio edge already added. Add if not already added
gpio_obj = gpio_event_added(chip_name, channel)
if gpio_obj != None:
return -1
else:
try:
fcntl.ioctl(chip_fd, cdev.GPIO_GET_LINEEVENT_IOCTL, request)
except (OSError, IOError) as e:
raise cdev.GPIOError(e.errno, "Opening input line event handle: " + e.strerror)
gpio_obj = _Gpios(request.fd, bouncetime)
_add_gpio_event(chip_name, channel, gpio_obj)
ret = select.select([request.fd], [], [], timeout)
if ret[0] == [request.fd]:
try:
data = os.read(request.fd, ctypes.sizeof(cdev.gpioevent_data))
except OSError as e:
raise cdev.GPIOError(e.errno, "Reading GPIO event: " + e.strerror)
event_data = cdev.gpioevent_data.from_buffer_copy(data)
if (event_data.id != cdev.GPIOEVENT_REQUEST_RISING_EDGE and
event_data.id != cdev.GPIOEVENT_REQUEST_FALLING_EDGE):
warnings.warn("Unknown event caught", RuntimeWarning)
return -2
return int(ret != [])
elif len(ret[0]) == 0:
# Timeout
return 0
return -2
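# A sketch of how a hypothetical caller might interpret the return codes
# above (chip_fd and request would be built as in the add_edge_detect sketch;
# the 5 second timeout is an arbitrary example value):
#
#   ret = blocking_wait_for_edge(chip_fd, chip_name, channel, request,
#                                bouncetime=10, timeout=5)
#   if ret == 1:
#       pass  # a valid edge event arrived
#   elif ret == 0:
#       pass  # timed out waiting for an edge
#   elif ret == -1:
#       pass  # edge detection is already registered on this channel
#   else:
#       pass  # -2: something fatal happened (e.g. unknown event id)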
# @brief clean up the event registered under the given chip name and channel
# Note that the event detection thread will also be removed
# @param[in] chip_name: the GPIO chip name/instance
# @param[in] channel: the pin number in specified mode (board or bcm)
def event_cleanup(chip_name, channel):
global _epoll_fd_blocking
    # remove all the events being detected in the event list
remove_edge_detect(chip_name, channel)
#unregister the device being polled
_mutex.acquire()
for gpio_chip in _gpio_event_list.copy():
        # Warning: this is only for a single-threaded solution
if channel not in _gpio_event_list[gpio_chip]:
# It is a design decision that every pin owns its epoll object
if _epoll_fd_blocking is not None:
_epoll_fd_blocking.close()
_epoll_fd_blocking = None
if channel in _epoll_fd_thread and _epoll_fd_thread[channel] is not None:
_epoll_fd_thread[channel].close()
del _epoll_fd_thread[channel]
_mutex.release()
|
jetson-gpio-master
|
lib/python/Jetson/GPIO/gpio_event.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# @File name: gpio_cdev.py
# @Date:
# @Last modified by:
# @Last Modified time: 6/6/2023
# @Description: This file provides the interface to the GPIO controller
# in the form of a character device. File operations such as open, close,
# ioctl, etc. are provided to interact with the GPIO controller.
import os
import fcntl
import ctypes
GPIO_HIGH = 1
GPIOHANDLE_REQUEST_INPUT = 0x1
GPIOHANDLE_REQUEST_OUTPUT = 0x2
GPIOEVENT_REQUEST_RISING_EDGE = 0x1
GPIOEVENT_REQUEST_FALLING_EDGE = 0x2
GPIOEVENT_REQUEST_BOTH_EDGES = 0x3
GPIO_GET_CHIPINFO_IOCTL = 0x8044B401
GPIO_GET_LINEINFO_IOCTL = 0xC048B402
GPIO_GET_LINEHANDLE_IOCTL = 0xC16CB403
GPIOHANDLE_GET_LINE_VALUES_IOCTL = 0xC040B408
GPIOHANDLE_SET_LINE_VALUES_IOCTL = 0xC040B409
GPIO_GET_LINEEVENT_IOCTL = 0xC030B404
# @brief the information about a GPIO chip
# @name: the Linux kernel name of the chip
# @label: a name for the chip
# @lines: number of GPIO lines on this chip
class gpiochip_info(ctypes.Structure):
_fields_ = [
('name', ctypes.c_char * 32),
('label', ctypes.c_char * 32),
('lines', ctypes.c_uint32),
]
# @brief the information about a GPIO handle request
# @lineoffsets: an array of lines, specified by offset index
# @flags: flags for the GPIO lines (the flag applies to all)
# @default_values: the default output value, expecting 0 or 1
# anything else will be interpreted as 1
# @consumer_label: a label for the selected GPIO line(s)
# @lines: number of lines requested in this request
# @fd: this field will contain a valid file handle on success (a value
# equal to or smaller than 0 indicates an error)
class gpiohandle_request(ctypes.Structure):
_fields_ = [
('lineoffsets', ctypes.c_uint32 * 64),
('flags', ctypes.c_uint32),
('default_values', ctypes.c_uint8 * 64),
('consumer_label', ctypes.c_char * 32),
('lines', ctypes.c_uint32),
('fd', ctypes.c_int),
]
# @brief the information of values on a GPIO handle
# @values: current state of a line (get), contain the desired
# target state (set)
class gpiohandle_data(ctypes.Structure):
_fields_ = [
('values', ctypes.c_uint8 * 64),
]
# @brief the information about a GPIO line
# @line_offset: the local offset on this GPIO device
# @flags: flags for this line
# @name: the name of this GPIO line
# @consumer: a functional name for the consumer of this GPIO line as set by
# whatever is using it
class gpioline_info(ctypes.Structure):
_fields_ = [
('line_offset', ctypes.c_uint32),
('flags', ctypes.c_uint32),
('name', ctypes.c_char * 32),
('consumer', ctypes.c_char * 32),
]
# @brief the information about a change in a GPIO line's status
# @timestamp: estimated time of status change occurrence (ns)
# @event_type: type of event
# @info: updated line info
class gpioline_info_changed(ctypes.Structure):
_fields_ = [
('line_info', gpioline_info),
('timestamp', ctypes.c_uint64),
('event_type', ctypes.c_uint32),
('padding', ctypes.c_uint32 * 5),
]
# @brief the information about a GPIO event request
# @lineoffset: the line to subscribe to events from in offset index
# @handleflags: handle flags for the GPIO line
# @eventflags: desired flags for the GPIO event line (what edge)
# @consumer_label: a consumer label for the selected GPIO line(s)
# @fd: contains a valid file handle if successful, otherwise a zero or
# negative value
class gpioevent_request(ctypes.Structure):
_fields_ = [
('lineoffset', ctypes.c_uint32),
('handleflags', ctypes.c_uint32),
('eventflags', ctypes.c_uint32),
('consumer_label', ctypes.c_char * 32),
('fd', ctypes.c_int),
]
# @brief the actual event being pushed to userspace
# @timestamp: best estimate of event occurrence's time (ns)
# @id: event identifier
class gpioevent_data(ctypes.Structure):
_fields_ = [
('timestamp', ctypes.c_uint64),
('id', ctypes.c_uint32),
]
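# A minimal sketch (not library code) of how raw bytes read from an event
# file descriptor map onto this struct. Here event_fd is assumed to be the
# fd filled into a gpioevent_request by GPIO_GET_LINEEVENT_IOCTL; the same
# decoding is performed by Jetson.GPIO's event handling code.
#
#   raw = os.read(event_fd, ctypes.sizeof(gpioevent_data))
#   event = gpioevent_data.from_buffer_copy(raw)
#   rising = (event.id == GPIOEVENT_REQUEST_RISING_EDGE)
#   timestamp_ns = event.timestamp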
class GPIOError(IOError):
"""Base class for GPIO errors."""
pass
# @brief open a chip by its name
# @param[in] gpio_chip: name of the chip
# @param[out] the file descriptor of the chip
def chip_open(gpio_chip):
try:
chip_fd = os.open(gpio_chip, os.O_RDONLY)
except OSError as e:
raise GPIOError(e.errno, "Opening GPIO chip: " + e.strerror)
return chip_fd
# @brief open a chip and check whether its label matches
# @param[in] label: label of the chip
# @param[in] gpio_device: path of the chip device (e.g. /dev/gpiochipN)
# @param[out] the file descriptor of the chip, or None if the label does not match
def chip_check_info(label, gpio_device):
chip_fd = chip_open(gpio_device)
chip_info = gpiochip_info()
try:
fcntl.ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, chip_info)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Querying GPIO chip info: " + e.strerror)
if label != chip_info.label.decode():
try:
close_chip(chip_fd)
except OSError as e:
raise GPIOError(e.errno, "Opening GPIO chip: " + e.strerror)
chip_fd = None
return chip_fd
# @brief open a chip by its label
# @param[in] label: label of the chip
# @param[out] the file descriptor of the chip
def chip_open_by_label(label):
dev = '/dev/'
for device in os.listdir(dev):
if device.startswith('gpiochip'):
gpio_device = dev + device
chip_fd = chip_check_info(label, gpio_device)
if chip_fd != None:
break
if chip_fd == None:
raise Exception("{}: No such gpio device registered".format(label))
return chip_fd
# @brief close a chip
# @param[in] chip_fd: the file descriptor of the chip
def close_chip(chip_fd):
if chip_fd is None:
return
try:
os.close(chip_fd)
except (OSError, IOError) as e:
pass
# @brief open a line of a chip
# @param[in] ch_info: ChannelInfo object of the channel desired to open
# @param[out] the file descriptor of the line
def open_line(ch_info, request):
try:
fcntl.ioctl(ch_info.chip_fd, GPIO_GET_LINEHANDLE_IOCTL, request)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Opening output line handle: " + e.strerror)
ch_info.line_handle = request.fd
# @brief close a line
# @param[in] line_handle: the file descriptor of the line
def close_line(line_handle):
if line_handle is None:
return
try:
os.close(line_handle)
except OSError as e:
raise GPIOError(e.errno, "Closing existing GPIO line: " + e.strerror)
# @brief build a request handle struct
# @param[in] line_offset: the offset of the line to its chip
# @param[in] direction: the direction of the line (in or out)
# @param[in] initial: initial value of the line
# @param[in] consumer: the consumer label that uses the line
# @param[out] the request handle struct
def request_handle(line_offset, direction, initial, consumer):
request = gpiohandle_request()
request.lineoffsets[0] = line_offset
request.flags = direction
if direction == GPIOHANDLE_REQUEST_OUTPUT:
request.default_values[0] = initial if initial is not None else GPIO_HIGH
else:
if initial is not None:
raise ValueError("initial parameter is not valid for inputs")
request.consumer_label = consumer.encode()
request.lines = 1
return request
# @brief build a request event struct
# @param[in] line_offset: the offset of the line to its chip
# @param[in] edge: event's detection edge
# @param[in] consumer: the consumer label that uses the line
def request_event(line_offset, edge, consumer):
request = gpioevent_request()
request.lineoffset = line_offset
request.handleflags = GPIOHANDLE_REQUEST_INPUT
request.eventflags = edge
request.consumer_label = consumer.encode()
return request
# @brief read the value of a line
# @param[in] line_handle: file descriptor of the line
# @param[out] the value of the line
def get_value(line_handle):
data = gpiohandle_data()
try:
fcntl.ioctl(line_handle, GPIOHANDLE_GET_LINE_VALUES_IOCTL, data)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Getting line value: " + e.strerror)
return data.values[0]
# @brief write the value of a line
# @param[in] line_handle: file descriptor of the line
# @param[in] value: the value to set the line
def set_value(line_handle, value):
data = gpiohandle_data()
data.values[0] = value
try:
fcntl.ioctl(line_handle, GPIOHANDLE_SET_LINE_VALUES_IOCTL, data)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Setting line value: " + e.strerror)
|
jetson-gpio-master
|
lib/python/Jetson/GPIO/gpio_cdev.py
|
from .gpio import *
VERSION = '2.1.3'
|
jetson-gpio-master
|
lib/python/Jetson/GPIO/__init__.py
|
# Copyright (c) 2012-2017 Ben Croston <ben@croston.org>.
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from Jetson.GPIO import gpio_event as event
from Jetson.GPIO import gpio_pin_data
from Jetson.GPIO import gpio_cdev
import os
import warnings
import time
# GPIO character device root
_GPIOCHIP_ROOT = "/dev/gpiochip0"
if not os.access(_GPIOCHIP_ROOT, os.W_OK):
raise RuntimeError("The current user does not have permissions set to access the library functionalites. Please configure permissions or use the root user to run this. It is also possible that {} does not exist. Please check if that file is present.".format(_GPIOCHIP_ROOT))
# Pin Numbering Modes
BOARD = 10
BCM = 11
TEGRA_SOC = 1000
CVM = 1001
# The constants and their offsets are implemented to prevent HIGH from being
# used in place of other variables (i.e. HIGH and RISING should not be
# interchangeable)
# Pull up/down options
_PUD_OFFSET = 20
PUD_OFF = 0 + _PUD_OFFSET
PUD_DOWN = 1 + _PUD_OFFSET
PUD_UP = 2 + _PUD_OFFSET
HIGH = 1
LOW = 0
# Edge possibilities
# These values (with _EDGE_OFFSET subtracted) must match gpio_event.py:*_EDGE
_EDGE_OFFSET = 30
RISING = 1 + _EDGE_OFFSET
FALLING = 2 + _EDGE_OFFSET
BOTH = 3 + _EDGE_OFFSET
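# For example, RISING evaluates to 31 while HIGH is 1, so accidentally
# passing HIGH where an edge constant is expected cannot be mistaken for a
# valid edge value.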
# GPIO directions. UNKNOWN constant is for gpios that are not yet setup
UNKNOWN = -1
OUT = 0
IN = 1
HARD_PWM = 43
model, JETSON_INFO, _channel_data_by_mode = gpio_pin_data.get_data()
RPI_INFO = JETSON_INFO
# Dictionary used as a lookup table from a pin to its info object
# key: channel, value: ChannelInfo object
_channel_data = {}
_gpio_warnings = True
_gpio_mode = None
# Dictionary used as a lookup table from a pin to its configuration
# key: channel, value: channel configuration (IN, OUT, or HARD_PWM)
_channel_configuration = {}
# Dictionary used as a lookup table from GPIO chip name to chip fd
_chip_fd = {}
def _validate_mode_set():
if _gpio_mode is None:
raise RuntimeError("Please set pin numbering mode using "
"GPIO.setmode(GPIO.BOARD), GPIO.setmode(GPIO.BCM), "
"GPIO.setmode(GPIO.TEGRA_SOC) or "
"GPIO.setmode(GPIO.CVM)")
def _make_iterable(iterable, single_length=None):
if isinstance(iterable, str):
iterable = [iterable]
try:
for x in iterable:
break
except:
iterable = [iterable]
if single_length is not None and len(iterable) == 1:
iterable = iterable * single_length
return iterable
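# For example (illustrative only): _make_iterable(12) returns [12],
# _make_iterable("gpio01") returns ["gpio01"], and
# _make_iterable(HIGH, single_length=3) returns [HIGH, HIGH, HIGH], which is
# how a single value is broadcast across several channels.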
def _channel_to_info_lookup(channel, need_gpio, need_pwm):
if channel not in _channel_data:
raise ValueError("Channel %s is invalid" % str(channel))
ch_info = _channel_data[channel]
if need_pwm and ch_info.pwm_chip_dir is None:
raise ValueError("Channel %s is not a PWM" % str(channel))
return ch_info
def _channel_to_info(channel, need_gpio=False, need_pwm=False):
_validate_mode_set()
return _channel_to_info_lookup(channel, need_gpio, need_pwm)
def _channels_to_infos(channels, need_gpio=False, need_pwm=False):
_validate_mode_set()
return [_channel_to_info_lookup(c, need_gpio, need_pwm)
for c in _make_iterable(channels)]
def _sysfs_channel_configuration(ch_info):
"""Return the current configuration of a channel as reported by sysfs. Any
of IN, OUT, PWM, or None may be returned."""
if ch_info.pwm_chip_dir is not None:
pwm_dir = "%s/pwm%i" % (ch_info.pwm_chip_dir, ch_info.pwm_id)
if os.path.exists(pwm_dir):
return HARD_PWM
return None
def _app_channel_configuration(ch_info):
"""Return the current configuration of a channel as requested by this
module in this process. Any of IN, OUT, or None may be returned."""
return _channel_configuration.get(ch_info.channel, None)
def _chip_fd_map(ch_info):
return _chip_fd.get(ch_info.gpio_chip, None)
def _do_one_channel(ch_info, direction, initial, consumer):
ch_info.chip_fd = _chip_fd_map(ch_info)
if not ch_info.chip_fd:
ch_info.chip_fd = gpio_cdev.chip_open_by_label(ch_info.gpio_chip)
_chip_fd[ch_info.gpio_chip] = ch_info.chip_fd
cdev_direction = gpio_cdev.GPIOHANDLE_REQUEST_OUTPUT if direction == OUT else gpio_cdev.GPIOHANDLE_REQUEST_INPUT
request = gpio_cdev.request_handle(ch_info.line_offset, cdev_direction, initial, consumer)
gpio_cdev.open_line(ch_info, request)
_channel_configuration[ch_info.channel] = direction
def _pwm_path(ch_info):
return ch_info.pwm_chip_dir + '/pwm' + str(ch_info.pwm_id)
def _pwm_export_path(ch_info):
return ch_info.pwm_chip_dir + '/export'
def _pwm_unexport_path(ch_info):
return ch_info.pwm_chip_dir + '/unexport'
def _pwm_period_path(ch_info):
return _pwm_path(ch_info) + "/period"
def _pwm_duty_cycle_path(ch_info):
return _pwm_path(ch_info) + "/duty_cycle"
def _pwm_enable_path(ch_info):
return _pwm_path(ch_info) + "/enable"
def _export_pwm(ch_info):
if not os.path.exists(_pwm_path(ch_info)):
with open(_pwm_export_path(ch_info), 'w') as f:
f.write(str(ch_info.pwm_id))
enable_path = _pwm_enable_path(ch_info)
while not os.access(enable_path, os.R_OK | os.W_OK):
time.sleep(0.01)
ch_info.f_duty_cycle = open(_pwm_duty_cycle_path(ch_info), 'r+')
def _unexport_pwm(ch_info):
ch_info.f_duty_cycle.close()
with open(_pwm_unexport_path(ch_info), 'w') as f:
f.write(str(ch_info.pwm_id))
def _set_pwm_period(ch_info, period_ns):
with open(_pwm_period_path(ch_info), 'w') as f:
f.write(str(period_ns))
def _set_pwm_duty_cycle(ch_info, duty_cycle_ns):
    # On boot, the period and duty cycle are both 0. In this state, the period
# must be set first; any configuration change made while period==0 is
# rejected. This is fine if we actually want a duty cycle of 0. Later, once
# any period has been set, we will always be able to set a duty cycle of 0.
# The code could be written to always read the current value, and only
# write the value if the desired value is different. However, we enable
# this check only for the 0 duty cycle case, to avoid having to read the
# current value every time the duty cycle is set.
if not duty_cycle_ns:
ch_info.f_duty_cycle.seek(0)
cur = ch_info.f_duty_cycle.read().strip()
if cur == '0':
return
ch_info.f_duty_cycle.seek(0)
ch_info.f_duty_cycle.write(str(duty_cycle_ns))
ch_info.f_duty_cycle.flush()
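# For example (a sketch, not library code): on a freshly exported PWM channel
# the period must be written before any non-zero duty cycle, so a typical
# ordering with these helpers would be (the nanosecond values are
# illustrative only):
#
#   _export_pwm(ch_info)
#   _set_pwm_period(ch_info, 1000000)      # 1 ms period first
#   _set_pwm_duty_cycle(ch_info, 500000)   # then 50% duty cycle
#   _enable_pwm(ch_info)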
def _enable_pwm(ch_info):
with open(_pwm_enable_path(ch_info), 'w') as f:
f.write("1")
def _disable_pwm(ch_info):
with open(_pwm_enable_path(ch_info), 'w') as f:
f.write("0")
# Clean up all resources taken by a channel,
# including pwm, chip and lines
def _cleanup_one(ch_info):
#clean up pwm config
app_cfg = _channel_configuration[ch_info.channel]
if app_cfg == HARD_PWM:
_disable_pwm(ch_info)
_unexport_pwm(ch_info)
else:
event.event_cleanup(ch_info.gpio_chip, ch_info.channel)
del _channel_configuration[ch_info.channel]
# clean gpio config
# clean up chip
if ch_info.chip_fd:
gpio_cdev.close_chip(ch_info.chip_fd)
ch_info.chip_fd = None
if ch_info.gpio_chip in _chip_fd:
del _chip_fd[ch_info.gpio_chip]
# clean up line
if ch_info.line_handle:
gpio_cdev.close_line(ch_info.line_handle)
ch_info.line_handle = None
def _cleanup_all():
global _gpio_mode
for channel in list(_channel_configuration.keys()):
ch_info = _channel_to_info(channel)
_cleanup_one(ch_info)
_gpio_mode = None
# Function used to enable/disable warnings during setup and cleanup.
# Param -> state is a bool
def setwarnings(state):
global _gpio_warnings
_gpio_warnings = bool(state)
# Function used to set the pin numbering mode. Possible mode values are BOARD,
# BCM, TEGRA_SOC and CVM
def setmode(mode):
global _gpio_mode, _channel_data
# check if a different mode has been set
if _gpio_mode and mode != _gpio_mode:
raise ValueError("A different mode has already been set!")
mode_map = {
BOARD: 'BOARD',
BCM: 'BCM',
CVM: 'CVM',
TEGRA_SOC: 'TEGRA_SOC',
}
# check if mode parameter is valid
if mode not in mode_map:
raise ValueError("An invalid mode was passed to setmode()!")
_channel_data = _channel_data_by_mode[mode_map[mode]]
_gpio_mode = mode
# Function used to get the currently set pin numbering mode
def getmode():
return _gpio_mode
# Mutable class to represent a default function argument.
# See https://stackoverflow.com/a/57628817/2767322
class _Default:
def __init__(self, val):
self.val = val
# Function used to setup individual pins or lists/tuples of pins as
# Input or Output. Param channels must be an integer or list/tuple of integers,
# direction must be IN or OUT, pull_up_down must be PUD_OFF, PUD_UP or
# PUD_DOWN and is only valid when direction is IN, initial must be HIGH or LOW
# and is only valid when direction is OUT
def setup(channels, direction, pull_up_down=_Default(PUD_OFF), initial=None, consumer='Jetson-gpio'):
if pull_up_down in setup.__defaults__:
pull_up_down_explicit = False
pull_up_down = pull_up_down.val
else:
pull_up_down_explicit = True
ch_infos = _channels_to_infos(channels, need_gpio=True)
# check direction is valid
if direction != OUT and direction != IN:
raise ValueError("An invalid direction was passed to setup()")
# check if pullup/down is used with output
if direction == OUT and pull_up_down != PUD_OFF:
raise ValueError("pull_up_down parameter is not valid for outputs")
# check if pullup/down value is specified and/or valid
if pull_up_down_explicit:
warnings.warn("Jetson.GPIO ignores setup()'s pull_up_down parameter")
if (pull_up_down != PUD_OFF and pull_up_down != PUD_UP and
pull_up_down != PUD_DOWN):
raise ValueError("Invalid value for pull_up_down; should be one of"
"PUD_OFF, PUD_UP or PUD_DOWN")
for ch_info in ch_infos:
if ch_info.channel in _channel_configuration:
_cleanup_one(ch_info)
if direction == OUT:
initial = _make_iterable(initial, len(ch_infos))
if len(initial) != len(ch_infos):
raise RuntimeError("Number of values != number of channels")
for ch_info, init in zip(ch_infos, initial):
_do_one_channel(ch_info, direction, init, consumer)
else:
for ch_info in ch_infos:
_do_one_channel(ch_info, direction, initial, consumer)
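# Example usage of setup() (illustrative only; the board pin numbers are
# hypothetical and must exist on the detected board):
#   GPIO.setmode(GPIO.BOARD)
#   GPIO.setup(18, GPIO.IN)
#   GPIO.setup([29, 31], GPIO.OUT, initial=GPIO.LOW)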
# Function used to cleanup channels at the end of the program.
# The param channel can be an integer or list/tuple of integers specifying the
# channels to be cleaned up. If no channel is provided, all channels are
# cleaned
def cleanup(channel=None):
# warn if no channel is setup
if _gpio_mode is None:
if _gpio_warnings:
warnings.warn("No channels have been set up yet - nothing to "
"clean up! Try cleaning up at the end of your "
"program instead!", RuntimeWarning)
return
# clean all channels if no channel param provided
if channel is None:
_cleanup_all()
return
ch_infos = _channels_to_infos(channel)
for ch_info in ch_infos:
if ch_info.channel in _channel_configuration:
_cleanup_one(ch_info)
# Function used to return the current value of the specified channel.
# Function returns either HIGH or LOW
def input(channel):
ch_info = _channel_to_info(channel, need_gpio=True)
cur_cfg = _app_channel_configuration(ch_info)
if cur_cfg not in [IN, OUT]:
raise RuntimeError("You must setup() the GPIO channel first")
# _GPIOHANDLE_GET_LINE_VALUES_IOCTL, _CGpiohandleData
return gpio_cdev.get_value(ch_info.line_handle)
# Function used to set a value to a channel or list/tuple of channels.
# Parameter channels must be an integer or list/tuple of integers.
# Values must be either HIGH or LOW or list/tuple
# of HIGH and LOW with the same length as the channels list/tuple
def output(channels, values):
ch_infos = _channels_to_infos(channels, need_gpio=True)
values = _make_iterable(values, len(ch_infos))
if len(values) != len(ch_infos):
raise RuntimeError("Number of values != number of channels")
# check that channels have been set as output
if any(_app_channel_configuration(ch_info) != OUT for ch_info in ch_infos):
raise RuntimeError("The GPIO channel has not been set up as an "
"OUTPUT")
for ch_info, value in zip(ch_infos, values):
gpio_cdev.set_value(ch_info.line_handle, value)
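# Example usage of output()/input() (illustrative only; pins must already be
# configured with setup() as shown above):
#   GPIO.output(29, GPIO.HIGH)
#   GPIO.output([29, 31], [GPIO.HIGH, GPIO.LOW])
#   value = GPIO.input(18)          # returns HIGH or LOW
#   GPIO.cleanup()                  # release all configured channels when done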
# Function used to add threaded event detection for a specified gpio channel.
# Param gpio must be an integer specifying the channel, edge must be RISING,
# FALLING or BOTH. A callback function to be called when the event is detected
# and an integer bouncetime in milliseconds can optionally be provided. An
# optional polltime in seconds can be provided to indicate the max time to
# wait for an edge. Note that each channel allows only one event; a duplicate
# event registration will be ignored.
def add_event_detect(channel, edge, callback=None, bouncetime=None, polltime=0.2):
ch_info = _channel_to_info(channel, need_gpio=True)
if (not callable(callback)) and callback is not None:
raise TypeError("Callback Parameter must be callable")
# channel must be setup as input
if _app_channel_configuration(ch_info) != IN:
raise RuntimeError("You must setup() the GPIO channel as an input "
"first")
# edge must be rising, falling or both
if edge != RISING and edge != FALLING and edge != BOTH:
raise ValueError("The edge must be set to RISING, FALLING, or BOTH")
else:
edge = gpio_cdev.GPIOEVENT_REQUEST_RISING_EDGE if edge == RISING else gpio_cdev.GPIOEVENT_REQUEST_FALLING_EDGE if edge == FALLING else gpio_cdev.GPIOEVENT_REQUEST_BOTH_EDGES
# if bouncetime is provided, it must be int and greater than 0
if bouncetime is not None:
if type(bouncetime) != int:
raise TypeError("bouncetime must be an integer")
elif bouncetime < 0:
raise ValueError("bouncetime must be an integer greater than 0")
if ch_info.line_handle:
gpio_cdev.close_line(ch_info.line_handle)
request = gpio_cdev.request_event(ch_info.line_offset, edge, ch_info.consumer)
event.add_edge_detect(ch_info.chip_fd, ch_info.gpio_chip, channel, request, bouncetime, polltime)
if callback is not None:
event.add_edge_callback(ch_info.gpio_chip, channel, lambda: callback(channel))
# Wait until the polling thread is up; clearing the device buffer takes time
time.sleep(1)
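# Example usage of add_event_detect() (illustrative only; the pin number is
# hypothetical and must be configured as an input first):
#   def on_edge(channel):
#       print("edge detected on channel", channel)
#   GPIO.setup(18, GPIO.IN)
#   GPIO.add_event_detect(18, GPIO.RISING, callback=on_edge, bouncetime=200)
#   ...
#   GPIO.remove_event_detect(18)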
# Function used to remove event detection for channel.
# The timeout param is the max time to wait for the event detection thread to end.
def remove_event_detect(channel, timeout=0.5):
ch_info = _channel_to_info(channel, need_gpio=True)
event.remove_edge_detect(ch_info.gpio_chip, channel, timeout)
# Function used to check if an event occurred on the specified channel.
# Param channel must be an integer.
# This function return True or False
def event_detected(channel):
ch_info = _channel_to_info(channel, need_gpio=True)
# channel must be setup as input
if _app_channel_configuration(ch_info) != IN:
raise RuntimeError("You must setup() the GPIO channel as an "
"input first")
return event.edge_event_detected(ch_info.gpio_chip, channel)
# Function used to add a callback function to channel, after it has been
# registered for events using add_event_detect()
def add_event_callback(channel, callback):
ch_info = _channel_to_info(channel, need_gpio=True)
if not callable(callback):
raise TypeError("Parameter must be callable")
if _app_channel_configuration(ch_info) != IN:
raise RuntimeError("You must setup() the GPIO channel as an "
"input first")
if not event.gpio_event_added(ch_info.gpio_chip, channel):
raise RuntimeError("Add event detection using add_event_detect first "
"before adding a callback")
event.add_edge_callback(ch_info.gpio_chip, channel, lambda: callback(channel))
# Wait until the polling thread is up; clearing the device buffer takes time
time.sleep(1)
# Function used to wait for an edge event in blocking mode; it is also one-shot.
def wait_for_edge(channel, edge, bouncetime=None, timeout=None):
ch_info = _channel_to_info(channel, need_gpio=True)
# channel must be setup as input
if _app_channel_configuration(ch_info) != IN:
raise RuntimeError("You must setup() the GPIO channel as an input "
"first")
# edge provided must be rising, falling or both
if edge != RISING and edge != FALLING and edge != BOTH:
raise ValueError("The edge must be set to RISING, FALLING_EDGE "
"or BOTH")
else:
edge = gpio_cdev.GPIOEVENT_REQUEST_RISING_EDGE if edge == RISING else gpio_cdev.GPIOEVENT_REQUEST_FALLING_EDGE if edge == FALLING else gpio_cdev.GPIOEVENT_REQUEST_BOTH_EDGES
# if bouncetime is provided, it must be int and greater than 0
if bouncetime is not None:
if type(bouncetime) != int:
raise TypeError("bouncetime must be an integer")
elif bouncetime < 0:
raise ValueError("bouncetime must be an integer greater than 0")
# if timeout is specified, it must be an int and greater than 0
if timeout is not None:
if type(timeout) != int:
raise TypeError("Timeout must be an integer")
elif timeout < 0:
raise ValueError("Timeout must greater than 0")
if ch_info.line_handle:
gpio_cdev.close_line(ch_info.line_handle)
request = gpio_cdev.request_event(ch_info.line_offset, edge, ch_info.consumer)
result = event.blocking_wait_for_edge(ch_info.chip_fd, ch_info.gpio_chip, channel, request, bouncetime, timeout)
# If not error, result == channel. If timeout occurs while waiting,
# result == None. If error occurs, result == -1 means channel is
# registered for conflicting edge detection, result == -2 means an error
# occurred while registering event or polling
if not result:
return None
elif result == -1:
raise RuntimeError("Conflicting edge detection event already exists "
"for this GPIO channel")
elif result == -2:
raise RuntimeError("Error waiting for edge")
return channel
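# Example usage of wait_for_edge() (illustrative only; the pin number is
# hypothetical and the timeout units follow event.blocking_wait_for_edge):
#   GPIO.setup(18, GPIO.IN)
#   ch = GPIO.wait_for_edge(18, GPIO.FALLING, timeout=5000)
#   if ch is None:
#       print("timed out waiting for an edge")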
# Function used to check the currently set function of the channel specified.
# Param channel must be an integer. The function returns either IN, OUT,
# or UNKNOWN
def gpio_function(channel):
ch_info = _channel_to_info(channel)
func = _app_channel_configuration(ch_info)
if func is None:
func = UNKNOWN
return func
class PWM(object):
def __init__(self, channel, frequency_hz):
self._ch_info = _channel_to_info(channel, need_pwm=True)
app_cfg = _app_channel_configuration(self._ch_info)
if app_cfg == HARD_PWM:
raise ValueError("Can't create duplicate PWM objects")
# Apps typically set up channels as GPIO before making them be PWM,
# because RPi.GPIO does soft-PWM. We must undo the GPIO export to
# allow HW PWM to run on the pin.
if app_cfg in [IN, OUT]:
cleanup(channel)
if _gpio_warnings:
sysfs_cfg = _sysfs_channel_configuration(self._ch_info)
app_cfg = _app_channel_configuration(self._ch_info)
# warn if channel has been setup external to current program
if app_cfg is None and sysfs_cfg is not None:
warnings.warn(
"This channel is already in use, continuing anyway. "
"Use GPIO.setwarnings(False) to disable warnings",
RuntimeWarning)
_export_pwm(self._ch_info)
self._started = False
_set_pwm_duty_cycle(self._ch_info, 0)
# Use a sentinel that can never equal the new frequency_hz, so the first
# _reconfigure() call below always treats it as a frequency change
self._frequency_hz = -1 * frequency_hz
self._reconfigure(frequency_hz, 0.0)
_channel_configuration[channel] = HARD_PWM
def __del__(self):
if _channel_configuration.get(self._ch_info.channel, None) != HARD_PWM:
# The user probably ran cleanup() on the channel already, so avoid
# attempts to repeat the cleanup operations.
return
self.stop()
_unexport_pwm(self._ch_info)
del _channel_configuration[self._ch_info.channel]
def start(self, duty_cycle_percent):
self._reconfigure(self._frequency_hz, duty_cycle_percent, start=True)
def ChangeFrequency(self, frequency_hz):
self._reconfigure(frequency_hz, self._duty_cycle_percent)
def ChangeDutyCycle(self, duty_cycle_percent):
self._reconfigure(self._frequency_hz, duty_cycle_percent)
def stop(self):
if not self._started:
return
_disable_pwm(self._ch_info)
def _reconfigure(self, frequency_hz, duty_cycle_percent, start=False):
if duty_cycle_percent < 0.0 or duty_cycle_percent > 100.0:
raise ValueError("")
freq_change = start or (frequency_hz != self._frequency_hz)
stop = self._started and freq_change
if stop:
self._started = False
_disable_pwm(self._ch_info)
if freq_change:
self._frequency_hz = frequency_hz
self._period_ns = int(1000000000.0 / frequency_hz)
# Reset the duty cycle first, in case the previous duty
# cycle is higher than the new period
_set_pwm_duty_cycle(self._ch_info, 0)
_set_pwm_period(self._ch_info, self._period_ns)
self._duty_cycle_percent = duty_cycle_percent
self._duty_cycle_ns = int(self._period_ns *
(duty_cycle_percent / 100.0))
_set_pwm_duty_cycle(self._ch_info, self._duty_cycle_ns)
if stop or start:
_enable_pwm(self._ch_info)
self._started = True
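# Example usage of the PWM class (illustrative only; the chosen board pin must
# support hardware PWM on the detected board, e.g. pin 33 on several models):
#   GPIO.setmode(GPIO.BOARD)
#   p = GPIO.PWM(33, 50)          # 50 Hz
#   p.start(25.0)                 # 25% duty cycle
#   p.ChangeDutyCycle(75.0)
#   p.ChangeFrequency(100)
#   p.stop()
#   GPIO.cleanup()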
|
jetson-gpio-master
|
lib/python/Jetson/GPIO/gpio.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import os.path
import sys
CLARA_AGX_XAVIER = 'CLARA_AGX_XAVIER'
JETSON_NX = 'JETSON_NX'
JETSON_XAVIER = 'JETSON_XAVIER'
JETSON_TX2 = 'JETSON_TX2'
JETSON_TX1 = 'JETSON_TX1'
JETSON_NANO = 'JETSON_NANO'
JETSON_TX2_NX='JETSON_TX2_NX'
JETSON_ORIN='JETSON_ORIN'
JETSON_ORIN_NX='JETSON_ORIN_NX'
JETSON_ORIN_NANO='JETSON_ORIN_NANO'
JETSON_MODELS = [JETSON_TX1, JETSON_TX2, CLARA_AGX_XAVIER, JETSON_TX2_NX, JETSON_XAVIER, JETSON_NANO, JETSON_NX, JETSON_ORIN, JETSON_ORIN_NX, JETSON_ORIN_NANO]
# These arrays contain tuples of all the relevant GPIO data for each Jetson
# Platform. The fields are:
# - Linux GPIO pin number (line offset inside chip, not global),
# - Linux exported GPIO name,
# (map from chip GPIO count to value, to cater for different naming schemes)
# (entries omitted if exported filename is gpio%i)
# - GPIO chip name/instance
# - Pin number (BOARD mode)
# - Pin number (BCM mode)
# - Pin name (CVM mode)
# - Pin name (TEGRA_SOC mode)
# - PWM chip sysfs directory
# - PWM ID within PWM chip
# The values are used to generate dictionaries that map the corresponding pin
# mode numbers to the Linux GPIO pin number and GPIO chip directory
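# For example, the first Jetson Orin NX entry below,
#   (144, 'PAC.06', "tegra234-gpio", 7, 4, 'GPIO09', 'GP167', None, None),
# describes line offset 144 (named 'PAC.06') on chip "tegra234-gpio", exposed
# as board pin 7, BCM pin 4, CVM name 'GPIO09' and TEGRA_SOC name 'GP167',
# with no hardware PWM attached.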
JETSON_ORIN_NX_PIN_DEFS = [
(144, 'PAC.06', "tegra234-gpio", 7, 4, 'GPIO09', 'GP167', None, None),
(112, 'PR.04', "tegra234-gpio", 11, 17, 'UART1_RTS', 'GP72_UART1_RTS_N', None, None),
(50, 'PH.07', "tegra234-gpio", 12, 18, 'I2S0_SCLK', 'GP122', None, None),
(122, 'PY.00', "tegra234-gpio", 13, 27, 'SPI1_SCK', 'GP36_SPI3_CLK', None, None),
(85, 'PN.01', "tegra234-gpio", 15, 22, 'GPIO12', 'GP88_PWM1', '3280000.pwm', 0),
(126, 'PY.04', "tegra234-gpio", 16, 23, 'SPI1_CS1', 'GP40_SPI3_CS1_N', None, None),
(125, 'PY.03', "tegra234-gpio", 18, 24, 'SPI1_CS0', 'GP39_SPI3_CS0_N', None, None),
(135, 'PZ.05', "tegra234-gpio", 19, 10, 'SPI0_MOSI', 'GP49_SPI1_MOSI', None, None),
(134, 'PZ.04', "tegra234-gpio", 21, 9, 'SPI0_MISO', 'GP48_SPI1_MISO', None, None),
(123, 'PY.01', "tegra234-gpio", 22, 25, 'SPI1_MISO', 'GP37_SPI3_MISO', None, None),
(133, 'PZ.03', "tegra234-gpio", 23, 11, 'SPI0_SCK', 'GP47_SPI1_CLK', None, None),
(136, 'PZ.06', "tegra234-gpio", 24, 8, 'SPI0_CS0', 'GP50_SPI1_CS0_N', None, None),
(137, 'PZ.07', "tegra234-gpio", 26, 7, 'SPI0_CS1', 'GP51_SPI1_CS1_N', None, None),
(105, 'PQ.05', "tegra234-gpio", 29, 5, 'GPIO01', 'GP65', None, None),
(106, 'PQ.06', "tegra234-gpio", 31, 6, 'GPIO11', 'GP66', None, None),
(41, 'PG.06', "tegra234-gpio", 32, 12, 'GPIO07', 'GP113_PWM7', '32e0000.pwm', 0),
(43, 'PH.00', "tegra234-gpio", 33, 13, 'GPIO13', 'GP115', '32c0000.pwm', 0),
(53, 'PI.02', "tegra234-gpio", 35, 19, 'I2S0_FS', 'GP125', None, None),
(113, 'PR.05', "tegra234-gpio", 36, 16, 'UART1_CTS', 'GP73_UART1_CTS_N', None, None),
(124, 'PY.02', "tegra234-gpio", 37, 26, 'SPI1_MOSI', 'GP38_SPI3_MOSI', None, None),
(52, 'PI.01', "tegra234-gpio", 38, 20, 'I2S0_SDIN', 'GP124', None, None),
(51, 'PI.00', "tegra234-gpio", 40, 21, 'I2S0_SDOUT', 'GP123', None, None)
]
compats_jetson_orins_nx = (
"nvidia,p3509-0000+p3767-0000",
"nvidia,p3768-0000+p3767-0000",
"nvidia,p3509-0000+p3767-0001",
"nvidia,p3768-0000+p3767-0001",
)
compats_jetson_orins_nano = (
"nvidia,p3509-0000+p3767-0003",
"nvidia,p3768-0000+p3767-0003",
"nvidia,p3509-0000+p3767-0004",
"nvidia,p3768-0000+p3767-0004",
"nvidia,p3509-0000+p3767-0005",
"nvidia,p3768-0000+p3767-0005",
)
JETSON_ORIN_PIN_DEFS = [
(106, 'PQ.06', "tegra234-gpio", 7, 4, 'MCLK05', 'GP66', None, None),
# Output-only (due to base board)
(112, 'PR.04', "tegra234-gpio", 11, 17, 'UART1_RTS', 'GP72_UART1_RTS_N', None, None),
(50, 'PH.07', "tegra234-gpio", 12, 18, 'I2S2_CLK', 'GP122', None, None),
(108, 'PR.00', "tegra234-gpio", 13, 27, 'PWM01', 'GP68', None, None),
(85, 'PN.01', "tegra234-gpio", 15, 22, 'GPIO27', 'GP88_PWM1', '3280000.pwm', 0),
(9, 'PBB.01', "tegra234-gpio-aon", 16, 23, 'GPIO08', 'GP26', None, None),
(43, 'PH.00', "tegra234-gpio", 18, 24, 'GPIO35', 'GP115', '32c0000.pwm', 0),
(135, 'PZ.05', "tegra234-gpio", 19, 10, 'SPI1_MOSI', 'GP49_SPI1_MOSI', None, None),
(134, 'PZ.04', "tegra234-gpio", 21, 9, 'SPI1_MISO', 'GP48_SPI1_MISO', None, None),
(96, 'PP.04', "tegra234-gpio", 22, 25, 'GPIO17', 'GP56', None, None),
(133, 'PZ.03', "tegra234-gpio", 23, 11, 'SPI1_CLK', 'GP47_SPI1_CLK', None, None),
(136, 'PZ.06', "tegra234-gpio", 24, 8, 'SPI1_CS0_N', 'GP50_SPI1_CS0_N', None, None),
(137, 'PZ.07', "tegra234-gpio", 26, 7, 'SPI1_CS1_N', 'GP51_SPI1_CS1_N', None, None),
(1, 'PAA.01', "tegra234-gpio-aon", 29, 5, 'CAN0_DIN', 'GP18_CAN0_DIN', None, None),
(0, 'PAA.00', "tegra234-gpio-aon", 31, 6, 'CAN0_DOUT', 'GP17_CAN0_DOUT', None, None),
(8, 'PBB.00', "tegra234-gpio-aon", 32, 12, 'GPIO09', 'GP25', None, None),
(2, 'PAA.02', "tegra234-gpio-aon", 33, 13, 'CAN1_DOUT', 'GP19_CAN1_DOUT', None, None),
(53, 'PI.02', "tegra234-gpio", 35, 19, 'I2S2_FS', 'GP125', None, None),
(113, 'PR.05', "tegra234-gpio", 36, 16, 'UART1_CTS', 'GP73_UART1_CTS_N', None, None),
(3, 'PAA.03', "tegra234-gpio-aon", 37, 26, 'CAN1_DIN', 'GP20_CAN1_DIN', None, None),
(52, 'PI.01', "tegra234-gpio", 38, 20, 'I2S2_DIN', 'GP124', None, None),
(51, 'PI.00', "tegra234-gpio", 40, 21, 'I2S2_DOUT', 'GP123', None, None)
]
compats_jetson_orins = (
'nvidia,p3737-0000+p3701-0000',
'nvidia,p3737-0000+p3701-0004',
'nvidia,p3737-0000+p3701-0008',
'nvidia,p3737-0000+p3701-0005',
'nvidia,p3737-0000+p3701-0001',
)
CLARA_AGX_XAVIER_PIN_DEFS = [
(106, 'PQ.06', "tegra194-gpio", 7, 4, 'MCLK05', 'SOC_GPIO42', None, None),
(112, 'PR.04', "tegra194-gpio", 11, 17, 'UART1_RTS', 'UART1_RTS', None, None),
(51, 'PH.07', "tegra194-gpio", 12, 18, 'I2S2_CLK', 'DAP2_SCLK', None, None),
(96, 'PP.04', "tegra194-gpio", 13, 27, 'GPIO32', 'SOC_GPIO04', None, None),
# Older versions of L4T don't enable this PWM controller in DT, so this PWM
# channel may not be available.
(84, 'PN.01', "tegra194-gpio", 15, 22, 'GPIO27', 'SOC_GPIO54', '3280000.pwm', 0),
(8, 'PBB.00', "tegra194-gpio-aon", 16, 23, 'GPIO8', 'CAN1_STB', None, None),
(44, 'PH.00', "tegra194-gpio", 18, 24, 'GPIO35', 'SOC_GPIO12', '32c0000.pwm', 0),
(162, 'PZ.05', "tegra194-gpio", 19, 10, 'SPI1_MOSI', 'SPI1_MOSI', None, None),
(161, 'PZ.04', "tegra194-gpio", 21, 9, 'SPI1_MISO', 'SPI1_MISO', None, None),
(101, 'PQ.01', "tegra194-gpio", 22, 25, 'GPIO17', 'SOC_GPIO21', None, None),
(160, 'PZ.03', "tegra194-gpio", 23, 11, 'SPI1_CLK', 'SPI1_SCK', None, None),
(163, 'PZ.06', "tegra194-gpio", 24, 8, 'SPI1_CS0_N', 'SPI1_CS0_N', None, None),
(164, 'PZ.07', "tegra194-gpio", 26, 7, 'SPI1_CS1_N', 'SPI1_CS1_N', None, None),
(3, 'PAA.03', "tegra194-gpio-aon", 29, 5, 'CAN0_DIN', 'CAN0_DIN', None, None),
(2, 'PAA.02', "tegra194-gpio-aon", 31, 6, 'CAN0_DOUT', 'CAN0_DOUT', None, None),
(9, 'PBB.01', "tegra194-gpio-aon", 32, 12, 'GPIO9', 'CAN1_EN', None, None),
(0, 'PAA.00', "tegra194-gpio-aon", 33, 13, 'CAN1_DOUT', 'CAN1_DOUT', None, None),
(54, 'PI.02', "tegra194-gpio", 35, 19, 'I2S2_FS', 'DAP2_FS', None, None),
# Input-only (due to base board)
(113, 'PR.05', "tegra194-gpio", 36, 16, 'UART1_CTS', 'UART1_CTS', None, None),
(1, 'PAA.01', "tegra194-gpio-aon", 37, 26, 'CAN1_DIN', 'CAN1_DIN', None, None),
(53, 'PI.01', "tegra194-gpio", 38, 20, 'I2S2_DIN', 'DAP2_DIN', None, None),
(52, 'PI.00', "tegra194-gpio", 40, 21, 'I2S2_DOUT', 'DAP2_DOUT', None, None)
]
compats_clara_agx_xavier = (
'nvidia,e3900-0000+p2888-0004',
)
JETSON_NX_PIN_DEFS = [
(118, 'PS.04', "tegra194-gpio", 7, 4, 'GPIO09', 'AUD_MCLK', None, None),
(112, 'PR.04', "tegra194-gpio", 11, 17, 'UART1_RTS', 'UART1_RTS', None, None),
(127, 'PT.05', "tegra194-gpio", 12, 18, 'I2S0_SCLK', 'DAP5_SCLK', None, None),
(149, 'PY.00', "tegra194-gpio", 13, 27, 'SPI1_SCK', 'SPI3_SCK', None, None),
(16, 'PCC.04', "tegra194-gpio-aon", 15, 22, 'GPIO12', 'TOUCH_CLK', "c340000.pwm", 0),
(153, 'PY.04', "tegra194-gpio", 16, 23, 'SPI1_CS1', 'SPI3_CS1_N', None, None),
(152, 'PY.03', "tegra194-gpio", 18, 24, 'SPI1_CS0', 'SPI3_CS0_N', None, None),
(162, 'PZ.05', "tegra194-gpio", 19, 10, 'SPI0_MOSI', 'SPI1_MOSI', None, None),
(161, 'PZ.04', "tegra194-gpio", 21, 9, 'SPI0_MISO', 'SPI1_MISO', None, None),
(150, 'PY.01', "tegra194-gpio", 22, 25, 'SPI1_MISO', 'SPI3_MISO', None, None),
(160, 'PZ.03', "tegra194-gpio", 23, 11, 'SPI0_SCK', 'SPI1_SCK', None, None),
(163, 'PZ.06', "tegra194-gpio", 24, 8, 'SPI0_CS0', 'SPI1_CS0_N', None, None),
(164, 'PZ.07', "tegra194-gpio", 26, 7, 'SPI0_CS1', 'SPI1_CS1_N', None, None),
(105, 'PQ.05', "tegra194-gpio", 29, 5, 'GPIO01', 'SOC_GPIO41', None, None),
(106, 'PQ.06', "tegra194-gpio", 31, 6, 'GPIO11', 'SOC_GPIO42', None, None),
(108, 'PR.00', "tegra194-gpio", 32, 12, 'GPIO07', 'SOC_GPIO44', '32f0000.pwm', 0),
(84, 'PN.01', "tegra194-gpio", 33, 13, 'GPIO13', 'SOC_GPIO54', '3280000.pwm', 0),
(130, 'PU.00', "tegra194-gpio", 35, 19, 'I2S0_FS', 'DAP5_FS', None, None),
(113, 'PR.05', "tegra194-gpio", 36, 16, 'UART1_CTS', 'UART1_CTS', None, None),
(151, 'PY.02', "tegra194-gpio", 37, 26, 'SPI1_MOSI', 'SPI3_MOSI', None, None),
(129, 'PT.07', "tegra194-gpio", 38, 20, 'I2S0_DIN', 'DAP5_DIN', None, None),
(128, 'PT.06', "tegra194-gpio", 40, 21, 'I2S0_DOUT', 'DAP5_DOUT', None, None)
]
compats_nx = (
'nvidia,p3509-0000+p3668-0000',
'nvidia,p3509-0000+p3668-0001',
'nvidia,p3449-0000+p3668-0000',
'nvidia,p3449-0000+p3668-0001',
'nvidia,p3449-0000+p3668-0003',
)
JETSON_XAVIER_PIN_DEFS = [
(106, 'PQ.06', "tegra194-gpio", 7, 4, 'MCLK05', 'SOC_GPIO42', None, None),
(112, 'PR.04', "tegra194-gpio", 11, 17, 'UART1_RTS', 'UART1_RTS', None, None),
(51, 'PH.07', "tegra194-gpio", 12, 18, 'I2S2_CLK', 'DAP2_SCLK', None, None),
(108, 'PR.00', "tegra194-gpio", 13, 27, 'PWM01', 'SOC_GPIO44', '32f0000.pwm', 0),
# Older versions of L4T don't enable this PWM controller in DT, so this PWM
# channel may not be available.
(84, 'PN.01', "tegra194-gpio", 15, 22, 'GPIO27', 'SOC_GPIO54', '3280000.pwm', 0),
(8, 'BB.00', "tegra194-gpio-aon", 16, 23, 'GPIO8', 'CAN1_STB', None, None),
(44, 'PH.00', "tegra194-gpio", 18, 24, 'GPIO35', 'SOC_GPIO12', '32c0000.pwm', 0),
(162, 'PZ.05', "tegra194-gpio", 19, 10, 'SPI1_MOSI', 'SPI1_MOSI', None, None),
(161, 'PZ.04', "tegra194-gpio", 21, 9, 'SPI1_MISO', 'SPI1_MISO', None, None),
(101, 'PQ.01', "tegra194-gpio", 22, 25, 'GPIO17', 'SOC_GPIO21', None, None),
(160, 'PZ.03', "tegra194-gpio", 23, 11, 'SPI1_CLK', 'SPI1_SCK', None, None),
(163, 'PZ.06', "tegra194-gpio", 24, 8, 'SPI1_CS0_N', 'SPI1_CS0_N', None, None),
(164, 'PZ.07', "tegra194-gpio", 26, 7, 'SPI1_CS1_N', 'SPI1_CS1_N', None, None),
(3, 'PAA.03', "tegra194-gpio-aon", 29, 5, 'CAN0_DIN', 'CAN0_DIN', None, None),
(2, 'PAA.02', "tegra194-gpio-aon", 31, 6, 'CAN0_DOUT', 'CAN0_DOUT', None, None),
(9, 'PBB.01', "tegra194-gpio-aon", 32, 12, 'GPIO9', 'CAN1_EN', None, None),
(0, 'PAA.00', "tegra194-gpio-aon", 33, 13, 'CAN1_DOUT', 'CAN1_DOUT', None, None),
(54, 'PI.02', "tegra194-gpio", 35, 19, 'I2S2_FS', 'DAP2_FS', None, None),
# Input-only (due to base board)
(113, 'PR.05', "tegra194-gpio", 36, 16, 'UART1_CTS', 'UART1_CTS', None, None),
(1, 'PAA.01', "tegra194-gpio-aon", 37, 26, 'CAN1_DIN', 'CAN1_DIN', None, None),
(53, 'PI.01', "tegra194-gpio", 38, 20, 'I2S2_DIN', 'DAP2_DIN', None, None),
(52, 'PI.00', "tegra194-gpio", 40, 21, 'I2S2_DOUT', 'DAP2_DOUT', None, None)
]
compats_xavier = (
'nvidia,p2972-0000',
'nvidia,p2972-0006',
'nvidia,jetson-xavier',
'nvidia,galen-industrial',
'nvidia,jetson-xavier-industrial',
)
JETSON_TX2_NX_PIN_DEFS = [
(76, 'PJ.04', "tegra-gpio", 7, 4, 'GPIO09', 'AUD_MCLK', None, None),
(28, 'PW.04', "tegra-gpio-aon", 11, 17, 'UART1_RTS', 'UART3_RTS', None, None),
(72, 'PJ.00', "tegra-gpio", 12, 18, 'I2S0_SCLK', 'DAP1_SCLK', None, None),
(17, 'PV.01', "tegra-gpio-aon", 13, 27, 'SPI1_SCK', 'GPIO_SEN1', None, None),
(18, 'PC.02', "tegra-gpio", 15, 22, 'GPIO12', 'DAP2_DOUT', None, None),
(19, 'PC.03', "tegra-gpio", 16, 23, 'SPI1_CS1', 'DAP2_DIN', None, None),
(20, 'PV.04', "tegra-gpio-aon", 18, 24, 'SPI1_CS0', 'GPIO_SEN4', None, None),
(58, 'PH.02', "tegra-gpio", 19, 10, 'SPI0_MOSI', 'GPIO_WAN7', None, None),
(57, 'PH.01', "tegra-gpio", 21, 9, 'SPI0_MISO', 'GPIO_WAN6', None, None),
(18, 'PV.02', "tegra-gpio-aon", 22, 25, 'SPI1_MISO', 'GPIO_SEN2', None, None),
(56, 'PH.00', "tegra-gpio", 23, 11, 'SPI1_CLK', 'GPIO_WAN5', None, None),
(59, 'PH.03', "tegra-gpio", 24, 8, 'SPI0_CS0', 'GPIO_WAN8', None, None),
(163, 'PY.03', "tegra-gpio", 26, 7, 'SPI0_CS1', 'GPIO_MDM4', None, None),
(105, 'PN.01', "tegra-gpio", 29, 5, 'GPIO01', 'GPIO_CAM2', None, None),
(50, 'PEE.02', "tegra-gpio-aon", 31, 6, 'GPIO11', 'TOUCH_CLK', None, None),
(8, 'PU.00', "tegra-gpio-aon", 32, 12, 'GPIO07', 'GPIO_DIS0', '3280000.pwm', 0),
(13, 'PU.05', "tegra-gpio-aon", 33, 13, 'GPIO13', 'GPIO_DIS5', '32a0000.pwm', 0),
(75, 'PJ.03', "tegra-gpio", 35, 19, 'I2S0_FS', 'DAP1_FS', None, None),
(29, 'PW.05', "tegra-gpio-aon", 36, 16, 'UART1_CTS', 'UART3_CTS', None, None),
(19, 'PV.03', "tegra-gpio-aon", 37, 26, 'SPI1_MOSI', 'GPIO_SEN3', None, None),
(74, 'PJ.02', "tegra-gpio", 38, 20, 'I2S0_DIN', 'DAP1_DIN', None, None),
(73, 'PJ.01', "tegra-gpio", 40, 21, 'I2S0_DOUT', 'DAP1_DOUT', None, None)
]
compats_tx2_nx = (
'nvidia,p3509-0000+p3636-0001',
)
JETSON_TX2_PIN_DEFS = [
(76, 'PJ.04', "tegra-gpio", 7, 4, 'PAUDIO_MCLK', 'AUD_MCLK', None, None),
# Output-only (due to base board)
(146, 'PT.02', "tegra-gpio", 11, 17, 'PUART0_RTS', 'UART1_RTS', None, None),
(72, 'PJ.00', "tegra-gpio", 12, 18, 'PI2S0_CLK', 'DAP1_SCLK', None, None),
(77, 'PJ.05', "tegra-gpio", 13, 27, 'PGPIO20_AUD_INT', 'GPIO_AUD0', None, None),
(15, 'GPIO_EXP_P16', "tca9539", 15, 22, 'GPIO_EXP_P17', 'GPIO_EXP_P17', None, None),
# Input-only (due to module):
(40, 'PAA.00', "tegra-gpio-aon", 16, 23, 'AO_DMIC_IN_DAT', 'CAN_GPIO0', None, None),
(161, 'PY.01', "tegra-gpio", 18, 24, 'GPIO16_MDM_WAKE_AP', 'GPIO_MDM2', None, None),
(109, 'PN.05', "tegra-gpio", 19, 10, 'SPI1_MOSI', 'GPIO_CAM6', None, None),
(108, 'PN.04', "tegra-gpio", 21, 9, 'SPI1_MISO', 'GPIO_CAM5', None, None),
(14, 'GPIO_EXP_P16', "tca9539", 22, 25, 'GPIO_EXP_P16', 'GPIO_EXP_P16', None, None),
(107, 'PN.03', "tegra-gpio", 23, 11, 'SPI1_CLK', 'GPIO_CAM4', None, None),
(110, 'PN.06', "tegra-gpio", 24, 8, 'SPI1_CS0', 'GPIO_CAM7', None, None),
# Board pin 26 is not available on this board
(78, 'PJ.06', "tegra-gpio", 29, 5, 'GPIO19_AUD_RST', 'GPIO_AUD1', None, None),
(42, 'PAA.02', "tegra-gpio-aon", 31, 6, 'GPIO9_MOTION_INT', 'CAN_GPIO2', None, None),
# Output-only (due to module):
(41, 'PAA.01', "tegra-gpio-aon", 32, 12, 'AO_DMIC_IN_CLK', 'CAN_GPIO1', None, None),
(69, 'PI.05', "tegra-gpio", 33, 13, 'GPIO11_AP_WAKE_BT', 'GPIO_PQ5', None, None),
(75, 'PJ.03', "tegra-gpio", 35, 19, 'I2S0_LRCLK', 'DAP1_FS', None, None),
# Input-only (due to base board) IF NVIDIA debug card NOT plugged in
# Output-only (due to base board) IF NVIDIA debug card plugged in
(147, 'PT.03', "tegra-gpio", 36, 16, 'UART0_CTS', 'UART1_CTS', None, None),
(68, 'PI.04', "tegra-gpio", 37, 26, 'GPIO8_ALS_PROX_INT', 'GPIO_PQ4', None, None),
(74, 'PJ.02', "tegra-gpio", 38, 20, 'I2S0_SDIN', 'DAP1_DIN', None, None),
(73, 'PJ.01', "tegra-gpio", 40, 21, 'I2S0_SDOUT', 'DAP1_DOUT', None, None)
]
compats_tx2 = (
'nvidia,p2771-0000',
'nvidia,p2771-0888',
'nvidia,p3489-0000',
'nvidia,lightning',
'nvidia,quill',
'nvidia,storm',
)
JETSON_TX1_PIN_DEFS = [
(216, '', "tegra-gpio", 7, 4, 'AUDIO_MCLK', 'AUD_MCLK', None, None),
# Output-only (due to base board)
(162, '', "tegra-gpio", 11, 17, 'UART0_RTS', 'UART1_RTS', None, None),
(11, '', "tegra-gpio", 12, 18, 'I2S0_CLK', 'DAP1_SCLK', None, None),
(38, '', "tegra-gpio", 13, 27, 'GPIO20_AUD_INT', 'GPIO_PE6', None, None),
(15, '', "tca9539", 15, 22, 'GPIO_EXP_P17', 'GPIO_EXP_P17', None, None),
(37, '', "tegra-gpio", 16, 23, 'AO_DMIC_IN_DAT', 'DMIC3_DAT', None, None),
(184, '', "tegra-gpio", 18, 24, 'GPIO16_MDM_WAKE_AP', 'MODEM_WAKE_AP', None, None),
(16, '', "tegra-gpio", 19, 10, 'SPI1_MOSI', 'SPI1_MOSI', None, None),
(17, '', "tegra-gpio", 21, 9, 'SPI1_MISO', 'SPI1_MISO', None, None),
(14, '', "tca9539", 22, 25, 'GPIO_EXP_P16', 'GPIO_EXP_P16', None, None),
(18, '', "tegra-gpio", 23, 11, 'SPI1_CLK', 'SPI1_SCK', None, None),
(19, '', "tegra-gpio", 24, 8, 'SPI1_CS0', 'SPI1_CS0', None, None),
(20, '', "tegra-gpio", 26, 7, 'SPI1_CS1', 'SPI1_CS1', None, None),
(219, '', "tegra-gpio", 29, 5, 'GPIO19_AUD_RST', 'GPIO_X1_AUD', None, None),
(186, '', "tegra-gpio", 31, 6, 'GPIO9_MOTION_INT', 'MOTION_INT', None, None),
(36, '', "tegra-gpio", 32, 12, 'AO_DMIC_IN_CLK', 'DMIC3_CLK', None, None),
(63, '', "tegra-gpio", 33, 13, 'GPIO11_AP_WAKE_BT', 'AP_WAKE_NFC', None, None),
(8, '', "tegra-gpio", 35, 19, 'I2S0_LRCLK', 'DAP1_FS', None, None),
# Input-only (due to base board) IF NVIDIA debug card NOT plugged in
# Input-only (due to base board) (always reads fixed value) IF NVIDIA debug card plugged in
(163, '', "tegra-gpio", 36, 16, 'UART0_CTS', 'UART1_CTS', None, None),
(187, '', "tegra-gpio", 37, 26, 'GPIO8_ALS_PROX_INT', 'ALS_PROX_INT', None, None),
(9, '', "tegra-gpio", 38, 20, 'I2S0_SDIN', 'DAP1_DIN', None, None),
(10, '', "tegra-gpio", 40, 21, 'I2S0_SDOUT', 'DAP1_DOUT', None, None)
]
compats_tx1 = (
'nvidia,p2371-2180',
'nvidia,jetson-cv',
)
JETSON_NANO_PIN_DEFS = [
(216, '', "tegra-gpio", 7, 4, 'GPIO9', 'AUD_MCLK', None, None),
(50, '', "tegra-gpio", 11, 17, 'UART1_RTS', 'UART2_RTS', None, None),
(79, '', "tegra-gpio", 12, 18, 'I2S0_SCLK', 'DAP4_SCLK', None, None),
(14, '', "tegra-gpio", 13, 27, 'SPI1_SCK', 'SPI2_SCK', None, None),
(194, '', "tegra-gpio", 15, 22, 'GPIO12', 'LCD_TE', None, None),
(232, '', "tegra-gpio", 16, 23, 'SPI1_CS1', 'SPI2_CS1', None, None),
(15, '', "tegra-gpio", 18, 24, 'SPI1_CS0', 'SPI2_CS0', None, None),
(16, '', "tegra-gpio", 19, 10, 'SPI0_MOSI', 'SPI1_MOSI', None, None),
(17, '', "tegra-gpio", 21, 9, 'SPI0_MISO', 'SPI1_MISO', None, None),
(13, '', "tegra-gpio", 22, 25, 'SPI1_MISO', 'SPI2_MISO', None, None),
(18, '', "tegra-gpio", 23, 11, 'SPI0_SCK', 'SPI1_SCK', None, None),
(19, '', "tegra-gpio", 24, 8, 'SPI0_CS0', 'SPI1_CS0', None, None),
(20, '', "tegra-gpio", 26, 7, 'SPI0_CS1', 'SPI1_CS1', None, None),
(149, '', "tegra-gpio", 29, 5, 'GPIO01', 'CAM_AF_EN', None, None),
(200, '', "tegra-gpio", 31, 6, 'GPIO11', 'GPIO_PZ0', None, None),
# Older versions of L4T have a DT bug which instantiates a bogus device
# which prevents this library from using this PWM channel.
(168, '', "tegra-gpio", 32, 12, 'GPIO07', 'LCD_BL_PW', '7000a000.pwm', 0),
(38, '', "tegra-gpio", 33, 13, 'GPIO13', 'GPIO_PE6', '7000a000.pwm', 2),
(76, '', "tegra-gpio", 35, 19, 'I2S0_FS', 'DAP4_FS', None, None),
(51, '', "tegra-gpio", 36, 16, 'UART1_CTS', 'UART2_CTS', None, None),
(12, '', "tegra-gpio", 37, 26, 'SPI1_MOSI', 'SPI2_MOSI', None, None),
(77, '', "tegra-gpio", 38, 20, 'I2S0_DIN', 'DAP4_DIN', None, None),
(78, '', "tegra-gpio", 40, 21, 'I2S0_DOUT', 'DAP4_DOUT', None, None)
]
compats_nano = (
'nvidia,p3450-0000',
'nvidia,p3450-0002',
'nvidia,jetson-nano',
)
jetson_gpio_data = {
JETSON_ORIN_NX: (
JETSON_ORIN_NX_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '32768M, 65536M',
'REVISION': 'Unknown',
'TYPE': 'JETSON_ORIN_NX',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'A78AE'
}
),
JETSON_ORIN_NANO: (
JETSON_ORIN_NX_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '32768M, 65536M',
'REVISION': 'Unknown',
'TYPE': 'JETSON_ORIN_NANO',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'A78AE'
}
),
JETSON_ORIN: (
JETSON_ORIN_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '32768M, 65536M',
'REVISION': 'Unknown',
'TYPE': 'JETSON_ORIN',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'A78AE'
}
),
CLARA_AGX_XAVIER: (
CLARA_AGX_XAVIER_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '16384M',
'REVISION': 'Unknown',
'TYPE': 'CLARA_AGX_XAVIER',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM Carmel'
}
),
JETSON_NX: (
JETSON_NX_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '16384M, 8192M',
'REVISION': 'Unknown',
'TYPE': 'Jetson NX',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM Carmel'
}
),
JETSON_XAVIER: (
JETSON_XAVIER_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '65536M, 32768M, 16384M, 8192M',
'REVISION': 'Unknown',
'TYPE': 'Jetson Xavier',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM Carmel'
}
),
JETSON_TX2_NX: (
JETSON_TX2_NX_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '4096M',
'REVISION': 'Unknown',
'TYPE': 'Jetson TX2 NX',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM A57 + Denver'
}
),
JETSON_TX2: (
JETSON_TX2_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '8192M, 4096M',
'REVISION': 'Unknown',
'TYPE': 'Jetson TX2',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM A57 + Denver'
}
),
JETSON_TX1: (
JETSON_TX1_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '4096M',
'REVISION': 'Unknown',
'TYPE': 'Jetson TX1',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM A57'
}
),
JETSON_NANO: (
JETSON_NANO_PIN_DEFS,
{
'P1_REVISION': 1,
'RAM': '4096M, 2048M',
'REVISION': 'Unknown',
'TYPE': 'Jetson Nano',
'MANUFACTURER': 'NVIDIA',
'PROCESSOR': 'ARM A57'
}
),
}
class ChannelInfo(object):
# @channel the pin number in specified mode (board or bcm)
# @chip_fd the file descriptor of the chip
# @line_handle the file descriptor of the line
# @line_offset Linux GPIO pin number (line offset inside chip, not global)
# @direction the direction of a pin is configured (in or out)
# @edge rising and/or falling edge being monitored
# @consumer consumer label
# @gpio_name Linux exported GPIO name
# @gpio_chip GPIO chip name/instance
def __init__(self, channel, line_offset, gpio_name, gpio_chip, pwm_chip_dir, pwm_id):
self.channel = channel
self.chip_fd = None
self.line_handle = None
self.line_offset = line_offset
self.direction = None
self.edge = None
self.consumer = "Jetson-gpio"
self.gpio_name = gpio_name
self.gpio_chip = gpio_chip
self.pwm_chip_dir = pwm_chip_dir
self.pwm_id = pwm_id
ids_warned = False
def find_pmgr_board(prefix):
global ids_warned
ids_path = '/proc/device-tree/chosen/plugin-manager/ids'
ids_path_k510 = '/proc/device-tree/chosen/ids'
if os.path.exists(ids_path):
for f in os.listdir(ids_path):
if f.startswith(prefix):
return f
elif os.path.exists(ids_path_k510):
with open(ids_path_k510, 'r') as f:
ids = f.read()
for s in ids.split():
if s.startswith(prefix):
return s
else:
if not ids_warned:
ids_warned = True
msg = """\
WARNING: Plugin manager information missing from device tree.
WARNING: Cannot determine whether the expected Jetson board is present.
"""
sys.stderr.write(msg)
return None
def warn_if_not_carrier_board(*carrier_boards):
found = False
for b in carrier_boards:
found = find_pmgr_board(b + '-')
if found:
break
if not found:
msg = """\
WARNING: Carrier board is not from a Jetson Developer Kit.
WARNING: Jetson.GPIO library has not been verified with this carrier board,
WARNING: and in fact is unlikely to work correctly.
"""
sys.stderr.write(msg)
def get_compatibles(compatible_path):
with open(compatible_path, 'r') as f:
compatibles = f.read().split('\x00')
return compatibles
def get_model():
compatible_path = '/proc/device-tree/compatible'
# get model info from compatible_path
if os.path.exists(compatible_path):
compatibles = get_compatibles(compatible_path)
def matches(vals):
return any(v in compatibles for v in vals)
if matches(compats_tx1):
warn_if_not_carrier_board('2597')
return JETSON_TX1
elif matches(compats_tx2):
warn_if_not_carrier_board('2597')
return JETSON_TX2
elif matches(compats_clara_agx_xavier):
warn_if_not_carrier_board('3900')
return CLARA_AGX_XAVIER
elif matches(compats_tx2_nx):
warn_if_not_carrier_board('3509')
return JETSON_TX2_NX
elif matches(compats_xavier):
warn_if_not_carrier_board('2822')
return JETSON_XAVIER
elif matches(compats_nano):
module_id = find_pmgr_board('3448')
if not module_id:
raise Exception('Could not determine Jetson Nano module revision')
revision = module_id.split('-')[-1]
# Revision is an ordered string, not a decimal integer
if revision < "200":
raise Exception('Jetson Nano module revision must be A02 or later')
warn_if_not_carrier_board('3449', '3542')
return JETSON_NANO
elif matches(compats_nx):
warn_if_not_carrier_board('3509', '3449')
return JETSON_NX
elif matches(compats_jetson_orins):
warn_if_not_carrier_board('3737')
return JETSON_ORIN
elif matches(compats_jetson_orins_nx):
warn_if_not_carrier_board('3509', '3768')
return JETSON_ORIN_NX
elif matches(compats_jetson_orins_nano):
warn_if_not_carrier_board('3509', '3768')
return JETSON_ORIN_NANO
# get model info from the environment variables for docker containers
model_name = os.environ.get("JETSON_MODEL_NAME")
if model_name is not None:
model_name = model_name.strip()
if model_name in JETSON_MODELS:
return model_name
else:
msg = f"Environment variable 'JETSON_MODEL_NAME={model_name}' is invalid."
sys.stderr.write(msg)
raise Exception('Could not determine Jetson model')
# @brief Retrieve all the data before connecting to any ports
# @param[out] model: model identifier of the Jetson platform
# @param[out] jetson_info: board metadata (type, RAM, revision, manufacturer, processor)
# @param[out] channel_data: per-pin ChannelInfo objects, keyed by pin number/name
#             for each supported numbering mode (BOARD, BCM, CVM, TEGRA_SOC)
def get_data():
model = get_model()
pin_defs, jetson_info = jetson_gpio_data[model]
gpio_chip_dirs = {}
gpio_chip_base = {}
gpio_chip_ngpio = {}
pwm_dirs = {}
sysfs_prefixes = ['/sys/devices/', '/sys/devices/platform/', '/sys/bus/platform/devices/']
pwm_chip_names = set([x[7] for x in pin_defs if x[7] is not None])
for pwm_chip_name in pwm_chip_names:
pwm_chip_dir = None
for prefix in sysfs_prefixes:
d = prefix + pwm_chip_name
if os.path.isdir(d):
pwm_chip_dir = d
break
# Some PWM controllers aren't enabled in all versions of the DT. In
# this case, just hide the PWM function on this pin, but let all other
# aspects of the library continue to work.
if pwm_chip_dir is None:
continue
pwm_chip_pwm_dir = pwm_chip_dir + '/pwm'
if not os.path.exists(pwm_chip_pwm_dir):
continue
for fn in os.listdir(pwm_chip_pwm_dir):
if not fn.startswith('pwmchip'):
continue
pwm_chip_pwm_pwmchipn_dir = pwm_chip_pwm_dir + '/' + fn
pwm_dirs[pwm_chip_name] = pwm_chip_pwm_pwmchipn_dir
break
def model_data(key_col, pin_defs):
return {x[key_col]: ChannelInfo(
x[key_col],
x[0],
x[1],
x[2],
pwm_chip_dir=pwm_dirs.get(x[7], None),
pwm_id=x[8]) for x in pin_defs}
channel_data = {
'BOARD': model_data(3, pin_defs),
'BCM': model_data(4, pin_defs),
'CVM': model_data(5, pin_defs),
'TEGRA_SOC': model_data(6, pin_defs),
}
return model, jetson_info, channel_data
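# Example (illustrative): how the data returned by get_data() is typically consumed.
#   model, jetson_info, channel_data = get_data()
#   board_pins = channel_data['BOARD']
#   ch = board_pins[7]                       # ChannelInfo for board pin 7
#   print(model, ch.gpio_chip, ch.line_offset)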
|
jetson-gpio-master
|
lib/python/Jetson/GPIO/gpio_pin_data.py
|
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import codecs
import importlib.util
import os
import subprocess
from distutils import cmd as distutils_cmd
from distutils import log as distutils_log
from itertools import chain
import setuptools
spec = importlib.util.spec_from_file_location('package_info', 'nemo/package_info.py')
package_info = importlib.util.module_from_spec(spec)
spec.loader.exec_module(package_info)
__contact_emails__ = package_info.__contact_emails__
__contact_names__ = package_info.__contact_names__
__description__ = package_info.__description__
__download_url__ = package_info.__download_url__
__homepage__ = package_info.__homepage__
__keywords__ = package_info.__keywords__
__license__ = package_info.__license__
__package_name__ = package_info.__package_name__
__repository_url__ = package_info.__repository_url__
__version__ = package_info.__version__
if os.path.exists('nemo/README.md'):
with open("nemo/README.md", "r", encoding='utf-8') as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
elif os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 'r', encoding='utf-8',
).read()
long_description_content_type = "text/x-rst"
else:
long_description = 'See ' + __homepage__
long_description_content_type = "text/plain"
###############################################################################
# Dependency Loading #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
def req_file(filename, folder="requirements"):
with open(os.path.join(folder, filename), encoding='utf-8') as f:
content = f.readlines()
# you may also want to remove whitespace characters
# Example: `\n` at the end of each line
return [x.strip() for x in content]
install_requires = req_file("requirements.txt")
extras_require = {
# User packages
'test': req_file("requirements_test.txt"),
# Lightning Collections Packages
'core': req_file("requirements_lightning.txt"),
'common': req_file('requirements_common.txt'),
# domain packages
'asr': req_file("requirements_asr.txt"),
'nlp': req_file("requirements_nlp.txt"),
'tts': req_file("requirements_tts.txt"),
'slu': req_file("requirements_slu.txt"),
}
extras_require['all'] = list(chain(extras_require.values()))
# Add lightning requirements as needed
extras_require['common'] = list(chain([extras_require['common'], extras_require['core']]))
extras_require['test'] = list(chain([extras_require['tts'], extras_require['core'], extras_require['common'],]))
extras_require['asr'] = list(chain([extras_require['asr'], extras_require['core'], extras_require['common']]))
extras_require['nlp'] = list(chain([extras_require['nlp'], extras_require['core'], extras_require['common'],]))
extras_require['tts'] = list(chain([extras_require['tts'], extras_require['core'], extras_require['common'],]))
# TTS has extra dependencies
extras_require['tts'] = list(chain([extras_require['tts'], extras_require['asr']]))
extras_require['slu'] = list(chain([extras_require['slu'], extras_require['asr']]))
###############################################################################
# Code style checkers #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class StyleCommand(distutils_cmd.Command):
__ISORT_BASE = 'isort'
__BLACK_BASE = 'black'
description = 'Checks overall project code style.'
user_options = [
('scope=', None, 'Folder of file to operate within.'),
('fix', None, 'True if tries to fix issues in-place.'),
]
def __call_checker(self, base_command, scope, check):
command = list(base_command)
command.append(scope)
if check:
command.extend(['--check', '--diff'])
self.announce(
msg='Running command: %s' % str(' '.join(command)), level=distutils_log.INFO,
)
return_code = subprocess.call(command)
return return_code
def _isort(self, scope, check):
return self.__call_checker(base_command=self.__ISORT_BASE.split(), scope=scope, check=check,)
def _black(self, scope, check):
return self.__call_checker(base_command=self.__BLACK_BASE.split(), scope=scope, check=check,)
def _pass(self):
self.announce(msg='\033[32mPASS\x1b[0m', level=distutils_log.INFO)
def _fail(self):
self.announce(msg='\033[31mFAIL\x1b[0m', level=distutils_log.INFO)
# noinspection PyAttributeOutsideInit
def initialize_options(self):
self.scope = '.'
self.fix = ''
def run(self):
scope, check = self.scope, not self.fix
isort_return = self._isort(scope=scope, check=check)
black_return = self._black(scope=scope, check=check)
if isort_return == 0 and black_return == 0:
self._pass()
else:
self._fail()
exit(isort_return if isort_return != 0 else black_return)
def finalize_options(self):
pass
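# Example invocation of the custom style command defined above (illustrative):
#   python setup.py style --scope=nemo            # check isort/black formatting
#   python setup.py style --scope=nemo --fix      # reformat files in place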
###############################################################################
setuptools.setup(
name=__package_name__,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=__description__,
long_description=long_description,
long_description_content_type=long_description_content_type,
# The project's main homepage.
url=__repository_url__,
download_url=__download_url__,
# Author details
author=__contact_names__,
author_email=__contact_emails__,
# maintainer Details
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
# The licence under which the project is released
license=__license__,
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Indicate what your project relates to
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Supported python versions
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
# Additional Setting
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
],
packages=setuptools.find_packages(),
install_requires=install_requires,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# $ pip install -e ".[all]"
# $ pip install nemo_toolkit[all]
extras_require=extras_require,
# Add in any packaged data.
include_package_data=True,
exclude=['tools', 'tests'],
package_data={'': ['*.tsv', '*.txt', '*.far', '*.fst', '*.cpp', 'Makefile']},
zip_safe=False,
# PyPI package information.
keywords=__keywords__,
# Custom commands.
cmdclass={'style': StyleCommand},
)
|
NeMo-main
|
setup.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
import os
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import List, Optional
import torch
from omegaconf import OmegaConf
from utils.data_prep import (
add_t_start_end_to_utt_obj,
get_batch_starts_ends,
get_batch_variables,
get_manifest_lines_batch,
is_entry_in_all_lines,
is_entry_in_any_lines,
)
from utils.make_ass_files import make_ass_files
from utils.make_ctm_files import make_ctm_files
from utils.make_output_manifest import write_manifest_out_line
from utils.viterbi_decoding import viterbi_decoding
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.models.hybrid_rnnt_ctc_models import EncDecHybridRNNTCTCModel
from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchASR
from nemo.collections.asr.parts.utils.transcribe_utils import setup_model
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
Align the utterances in manifest_filepath.
Results are saved in ctm files in output_dir.
Arguments:
pretrained_name: string specifying the name of a CTC NeMo ASR model which will be automatically downloaded
from NGC and used for generating the log-probs which we will use to do alignment.
Note: NFA can only use CTC models (not Transducer models) at the moment.
model_path: string specifying the local filepath to a CTC NeMo ASR model which will be used to generate the
log-probs which we will use to do alignment.
Note: NFA can only use CTC models (not Transducer models) at the moment.
Note: if a model_path is provided, it will override the pretrained_name.
manifest_filepath: filepath to the manifest of the data you want to align,
containing 'audio_filepath' and 'text' fields.
output_dir: the folder where output CTM files and new JSON manifest will be saved.
align_using_pred_text: if True, will transcribe the audio using the specified model and then use that transcription
as the reference text for the forced alignment.
transcribe_device: None, or a string specifying the device that will be used for generating log-probs (i.e. "transcribing").
The string needs to be in a format recognized by torch.device(). If None, NFA will set it to 'cuda' if it is available
(otherwise will set it to 'cpu').
viterbi_device: None, or string specifying the device that will be used for doing Viterbi decoding.
The string needs to be in a format recognized by torch.device(). If None, NFA will set it to 'cuda' if it is available
(otherwise will set it to 'cpu').
batch_size: int specifying batch size that will be used for generating log-probs and doing Viterbi decoding.
use_local_attention: boolean flag specifying whether to try to use local attention for the ASR Model (will only
work if the ASR Model is a Conformer model). If local attention is used, we will set the local attention context
size to [64,64].
additional_segment_grouping_separator: an optional string used to separate the text into smaller segments.
If this is not specified, then the whole text will be treated as a single segment.
remove_blank_tokens_from_ctm: a boolean denoting whether to remove <blank> tokens from token-level output CTMs.
audio_filepath_parts_in_utt_id: int specifying how many of the 'parts' of the audio_filepath
we will use (starting from the final part of the audio_filepath) to determine the
utt_id that will be used in the CTM files. Note also that any spaces that are present in the audio_filepath
will be replaced with dashes, so as not to change the number of space-separated elements in the
CTM files.
e.g. if audio_filepath is "/a/b/c/d/e 1.wav" and audio_filepath_parts_in_utt_id is 1 => utt_id will be "e1"
e.g. if audio_filepath is "/a/b/c/d/e 1.wav" and audio_filepath_parts_in_utt_id is 2 => utt_id will be "d_e1"
e.g. if audio_filepath is "/a/b/c/d/e 1.wav" and audio_filepath_parts_in_utt_id is 3 => utt_id will be "c_d_e1"
use_buffered_chunked_streaming: False; if set to True, buffered chunk streaming is used to get the logits for alignment.
This flag is useful when aligning large audio files.
However, chunk streaming inference currently does not support batch inference, which means
that even if you set batch_size > 1, utterances will be inferred one by one instead of
as a whole batch.
chunk_len_in_secs: float chunk length in seconds
total_buffer_in_secs: float Length of buffer (chunk + left and right padding) in seconds
chunk_batch_size: int batch size for buffered chunk inference,
which will cut one audio into segments and do inference on chunk_batch_size segments at a time
simulate_cache_aware_streaming: False; if set to True, cache-aware streaming is used to get the logits for alignment
save_output_file_formats: List of strings specifying what type of output files to save (default: ["ctm", "ass"])
ctm_file_config: CTMFileConfig to specify the configuration of the output CTM files
ass_file_config: ASSFileConfig to specify the configuration of the output ASS files
"""
@dataclass
class CTMFileConfig:
remove_blank_tokens: bool = False
# minimum duration (in seconds) for timestamps in the CTM. If any line in the CTM has a
# duration lower than this, it will be enlarged from the middle outwards until it
# meets the minimum_timestamp_duration, or reaches the beginning or end of the audio file.
# Note that this may cause timestamps to overlap.
minimum_timestamp_duration: float = 0
@dataclass
class ASSFileConfig:
fontsize: int = 20
vertical_alignment: str = "center"
# if resegment_text_to_fill_space is True, the ASS files will use new segments
# such that each segment will not take up more than (approximately) max_lines_per_segment
# when the ASS file is applied to a video
resegment_text_to_fill_space: bool = False
max_lines_per_segment: int = 2
text_already_spoken_rgb: List[int] = field(default_factory=lambda: [49, 46, 61]) # dark gray
text_being_spoken_rgb: List[int] = field(default_factory=lambda: [57, 171, 9]) # dark green
text_not_yet_spoken_rgb: List[int] = field(default_factory=lambda: [194, 193, 199]) # light gray
@dataclass
class AlignmentConfig:
# Required configs
pretrained_name: Optional[str] = None
model_path: Optional[str] = None
manifest_filepath: Optional[str] = None
output_dir: Optional[str] = None
# General configs
align_using_pred_text: bool = False
transcribe_device: Optional[str] = None
viterbi_device: Optional[str] = None
batch_size: int = 1
use_local_attention: bool = True
additional_segment_grouping_separator: Optional[str] = None
audio_filepath_parts_in_utt_id: int = 1
# Buffered chunked streaming configs
use_buffered_chunked_streaming: bool = False
chunk_len_in_secs: float = 1.6
total_buffer_in_secs: float = 4.0
chunk_batch_size: int = 32
# Cache aware streaming configs
simulate_cache_aware_streaming: Optional[bool] = False
# Output file configs
save_output_file_formats: List[str] = field(default_factory=lambda: ["ctm", "ass"])
ctm_file_config: CTMFileConfig = CTMFileConfig()
ass_file_config: ASSFileConfig = ASSFileConfig()
@hydra_runner(config_name="AlignmentConfig", schema=AlignmentConfig)
def main(cfg: AlignmentConfig):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
# Validate config
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None")
if cfg.model_path is not None and cfg.pretrained_name is not None:
raise ValueError("One of cfg.model_path and cfg.pretrained_name must be None")
if cfg.manifest_filepath is None:
raise ValueError("cfg.manifest_filepath must be specified")
if cfg.output_dir is None:
raise ValueError("cfg.output_dir must be specified")
if cfg.batch_size < 1:
raise ValueError("cfg.batch_size cannot be zero or a negative number")
if cfg.additional_segment_grouping_separator == "" or cfg.additional_segment_grouping_separator == " ":
raise ValueError("cfg.additional_grouping_separator cannot be empty string or space character")
if cfg.ctm_file_config.minimum_timestamp_duration < 0:
raise ValueError("cfg.minimum_timestamp_duration cannot be a negative number")
if cfg.ass_file_config.vertical_alignment not in ["top", "center", "bottom"]:
raise ValueError("cfg.ass_file_config.vertical_alignment must be one of 'top', 'center' or 'bottom'")
for rgb_list in [
cfg.ass_file_config.text_already_spoken_rgb,
cfg.ass_file_config.text_being_spoken_rgb,
cfg.ass_file_config.text_not_yet_spoken_rgb,
]:
if len(rgb_list) != 3:
raise ValueError(
"cfg.ass_file_config.text_already_spoken_rgb,"
" cfg.ass_file_config.text_being_spoken_rgb,"
" and cfg.ass_file_config.text_not_yet_spoken_rgb all need to contain"
" exactly 3 elements."
)
# Validate manifest contents
if not is_entry_in_all_lines(cfg.manifest_filepath, "audio_filepath"):
raise RuntimeError(
"At least one line in cfg.manifest_filepath does not contain an 'audio_filepath' entry. "
"All lines must contain an 'audio_filepath' entry."
)
if cfg.align_using_pred_text:
if is_entry_in_any_lines(cfg.manifest_filepath, "pred_text"):
raise RuntimeError(
"Cannot specify cfg.align_using_pred_text=True when the manifest at cfg.manifest_filepath "
"contains 'pred_text' entries. This is because the audio will be transcribed and may produce "
"a different 'pred_text'. This may cause confusion."
)
else:
if not is_entry_in_all_lines(cfg.manifest_filepath, "text"):
raise RuntimeError(
"At least one line in cfg.manifest_filepath does not contain a 'text' entry. "
"NFA requires all lines to contain a 'text' entry when cfg.align_using_pred_text=False."
)
# init devices
if cfg.transcribe_device is None:
transcribe_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
transcribe_device = torch.device(cfg.transcribe_device)
logging.info(f"Device to be used for transcription step (`transcribe_device`) is {transcribe_device}")
if cfg.viterbi_device is None:
viterbi_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
viterbi_device = torch.device(cfg.viterbi_device)
logging.info(f"Device to be used for viterbi step (`viterbi_device`) is {viterbi_device}")
if transcribe_device.type == 'cuda' or viterbi_device.type == 'cuda':
logging.warning(
'One or both of transcribe_device and viterbi_device are GPUs. If you run into OOM errors '
'it may help to change both devices to be the CPU.'
)
# load model
model, _ = setup_model(cfg, transcribe_device)
model.eval()
if isinstance(model, EncDecHybridRNNTCTCModel):
model.change_decoding_strategy(decoder_type="ctc")
if cfg.use_local_attention:
logging.info(
"Flag use_local_attention is set to True => will try to use local attention for model if it allows it"
)
model.change_attention_model(self_attention_model="rel_pos_local_attn", att_context_size=[64, 64])
if not (isinstance(model, EncDecCTCModel) or isinstance(model, EncDecHybridRNNTCTCModel)):
raise NotImplementedError(
f"Model is not an instance of NeMo EncDecCTCModel or ENCDecHybridRNNTCTCModel."
" Currently only instances of these models are supported"
)
if cfg.ctm_file_config.minimum_timestamp_duration > 0:
logging.warning(
f"cfg.ctm_file_config.minimum_timestamp_duration has been set to {cfg.ctm_file_config.minimum_timestamp_duration} seconds. "
"This may cause the alignments for some tokens/words/additional segments to be overlapping."
)
buffered_chunk_params = {}
if cfg.use_buffered_chunked_streaming:
model_cfg = copy.deepcopy(model._cfg)
OmegaConf.set_struct(model_cfg.preprocessor, False)
# some changes for streaming scenario
model_cfg.preprocessor.dither = 0.0
model_cfg.preprocessor.pad_to = 0
if model_cfg.preprocessor.normalize != "per_feature":
logging.error(
"Only EncDecCTCModelBPE models trained with per_feature normalization are supported currently"
)
# Disable config overwriting
OmegaConf.set_struct(model_cfg.preprocessor, True)
feature_stride = model_cfg.preprocessor['window_stride']
model_stride_in_secs = feature_stride * cfg.model_downsample_factor
total_buffer = cfg.total_buffer_in_secs
chunk_len = float(cfg.chunk_len_in_secs)
tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)
mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)
logging.info(f"tokens_per_chunk is {tokens_per_chunk}, mid_delay is {mid_delay}")
model = FrameBatchASR(
asr_model=model,
frame_len=chunk_len,
total_buffer=cfg.total_buffer_in_secs,
batch_size=cfg.chunk_batch_size,
)
buffered_chunk_params = {
"delay": mid_delay,
"model_stride_in_secs": model_stride_in_secs,
"tokens_per_chunk": tokens_per_chunk,
}
# get start and end line IDs of batches
starts, ends = get_batch_starts_ends(cfg.manifest_filepath, cfg.batch_size)
# init output_timestep_duration = None and we will calculate and update it during the first batch
output_timestep_duration = None
# init f_manifest_out
os.makedirs(cfg.output_dir, exist_ok=True)
tgt_manifest_name = str(Path(cfg.manifest_filepath).stem) + "_with_output_file_paths.json"
tgt_manifest_filepath = str(Path(cfg.output_dir) / tgt_manifest_name)
f_manifest_out = open(tgt_manifest_filepath, 'w')
# get alignment and save in CTM batch-by-batch
for start, end in zip(starts, ends):
manifest_lines_batch = get_manifest_lines_batch(cfg.manifest_filepath, start, end)
(log_probs_batch, y_batch, T_batch, U_batch, utt_obj_batch, output_timestep_duration,) = get_batch_variables(
manifest_lines_batch,
model,
cfg.additional_segment_grouping_separator,
cfg.align_using_pred_text,
cfg.audio_filepath_parts_in_utt_id,
output_timestep_duration,
cfg.simulate_cache_aware_streaming,
cfg.use_buffered_chunked_streaming,
buffered_chunk_params,
)
alignments_batch = viterbi_decoding(log_probs_batch, y_batch, T_batch, U_batch, viterbi_device)
for utt_obj, alignment_utt in zip(utt_obj_batch, alignments_batch):
utt_obj = add_t_start_end_to_utt_obj(utt_obj, alignment_utt, output_timestep_duration)
if "ctm" in cfg.save_output_file_formats:
utt_obj = make_ctm_files(utt_obj, cfg.output_dir, cfg.ctm_file_config,)
if "ass" in cfg.save_output_file_formats:
utt_obj = make_ass_files(utt_obj, cfg.output_dir, cfg.ass_file_config)
write_manifest_out_line(
f_manifest_out, utt_obj,
)
f_manifest_out.close()
return None
if __name__ == "__main__":
main()
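# Example invocation, shown here only as a hedged sketch (paths are hypothetical; exactly one of
# model_path / pretrained_name must be set, per the config validation above):
#   python align.py \
#       pretrained_name="stt_en_citrinet_256_gamma_0_25" \
#       manifest_filepath=/path/to/manifest.json \
#       output_dir=/path/to/output_dir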
|
NeMo-main
|
tools/nemo_forced_aligner/align.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from utils.data_prep import restore_token_case
@pytest.mark.parametrize(
"word,word_tokens,expected_word_tokens_cased",
[
("HEY!", ['▁he', 'y', '!'], ['▁HE', 'Y', '!']),
("BabABa▁", ['▁b', 'a', 'b', 'a', 'b', 'a'], ['▁B', 'a', 'b', 'A', 'B', 'a']),
("BabAB▁a", ['▁b', 'a', 'b', 'a', 'b', '_a'], ['▁B', 'a', 'b', 'A', 'B', '_a']),
("Bab▁AB▁a", ['▁b', 'a', 'b', '▁a', 'b', '▁a'], ['▁B', 'a', 'b', '▁A', 'B', '▁a']),
("▁Bab▁AB▁a", ['▁b', 'a', 'b', '▁a', 'b', '▁a'], ['▁B', 'a', 'b', '▁A', 'B', '▁a']),
("▁Bab▁AB▁▁a", ['▁b', 'a', 'b', '▁a', 'b', '▁a'], ['▁B', 'a', 'b', '▁A', 'B', '▁a']),
("▁▁BabAB▁a", ['▁b', 'a', 'b', 'a', 'b', '▁a'], ['▁B', 'a', 'b', 'A', 'B', '▁a']),
("m²", ['▁', 'm', '2'], ['▁', 'm', '2']),
("²", ['▁', '2'], ['▁', '2']),
],
)
def test_restore_token_case(word, word_tokens, expected_word_tokens_cased):
word_tokens_cased = restore_token_case(word, word_tokens)
assert word_tokens_cased == expected_word_tokens_cased
|
NeMo-main
|
tools/nemo_forced_aligner/tests/test_restore_token_case.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import prettyprinter
import pytest
from prettyprinter import pretty_call, register_pretty
from utils.data_prep import Segment, Token, Utterance, Word, get_utt_obj
from nemo.collections.asr.models import ASRModel
def get_utt_obj_pp_string(utt_obj):
@register_pretty(Word)
    def pretty_word(value, ctx):
return pretty_call(
ctx,
Word,
text=value.text,
s_start=value.s_start,
s_end=value.s_end,
t_start=value.t_start,
t_end=value.t_end,
tokens=value.tokens,
)
@register_pretty(Segment)
    def pretty_segment(value, ctx):
return pretty_call(
ctx,
Segment,
text=value.text,
s_start=value.s_start,
s_end=value.s_end,
t_start=value.t_start,
t_end=value.t_end,
words_and_tokens=value.words_and_tokens,
)
@register_pretty(Utterance)
def pretty_utterance(value, ctx):
return pretty_call(
ctx,
Utterance,
text=value.text,
token_ids_with_blanks=value.token_ids_with_blanks,
segments_and_tokens=value.segments_and_tokens,
audio_filepath=value.audio_filepath,
utt_id=value.utt_id,
)
return prettyprinter.pformat(utt_obj)
T_FOR_TEST = 999
AUDIO_FILEPATH_FOR_TEST = "arbitrary_string.wav"
UTT_ID_FOR_TEST = "arbitrary_string"
EN_TEXT = "hi world | hey"
EN_CN_EXPECTED_UTTERANCE = Utterance(
text='hi world | hey',
token_ids_with_blanks=[1024, 317, 1024, 472, 1024, 25, 1024, 20, 1024],
segments_and_tokens=[
Token(text='<b>', text_cased='<b>', s_start=0, s_end=0, t_start=None, t_end=None),
Segment(
text='hi world',
s_start=1,
s_end=3,
t_start=None,
t_end=None,
words_and_tokens=[
Word(
text='hi',
s_start=1,
s_end=1,
t_start=None,
t_end=None,
tokens=[Token(text='▁hi', text_cased='▁hi', s_start=1, s_end=1, t_start=None, t_end=None)],
),
Token(text='<b>', text_cased='<b>', s_start=2, s_end=2, t_start=None, t_end=None),
Word(
text='world',
s_start=3,
s_end=3,
t_start=None,
t_end=None,
tokens=[Token(text='▁world', text_cased='▁world', s_start=3, s_end=3, t_start=None, t_end=None)],
),
],
),
Token(text='<b>', text_cased='<b>', s_start=4, s_end=4, t_start=None, t_end=None),
Segment(
text='hey',
s_start=5,
s_end=7,
t_start=None,
t_end=None,
words_and_tokens=[
Word(
text='hey',
s_start=5,
s_end=7,
t_start=None,
t_end=None,
tokens=[
Token(text='▁he', text_cased='▁he', s_start=5, s_end=5, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=6, s_end=6, t_start=None, t_end=None),
Token(text='y', text_cased='y', s_start=7, s_end=7, t_start=None, t_end=None),
],
)
],
),
Token(text='<b>', text_cased='<b>', s_start=8, s_end=8, t_start=None, t_end=None),
],
audio_filepath=AUDIO_FILEPATH_FOR_TEST,
utt_id=UTT_ID_FOR_TEST,
)
EN_QN_EXPECTED_UTTERANCE = Utterance(
text='hi world | hey',
token_ids_with_blanks=[
28,
8,
28,
9,
28,
0,
28,
23,
28,
15,
28,
18,
28,
12,
28,
4,
28,
0,
28,
8,
28,
5,
28,
25,
28,
],
segments_and_tokens=[
Token(text='<b>', text_cased='<b>', s_start=0, s_end=0, t_start=None, t_end=None),
Segment(
text="hi world",
s_start=1,
s_end=15,
t_start=None,
t_end=None,
words_and_tokens=[
Word(
text="hi",
s_start=1,
s_end=3,
t_start=None,
t_end=None,
tokens=[
Token(text='h', text_cased='h', s_start=1, s_end=1, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=2, s_end=2, t_start=None, t_end=None),
Token(text='i', text_cased='i', s_start=3, s_end=3, t_start=None, t_end=None),
],
),
Token(text='<b>', text_cased='<b>', s_start=4, s_end=4, t_start=None, t_end=None),
Token(text='<space>', text_cased='<space>', s_start=5, s_end=5, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=6, s_end=6, t_start=None, t_end=None),
Word(
text="world",
s_start=7,
s_end=15,
t_start=None,
t_end=None,
tokens=[
Token(text='w', text_cased='w', s_start=7, s_end=7, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=8, s_end=8, t_start=None, t_end=None),
Token(text='o', text_cased='o', s_start=9, s_end=9, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=10, s_end=10, t_start=None, t_end=None),
Token(text='r', text_cased='r', s_start=11, s_end=11, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=12, s_end=12, t_start=None, t_end=None),
Token(text='l', text_cased='l', s_start=13, s_end=13, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=14, s_end=14, t_start=None, t_end=None),
Token(text='d', text_cased='d', s_start=15, s_end=15, t_start=None, t_end=None),
],
),
],
),
Token(text='<b>', text_cased='<b>', s_start=16, s_end=16, t_start=None, t_end=None),
Token(text='<space>', text_cased='<space>', s_start=17, s_end=17, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=18, s_end=18, t_start=None, t_end=None),
Segment(
text="hey",
s_start=19,
s_end=23,
t_start=None,
t_end=None,
words_and_tokens=[
Word(
text="hey",
s_start=19,
s_end=23,
t_start=None,
t_end=None,
tokens=[
Token(text='h', text_cased='h', s_start=19, s_end=19, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=20, s_end=20, t_start=None, t_end=None),
Token(text='e', text_cased='e', s_start=21, s_end=21, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=22, s_end=22, t_start=None, t_end=None),
Token(text='y', text_cased='y', s_start=23, s_end=23, t_start=None, t_end=None),
],
)
],
),
Token(text='<b>', text_cased='<b>', s_start=24, s_end=24, t_start=None, t_end=None),
],
audio_filepath=AUDIO_FILEPATH_FOR_TEST,
utt_id=UTT_ID_FOR_TEST,
)
ZH_TEXT = "人工 智能|技术"
ZH_CN_EXPECTED_UTTERANCE = Utterance(
text='人工 智能|技术',
token_ids_with_blanks=[
5206,
125,
5206,
1329,
5206,
0,
5206,
2029,
5206,
3668,
5206,
0,
5206,
1695,
5206,
2075,
5206,
],
segments_and_tokens=[
Token(text='<b>', text_cased='<b>', s_start=0, s_end=0, t_start=None, t_end=None),
Segment(
text='人工 智能',
s_start=1,
s_end=9,
t_start=None,
t_end=None,
words_and_tokens=[
Word(
text='人工',
s_start=1,
s_end=3,
t_start=None,
t_end=None,
tokens=[
Token(text='人', text_cased='人', s_start=1, s_end=1, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=2, s_end=2, t_start=None, t_end=None),
Token(text='工', text_cased='工', s_start=3, s_end=3, t_start=None, t_end=None),
],
),
Token(text='<b>', text_cased='<b>', s_start=4, s_end=4, t_start=None, t_end=None),
Token(text='<space>', text_cased='<space>', s_start=5, s_end=5, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=6, s_end=6, t_start=None, t_end=None),
Word(
text='智能',
s_start=7,
s_end=9,
t_start=None,
t_end=None,
tokens=[
Token(text='智', text_cased='智', s_start=7, s_end=7, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=8, s_end=8, t_start=None, t_end=None),
Token(text='能', text_cased='能', s_start=9, s_end=9, t_start=None, t_end=None),
],
),
],
),
Token(text='<b>', text_cased='<b>', s_start=10, s_end=10, t_start=None, t_end=None),
Token(text='<space>', text_cased='<space>', s_start=11, s_end=11, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=12, s_end=12, t_start=None, t_end=None),
Segment(
text='技术',
s_start=13,
s_end=15,
t_start=None,
t_end=None,
words_and_tokens=[
Word(
text='技术',
s_start=13,
s_end=15,
t_start=None,
t_end=None,
tokens=[
Token(text='技', text_cased='技', s_start=13, s_end=13, t_start=None, t_end=None),
Token(text='<b>', text_cased='<b>', s_start=14, s_end=14, t_start=None, t_end=None),
Token(text='术', text_cased='术', s_start=15, s_end=15, t_start=None, t_end=None),
],
)
],
),
Token(text='<b>', text_cased='<b>', s_start=16, s_end=16, t_start=None, t_end=None),
],
audio_filepath=AUDIO_FILEPATH_FOR_TEST,
utt_id=UTT_ID_FOR_TEST,
)
@pytest.mark.parametrize(
"text,model_pretrained_name,separator,expected_utterance",
[
(EN_TEXT, "stt_en_citrinet_256_gamma_0_25", "|", EN_CN_EXPECTED_UTTERANCE),
(EN_TEXT, "stt_en_quartznet15x5", "|", EN_QN_EXPECTED_UTTERANCE),
(ZH_TEXT, "stt_zh_citrinet_512", "|", ZH_CN_EXPECTED_UTTERANCE),
],
)
def test_token_info(text, model_pretrained_name, separator, expected_utterance):
model = ASRModel.from_pretrained(model_pretrained_name)
utt_obj = get_utt_obj(
text, model, separator, T=T_FOR_TEST, audio_filepath=AUDIO_FILEPATH_FOR_TEST, utt_id=UTT_ID_FOR_TEST
)
print(f"expected utterance object: {get_utt_obj_pp_string(expected_utterance)}\n")
print(f"output utterance object in test: {get_utt_obj_pp_string(utt_obj)}\n")
assert utt_obj == expected_utterance
|
NeMo-main
|
tools/nemo_forced_aligner/tests/test_get_utt_obj.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pytest
from utils.data_prep import Segment, Token, Utterance, Word, add_t_start_end_to_utt_obj
OUTPUT_TIMESTEP_DURATION = 0.04
ALIGNMENT = [
1,
1,
3,
3,
4,
5,
7,
7,
9,
10,
11,
12,
13,
15,
17,
17,
19,
21,
23,
23,
]
EXPECTED_OUTPUT_UTTERANCE = Utterance(
text='hi world | hey',
token_ids_with_blanks=[
28,
8,
28,
9,
28,
0,
28,
23,
28,
15,
28,
18,
28,
12,
28,
4,
28,
0,
28,
8,
28,
5,
28,
25,
28,
],
segments_and_tokens=[
Token(text='<b>', text_cased='<b>', s_start=0, s_end=0, t_start=-1, t_end=-1),
Segment(
text="hi world",
s_start=1,
s_end=15,
t_start=0 * OUTPUT_TIMESTEP_DURATION,
t_end=14 * OUTPUT_TIMESTEP_DURATION,
words_and_tokens=[
Word(
text="hi",
s_start=1,
s_end=3,
t_start=0 * OUTPUT_TIMESTEP_DURATION,
t_end=4 * OUTPUT_TIMESTEP_DURATION,
tokens=[
Token(
text='h',
text_cased='h',
s_start=1,
s_end=1,
t_start=0 * OUTPUT_TIMESTEP_DURATION,
t_end=2 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=2, s_end=2, t_start=-1, t_end=-1),
Token(
text='i',
text_cased='i',
s_start=3,
s_end=3,
t_start=2 * OUTPUT_TIMESTEP_DURATION,
t_end=4 * OUTPUT_TIMESTEP_DURATION,
),
],
),
Token(
text='<b>',
text_cased='<b>',
s_start=4,
s_end=4,
t_start=4 * OUTPUT_TIMESTEP_DURATION,
t_end=5 * OUTPUT_TIMESTEP_DURATION,
),
Token(
text='<space>',
text_cased='<space>',
s_start=5,
s_end=5,
t_start=5 * OUTPUT_TIMESTEP_DURATION,
t_end=6 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=6, s_end=6, t_start=-1, t_end=-1),
Word(
text="world",
s_start=7,
s_end=15,
t_start=6 * OUTPUT_TIMESTEP_DURATION,
t_end=14 * OUTPUT_TIMESTEP_DURATION,
tokens=[
Token(
text='w',
text_cased='w',
s_start=7,
s_end=7,
t_start=6 * OUTPUT_TIMESTEP_DURATION,
t_end=8 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=8, s_end=8, t_start=-1, t_end=-1),
Token(
text='o',
text_cased='o',
s_start=9,
s_end=9,
t_start=8 * OUTPUT_TIMESTEP_DURATION,
t_end=9 * OUTPUT_TIMESTEP_DURATION,
),
Token(
text='<b>',
text_cased='<b>',
s_start=10,
s_end=10,
t_start=9 * OUTPUT_TIMESTEP_DURATION,
t_end=10 * OUTPUT_TIMESTEP_DURATION,
),
Token(
text='r',
text_cased='r',
s_start=11,
s_end=11,
t_start=10 * OUTPUT_TIMESTEP_DURATION,
t_end=11 * OUTPUT_TIMESTEP_DURATION,
),
Token(
text='<b>',
text_cased='<b>',
s_start=12,
s_end=12,
t_start=11 * OUTPUT_TIMESTEP_DURATION,
t_end=12 * OUTPUT_TIMESTEP_DURATION,
),
Token(
text='l',
text_cased='l',
s_start=13,
s_end=13,
t_start=12 * OUTPUT_TIMESTEP_DURATION,
t_end=13 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=14, s_end=14, t_start=-1, t_end=-1),
Token(
text='d',
text_cased='d',
s_start=15,
s_end=15,
t_start=13 * OUTPUT_TIMESTEP_DURATION,
t_end=14 * OUTPUT_TIMESTEP_DURATION,
),
],
),
],
),
Token(text='<b>', text_cased='<b>', s_start=16, s_end=16, t_start=-1, t_end=-1),
Token(
text='<space>',
text_cased='<space>',
s_start=17,
s_end=17,
t_start=14 * OUTPUT_TIMESTEP_DURATION,
t_end=16 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=18, s_end=18, t_start=-1, t_end=-1),
Segment(
text="hey",
s_start=19,
s_end=23,
t_start=16 * OUTPUT_TIMESTEP_DURATION,
t_end=20 * OUTPUT_TIMESTEP_DURATION,
words_and_tokens=[
Word(
text="hey",
s_start=19,
s_end=23,
t_start=16 * OUTPUT_TIMESTEP_DURATION,
t_end=20 * OUTPUT_TIMESTEP_DURATION,
tokens=[
Token(
text='h',
text_cased='h',
s_start=19,
s_end=19,
t_start=16 * OUTPUT_TIMESTEP_DURATION,
t_end=17 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=20, s_end=20, t_start=-1, t_end=-1),
Token(
text='e',
text_cased='e',
s_start=21,
s_end=21,
t_start=17 * OUTPUT_TIMESTEP_DURATION,
t_end=18 * OUTPUT_TIMESTEP_DURATION,
),
Token(text='<b>', text_cased='<b>', s_start=22, s_end=22, t_start=-1, t_end=-1),
Token(
text='y',
text_cased='y',
s_start=23,
s_end=23,
t_start=18 * OUTPUT_TIMESTEP_DURATION,
t_end=20 * OUTPUT_TIMESTEP_DURATION,
),
],
)
],
),
Token(text='<b>', text_cased='<b>', s_start=24, s_end=24, t_start=-1, t_end=-1),
],
)
@pytest.mark.parametrize(
"alignment,expected_output_utterance, output_timestep_duration",
[(ALIGNMENT, EXPECTED_OUTPUT_UTTERANCE, OUTPUT_TIMESTEP_DURATION),],
)
def test_add_t_start_end_to_utt_obj(alignment, expected_output_utterance, output_timestep_duration):
input_utterance = copy.deepcopy(expected_output_utterance)
# set all t_start and t_end to None in input_utterance
for segment_or_token in input_utterance.segments_and_tokens:
if type(segment_or_token) is Segment:
segment = segment_or_token
segment.t_start = None
segment.t_end = None
for word_or_token in segment.words_and_tokens:
if type(word_or_token) is Word:
word = word_or_token
word.t_start = None
word.t_end = None
for token in word.tokens:
token.t_start = None
token.t_end = None
else:
token = word_or_token
token.t_start = None
token.t_end = None
else:
token = segment_or_token
token.t_start = None
token.t_end = None
output_utterance = add_t_start_end_to_utt_obj(input_utterance, alignment, output_timestep_duration)
assert output_utterance == expected_output_utterance
|
NeMo-main
|
tools/nemo_forced_aligner/tests/test_add_t_start_end_to_utt_obj.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
def write_manifest_out_line(
f_manifest_out, utt_obj,
):
data = {"audio_filepath": utt_obj.audio_filepath}
    if utt_obj.text is not None:
        data["text"] = utt_obj.text
    if utt_obj.pred_text is not None:
        data["pred_text"] = utt_obj.pred_text
for key, val in utt_obj.saved_output_files.items():
data[key] = val
new_line = json.dumps(data)
f_manifest_out.write(f"{new_line}\n")
return None
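# Illustrative output manifest line (hypothetical values): the written JSON contains the audio filepath,
# the text and/or pred_text if present, plus whatever entries are in utt_obj.saved_output_files, e.g.
#   {"audio_filepath": "/data/utt01.wav", "text": "hi world", ...paths of any saved CTM/ASS files...}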
|
NeMo-main
|
tools/nemo_forced_aligner/utils/make_output_manifest.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BLANK_TOKEN = "<b>"
SPACE_TOKEN = "<space>"
V_NEGATIVE_NUM = -3.4e38 # this is just above the most negative number in torch.float32
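# For reference (not part of the original constants): torch.finfo(torch.float32).min is about -3.4028e38,
# so V_NEGATIVE_NUM sits just above it and can be stored in float32 tensors without overflowing to -inf.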
|
NeMo-main
|
tools/nemo_forced_aligner/utils/constants.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from utils.constants import V_NEGATIVE_NUM
def viterbi_decoding(log_probs_batch, y_batch, T_batch, U_batch, viterbi_device):
"""
Do Viterbi decoding with an efficient algorithm (the only for-loop in the 'forward pass' is over the time dimension).
Args:
log_probs_batch: tensor of shape (B, T_max, V). The parts of log_probs_batch which are 'padding' are filled
with 'V_NEGATIVE_NUM' - a large negative number which represents a very low probability.
y_batch: tensor of shape (B, U_max) - contains token IDs including blanks in every other position. The parts of
y_batch which are padding are filled with the number 'V'. V = the number of tokens in the vocabulary + 1 for
the blank token.
        T_batch: tensor of shape (B,) - contains the durations of the log_probs_batch (so we can ignore the
parts of log_probs_batch which are padding)
        U_batch: tensor of shape (B,) - contains the lengths of y_batch (so we can ignore the parts of y_batch
which are padding).
viterbi_device: the torch device on which Viterbi decoding will be done.
Returns:
alignments_batch: list of lists containing locations for the tokens we align to at each timestep.
Looks like: [[0, 0, 1, 2, 2, 3, 3, ..., ], ..., [0, 1, 2, 2, 2, 3, 4, ....]].
Each list inside alignments_batch is of length T_batch[location of utt in batch].
"""
B, T_max, _ = log_probs_batch.shape
U_max = y_batch.shape[1]
# transfer all tensors to viterbi_device
log_probs_batch = log_probs_batch.to(viterbi_device)
y_batch = y_batch.to(viterbi_device)
T_batch = T_batch.to(viterbi_device)
U_batch = U_batch.to(viterbi_device)
# make tensor that we will put at timesteps beyond the duration of the audio
padding_for_log_probs = V_NEGATIVE_NUM * torch.ones((B, T_max, 1), device=viterbi_device)
# make log_probs_padded tensor of shape (B, T_max, V +1 ) where all of
# log_probs_padded[:,:,-1] is the 'V_NEGATIVE_NUM'
log_probs_padded = torch.cat((log_probs_batch, padding_for_log_probs), dim=2)
    # initialize v_prev - tensor of previous timestep's viterbi probabilities, of shape (B, U_max)
v_prev = V_NEGATIVE_NUM * torch.ones((B, U_max), device=viterbi_device)
v_prev[:, :2] = torch.gather(input=log_probs_padded[:, 0, :], dim=1, index=y_batch[:, :2])
# initialize backpointers_rel - which contains values like 0 to indicate the backpointer is to the same u index,
# 1 to indicate the backpointer pointing to the u-1 index and 2 to indicate the backpointer is pointing to the u-2 index
backpointers_rel = -99 * torch.ones((B, T_max, U_max), dtype=torch.int8, device=viterbi_device)
# Make a letter_repetition_mask the same shape as y_batch
# the letter_repetition_mask will have 'True' where the token (including blanks) is the same
    # as the token two places before it in the ground truth (and 'False' everywhere else).
# We will use letter_repetition_mask to determine whether the Viterbi algorithm needs to look two tokens back or
# three tokens back
y_shifted_left = torch.roll(y_batch, shifts=2, dims=1)
letter_repetition_mask = y_batch - y_shifted_left
    letter_repetition_mask[:, :2] = 1  # make sure we don't apply the mask to the first 2 tokens
letter_repetition_mask = letter_repetition_mask == 0
for t in range(1, T_max):
# e_current is a tensor of shape (B, U_max) of the log probs of every possible token at the current timestep
e_current = torch.gather(input=log_probs_padded[:, t, :], dim=1, index=y_batch)
# apply a mask to e_current to cope with the fact that we do not keep the whole v_matrix and continue
# calculating viterbi probabilities during some 'padding' timesteps
t_exceeded_T_batch = t >= T_batch
U_can_be_final = torch.logical_or(
torch.arange(0, U_max, device=viterbi_device).unsqueeze(0) == (U_batch.unsqueeze(1) - 0),
torch.arange(0, U_max, device=viterbi_device).unsqueeze(0) == (U_batch.unsqueeze(1) - 1),
)
mask = torch.logical_not(torch.logical_and(t_exceeded_T_batch.unsqueeze(1), U_can_be_final,)).long()
e_current = e_current * mask
# v_prev_shifted is a tensor of shape (B, U_max) of the viterbi probabilities 1 timestep back and 1 token position back
v_prev_shifted = torch.roll(v_prev, shifts=1, dims=1)
# by doing a roll shift of size 1, we have brought the viterbi probability in the final token position to the
        # first token position - let's overcome this by 'zeroing out' the probabilities in the first token position
v_prev_shifted[:, 0] = V_NEGATIVE_NUM
# v_prev_shifted2 is a tensor of shape (B, U_max) of the viterbi probabilities 1 timestep back and 2 token position back
v_prev_shifted2 = torch.roll(v_prev, shifts=2, dims=1)
v_prev_shifted2[:, :2] = V_NEGATIVE_NUM # zero out as we did for v_prev_shifted
# use our letter_repetition_mask to remove the connections between 2 blanks (so we don't skip over a letter)
        # and to remove the connections between 2 consecutive letters (so we don't skip over a blank)
v_prev_shifted2.masked_fill_(letter_repetition_mask, V_NEGATIVE_NUM)
        # we need this v_prev_dup tensor so we can calculate the viterbi probability of every possible
# token position simultaneously
v_prev_dup = torch.cat(
(v_prev.unsqueeze(2), v_prev_shifted.unsqueeze(2), v_prev_shifted2.unsqueeze(2),), dim=2,
)
# candidates_v_current are our candidate viterbi probabilities for every token position, from which
# we will pick the max and record the argmax
candidates_v_current = v_prev_dup + e_current.unsqueeze(2)
# we straight away save results in v_prev instead of v_current, so that the variable v_prev will be ready for the
# next iteration of the for-loop
v_prev, bp_relative = torch.max(candidates_v_current, dim=2)
backpointers_rel[:, t, :] = bp_relative
# trace backpointers
alignments_batch = []
for b in range(B):
T_b = int(T_batch[b])
U_b = int(U_batch[b])
if U_b == 1: # i.e. we put only a blank token in the reference text because the reference text is empty
current_u = 0 # set initial u to 0 and let the rest of the code block run as usual
else:
current_u = int(torch.argmax(v_prev[b, U_b - 2 : U_b])) + U_b - 2
alignment_b = [current_u]
for t in range(T_max - 1, 0, -1):
current_u = current_u - int(backpointers_rel[b, t, current_u])
alignment_b.insert(0, current_u)
alignment_b = alignment_b[:T_b]
alignments_batch.append(alignment_b)
return alignments_batch
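# Minimal usage sketch with toy values (an assumption for illustration, not part of the original module).
# Columns of log_probs are [P(token 0), P(blank)] and y interleaves the blank id (here 1) with token ids:
#   log_probs = torch.log(torch.tensor([[[0.9, 0.1], [0.2, 0.8], [0.1, 0.9]]]))  # shape (B=1, T=3, V=2)
#   y = torch.tensor([[1, 0, 1]])                      # blank, token 0, blank
#   T = torch.tensor([3]); U = torch.tensor([3])
#   viterbi_decoding(log_probs, y, T, U, torch.device("cpu"))  # -> [[1, 2, 2]]
# i.e. the non-blank token (index 1 of y) is emitted at the first timestep, then the trailing blank.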
|
NeMo-main
|
tools/nemo_forced_aligner/utils/viterbi_decoding.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Union
import soundfile as sf
import torch
from tqdm.auto import tqdm
from utils.constants import BLANK_TOKEN, SPACE_TOKEN, V_NEGATIVE_NUM
from nemo.utils import logging
def _get_utt_id(audio_filepath, audio_filepath_parts_in_utt_id):
fp_parts = Path(audio_filepath).parts[-audio_filepath_parts_in_utt_id:]
utt_id = Path("_".join(fp_parts)).stem
utt_id = utt_id.replace(" ", "-") # replace any spaces in the filepath with dashes
return utt_id
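# Example (hypothetical path): _get_utt_id("/data/speaker 1/utt01.wav", audio_filepath_parts_in_utt_id=2)
# takes the last 2 path parts ("speaker 1", "utt01.wav"), joins them with "_", drops the extension and
# replaces spaces with dashes, giving "speaker-1_utt01".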
def get_batch_starts_ends(manifest_filepath, batch_size):
"""
Get the start and end ids of the lines we will use for each 'batch'.
"""
with open(manifest_filepath, 'r') as f:
num_lines_in_manifest = sum(1 for _ in f)
starts = [x for x in range(0, num_lines_in_manifest, batch_size)]
ends = [x - 1 for x in starts]
ends.pop(0)
ends.append(num_lines_in_manifest)
return starts, ends
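# Example: for a manifest with 5 lines and batch_size=2 this returns starts=[0, 2, 4] and ends=[1, 3, 5];
# the final 'end' may point one line past the last line index, which is harmless because
# get_manifest_lines_batch simply stops reading at the end of the file.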
def is_entry_in_any_lines(manifest_filepath, entry):
"""
Returns True if entry is a key in any of the JSON lines in manifest_filepath
"""
entry_in_manifest = False
with open(manifest_filepath, 'r') as f:
for line in f:
data = json.loads(line)
if entry in data:
entry_in_manifest = True
return entry_in_manifest
def is_entry_in_all_lines(manifest_filepath, entry):
"""
    Returns True if entry is a key in all of the JSON lines in manifest_filepath.
"""
with open(manifest_filepath, 'r') as f:
for line in f:
data = json.loads(line)
if entry not in data:
return False
return True
def get_manifest_lines_batch(manifest_filepath, start, end):
manifest_lines_batch = []
with open(manifest_filepath, "r", encoding="utf-8-sig") as f:
for line_i, line in enumerate(f):
if line_i >= start and line_i <= end:
data = json.loads(line)
if "text" in data:
# remove any BOM, any duplicated spaces, convert any
# newline chars to spaces
data["text"] = data["text"].replace("\ufeff", "")
data["text"] = " ".join(data["text"].split())
# Replace any horizontal ellipses with 3 separate periods.
# The tokenizer will do this anyway. But making this replacement
# now helps avoid errors when restoring punctuation when saving
# the output files
data["text"] = data["text"].replace("\u2026", "...")
manifest_lines_batch.append(data)
if line_i == end:
break
return manifest_lines_batch
def get_char_tokens(text, model):
tokens = []
for character in text:
if character in model.decoder.vocabulary:
tokens.append(model.decoder.vocabulary.index(character))
else:
tokens.append(len(model.decoder.vocabulary)) # return unk token (same as blank token)
return tokens
def is_sub_or_superscript_pair(ref_text, text):
"""returns True if ref_text is a subscript or superscript version of text"""
sub_or_superscript_to_num = {
"⁰": "0",
"¹": "1",
"²": "2",
"³": "3",
"⁴": "4",
"⁵": "5",
"⁶": "6",
"⁷": "7",
"⁸": "8",
"⁹": "9",
"₀": "0",
"₁": "1",
"₂": "2",
"₃": "3",
"₄": "4",
"₅": "5",
"₆": "6",
"₇": "7",
"₈": "8",
"₉": "9",
}
if text in sub_or_superscript_to_num:
if sub_or_superscript_to_num[text] == ref_text:
return True
return False
def restore_token_case(word, word_tokens):
# remove repeated "▁" and "_" from word as that is what the tokenizer will do
while "▁▁" in word:
word = word.replace("▁▁", "▁")
while "__" in word:
        word = word.replace("__", "_")
word_tokens_cased = []
word_char_pointer = 0
for token in word_tokens:
token_cased = ""
for token_char in token:
if token_char == word[word_char_pointer]:
token_cased += token_char
word_char_pointer += 1
else:
if token_char.upper() == word[word_char_pointer] or is_sub_or_superscript_pair(
token_char, word[word_char_pointer]
):
token_cased += token_char.upper()
word_char_pointer += 1
else:
if token_char == "▁" or token_char == "_":
if word[word_char_pointer] == "▁" or word[word_char_pointer] == "_":
token_cased += token_char
word_char_pointer += 1
elif word_char_pointer == 0:
token_cased += token_char
else:
raise RuntimeError(
f"Unexpected error - failed to recover capitalization of tokens for word {word}"
)
word_tokens_cased.append(token_cased)
return word_tokens_cased
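# Example (taken from the unit tests): restore_token_case("HEY!", ['▁he', 'y', '!']) returns
# ['▁HE', 'Y', '!'] - the casing of the original word is copied back onto the lowercased tokens.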
@dataclass
class Token:
text: str = None
text_cased: str = None
s_start: int = None
s_end: int = None
t_start: float = None
t_end: float = None
@dataclass
class Word:
text: str = None
s_start: int = None
s_end: int = None
t_start: float = None
t_end: float = None
tokens: List[Token] = field(default_factory=list)
@dataclass
class Segment:
text: str = None
s_start: int = None
s_end: int = None
t_start: float = None
t_end: float = None
words_and_tokens: List[Union[Word, Token]] = field(default_factory=list)
@dataclass
class Utterance:
token_ids_with_blanks: List[int] = field(default_factory=list)
segments_and_tokens: List[Union[Segment, Token]] = field(default_factory=list)
text: str = None
pred_text: str = None
audio_filepath: str = None
utt_id: str = None
saved_output_files: dict = field(default_factory=dict)
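# Sketch of how these dataclasses nest (illustrative only; see the unit tests for full examples):
#   Utterance.segments_and_tokens -> [Token(<b>), Segment(...), Token(<b>), ...]
#   Segment.words_and_tokens      -> [Word(...), Token(<b> or <space>), ...]
#   Word.tokens                   -> [Token(non-blank), Token(<b>), ...]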
def get_utt_obj(
text, model, separator, T, audio_filepath, utt_id,
):
"""
Function to create an Utterance object and add all necessary information to it except
for timings of the segments / words / tokens according to the alignment - that will
be done later in a different function, after the alignment is done.
The Utterance object has a list segments_and_tokens which contains Segment objects and
Token objects (for blank tokens in between segments).
Within the Segment objects, there is a list words_and_tokens which contains Word objects and
Token objects (for blank tokens in between words).
    Within the Word objects, there is a list tokens which contains Token objects for
blank and non-blank tokens.
We will be building up these lists in this function. This data structure will then be useful for
generating the various output files that we wish to save.
"""
if not separator: # if separator is not defined - treat the whole text as one segment
segments = [text]
else:
segments = text.split(separator)
# remove any spaces at start and end of segments
segments = [seg.strip() for seg in segments]
# remove any empty segments
segments = [seg for seg in segments if len(seg) > 0]
utt = Utterance(text=text, audio_filepath=audio_filepath, utt_id=utt_id,)
# build up lists: token_ids_with_blanks, segments_and_tokens.
# The code for these is different depending on whether we use char-based tokens or not
if hasattr(model, 'tokenizer'):
if hasattr(model, 'blank_id'):
BLANK_ID = model.blank_id
else:
BLANK_ID = len(model.tokenizer.vocab) # TODO: check
utt.token_ids_with_blanks = [BLANK_ID]
# check for text being 0 length
if len(text) == 0:
return utt
# check for # tokens + token repetitions being > T
all_tokens = model.tokenizer.text_to_ids(text)
n_token_repetitions = 0
for i_tok in range(1, len(all_tokens)):
if all_tokens[i_tok] == all_tokens[i_tok - 1]:
n_token_repetitions += 1
if len(all_tokens) + n_token_repetitions > T:
logging.info(
f"Utterance {utt_id} has too many tokens compared to the audio file duration."
" Will not generate output alignment files for this utterance."
)
return utt
# build up data structures containing segments/words/tokens
utt.segments_and_tokens.append(Token(text=BLANK_TOKEN, text_cased=BLANK_TOKEN, s_start=0, s_end=0,))
segment_s_pointer = 1 # first segment will start at s=1 because s=0 is a blank
word_s_pointer = 1 # first word will start at s=1 because s=0 is a blank
for segment in segments:
# add the segment to segment_info and increment the segment_s_pointer
segment_tokens = model.tokenizer.text_to_tokens(segment)
utt.segments_and_tokens.append(
Segment(
text=segment,
s_start=segment_s_pointer,
                    # segment_tokens do not contain blanks => need to multiply by 2
# s_end needs to be the index of the final token (including blanks) of the current segment:
# segment_s_pointer + len(segment_tokens) * 2 is the index of the first token of the next segment =>
# => need to subtract 2
s_end=segment_s_pointer + len(segment_tokens) * 2 - 2,
)
)
segment_s_pointer += (
len(segment_tokens) * 2
) # multiply by 2 to account for blanks (which are not present in segment_tokens)
words = segment.split(" ") # we define words to be space-separated sub-strings
for word_i, word in enumerate(words):
word_tokens = model.tokenizer.text_to_tokens(word)
word_token_ids = model.tokenizer.text_to_ids(word)
word_tokens_cased = restore_token_case(word, word_tokens)
# add the word to word_info and increment the word_s_pointer
utt.segments_and_tokens[-1].words_and_tokens.append(
                # word_tokens do not contain blanks => need to multiply by 2
# s_end needs to be the index of the final token (including blanks) of the current word:
# word_s_pointer + len(word_tokens) * 2 is the index of the first token of the next word =>
# => need to subtract 2
Word(text=word, s_start=word_s_pointer, s_end=word_s_pointer + len(word_tokens) * 2 - 2)
)
word_s_pointer += (
len(word_tokens) * 2
) # multiply by 2 to account for blanks (which are not present in word_tokens)
for token_i, (token, token_id, token_cased) in enumerate(
zip(word_tokens, word_token_ids, word_tokens_cased)
):
# add the text tokens and the blanks in between them
# to our token-based variables
utt.token_ids_with_blanks.extend([token_id, BLANK_ID])
# adding Token object for non-blank token
utt.segments_and_tokens[-1].words_and_tokens[-1].tokens.append(
Token(
text=token,
text_cased=token_cased,
# utt.token_ids_with_blanks has the form [...., <this non-blank token>, <blank>] =>
# => if do len(utt.token_ids_with_blanks) - 1 you get the index of the final <blank>
# => we want to do len(utt.token_ids_with_blanks) - 2 to get the index of <this non-blank token>
s_start=len(utt.token_ids_with_blanks) - 2,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 2,
)
)
# adding Token object for blank tokens in between the tokens of the word
# (ie do not add another blank if you have reached the end)
if token_i < len(word_tokens) - 1:
utt.segments_and_tokens[-1].words_and_tokens[-1].tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form [...., <this blank token>] =>
# => if do len(utt.token_ids_with_blanks) -1 you get the index of this <blank>
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
# add a Token object for blanks in between words in this segment
# (but only *in between* - do not add the token if it is after the final word)
if word_i < len(words) - 1:
utt.segments_and_tokens[-1].words_and_tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form [...., <this blank token>] =>
# => if do len(utt.token_ids_with_blanks) -1 you get the index of this <blank>
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
# add the blank token in between segments/after the final segment
utt.segments_and_tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form [...., <this blank token>] =>
# => if do len(utt.token_ids_with_blanks) -1 you get the index of this <blank>
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
return utt
elif hasattr(model.decoder, "vocabulary"): # i.e. tokenization is simply character-based
BLANK_ID = len(model.decoder.vocabulary) # TODO: check this is correct
SPACE_ID = model.decoder.vocabulary.index(" ")
utt.token_ids_with_blanks = [BLANK_ID]
# check for text being 0 length
if len(text) == 0:
return utt
# check for # tokens + token repetitions being > T
all_tokens = get_char_tokens(text, model)
n_token_repetitions = 0
for i_tok in range(1, len(all_tokens)):
if all_tokens[i_tok] == all_tokens[i_tok - 1]:
n_token_repetitions += 1
if len(all_tokens) + n_token_repetitions > T:
logging.info(
f"Utterance {utt_id} has too many tokens compared to the audio file duration."
" Will not generate output alignment files for this utterance."
)
return utt
# build up data structures containing segments/words/tokens
utt.segments_and_tokens.append(Token(text=BLANK_TOKEN, text_cased=BLANK_TOKEN, s_start=0, s_end=0,))
segment_s_pointer = 1 # first segment will start at s=1 because s=0 is a blank
word_s_pointer = 1 # first word will start at s=1 because s=0 is a blank
for i_segment, segment in enumerate(segments):
# add the segment to segment_info and increment the segment_s_pointer
segment_tokens = get_char_tokens(segment, model)
utt.segments_and_tokens.append(
Segment(
text=segment,
s_start=segment_s_pointer,
                    # segment_tokens do not contain blanks => need to multiply by 2
# s_end needs to be the index of the final token (including blanks) of the current segment:
# segment_s_pointer + len(segment_tokens) * 2 is the index of the first token of the next segment =>
# => need to subtract 2
s_end=segment_s_pointer + len(segment_tokens) * 2 - 2,
)
)
# for correct calculation: multiply len(segment_tokens) by 2 to account for blanks (which are not present in segment_tokens)
# and + 2 to account for [<token for space in between segments>, <blank token after that space token>]
segment_s_pointer += len(segment_tokens) * 2 + 2
words = segment.split(" ") # we define words to be space-separated substrings
for i_word, word in enumerate(words):
# convert string to list of characters
word_tokens = list(word)
# convert list of characters to list of their ids in the vocabulary
word_token_ids = get_char_tokens(word, model)
# add the word to word_info and increment the word_s_pointer
utt.segments_and_tokens[-1].words_and_tokens.append(
# note for s_end:
                # word_tokens do not contain blanks => need to multiply by 2
# s_end needs to be the index of the final token (including blanks) of the current word:
# word_s_pointer + len(word_tokens) * 2 is the index of the first token of the next word =>
# => need to subtract 2
Word(text=word, s_start=word_s_pointer, s_end=word_s_pointer + len(word_tokens) * 2 - 2)
)
# for correct calculation: multiply len(word_tokens) by 2 to account for blanks (which are not present in word_tokens)
# and + 2 to account for [<token for space in between words>, <blank token after that space token>]
word_s_pointer += len(word_tokens) * 2 + 2
for token_i, (token, token_id) in enumerate(zip(word_tokens, word_token_ids)):
# add the text tokens and the blanks in between them
# to our token-based variables
utt.token_ids_with_blanks.extend([token_id])
utt.segments_and_tokens[-1].words_and_tokens[-1].tokens.append(
Token(
text=token,
text_cased=token,
# utt.token_ids_with_blanks has the form [..., <this non-blank token>]
# => do len(utt.token_ids_with_blanks) - 1 to get the index of this non-blank token
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
if token_i < len(word_tokens) - 1: # only add blank tokens that are in the middle of words
utt.token_ids_with_blanks.extend([BLANK_ID])
utt.segments_and_tokens[-1].words_and_tokens[-1].tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form [..., <this blank token>]
# => do len(utt.token_ids_with_blanks) - 1 to get the index of this blank token
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
# add space token (and the blanks around it) unless this is the final word in a segment
if i_word < len(words) - 1:
utt.token_ids_with_blanks.extend([BLANK_ID, SPACE_ID, BLANK_ID])
utt.segments_and_tokens[-1].words_and_tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form
# [..., <final token of previous word>, <blank token>, <space token>, <blank token>]
# => do len(utt.token_ids_with_blanks) - 3 to get the index of the blank token before the space token
s_start=len(utt.token_ids_with_blanks) - 3,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 3,
)
)
utt.segments_and_tokens[-1].words_and_tokens.append(
Token(
text=SPACE_TOKEN,
text_cased=SPACE_TOKEN,
# utt.token_ids_with_blanks has the form
# [..., <final token of previous word>, <blank token>, <space token>, <blank token>]
# => do len(utt.token_ids_with_blanks) - 2 to get the index of the space token
s_start=len(utt.token_ids_with_blanks) - 2,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 2,
)
)
utt.segments_and_tokens[-1].words_and_tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form
# [..., <final token of previous word>, <blank token>, <space token>, <blank token>]
# => do len(utt.token_ids_with_blanks) - 1 to get the index of the blank token after the space token
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
# add a blank to the segment, and add a space after if this is not the final segment
utt.token_ids_with_blanks.extend([BLANK_ID])
utt.segments_and_tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form [..., <this blank token>]
# => do len(utt.token_ids_with_blanks) - 1 to get the index of this blank token
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
if i_segment < len(segments) - 1:
utt.token_ids_with_blanks.extend([SPACE_ID, BLANK_ID])
utt.segments_and_tokens.append(
Token(
text=SPACE_TOKEN,
text_cased=SPACE_TOKEN,
# utt.token_ids_with_blanks has the form
# [..., <space token>, <blank token>]
# => do len(utt.token_ids_with_blanks) - 2 to get the index of the space token
s_start=len(utt.token_ids_with_blanks) - 2,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 2,
)
)
utt.segments_and_tokens.append(
Token(
text=BLANK_TOKEN,
text_cased=BLANK_TOKEN,
# utt.token_ids_with_blanks has the form
# [..., <space token>, <blank token>]
# => do len(utt.token_ids_with_blanks) - 1 to get the index of the blank token
s_start=len(utt.token_ids_with_blanks) - 1,
# s_end is same as s_start since the token only occupies one element in the list
s_end=len(utt.token_ids_with_blanks) - 1,
)
)
return utt
else:
raise RuntimeError("Cannot get tokens of this model.")
def add_t_start_end_to_utt_obj(utt_obj, alignment_utt, output_timestep_duration):
"""
Function to add t_start and t_end (representing time in seconds) to the Utterance object utt_obj.
Args:
utt_obj: Utterance object to which we will add t_start and t_end for its
constituent segments/words/tokens.
alignment_utt: a list of ints indicating which token does the alignment pass through at each
            timestep (will take the form [0, 0, 1, 1, ..., <num of tokens including blanks in utterance>]).
output_timestep_duration: a float indicating the duration of a single output timestep from
the ASR Model.
Returns:
utt_obj: updated Utterance object.
"""
# General idea for the algorithm of how we add t_start and t_end
# the timestep where a token s starts is the location of the first appearance of s_start in alignment_utt
# the timestep where a token s ends is the location of the final appearance of s_end in alignment_utt
# We will make dictionaries num_to_first_alignment_appearance and
# num_to_last_appearance and use that to update all of
# the t_start and t_end values in utt_obj.
# We will put t_start = t_end = -1 for tokens that are skipped (should only be blanks)
num_to_first_alignment_appearance = dict()
num_to_last_alignment_appearance = dict()
prev_s = -1 # use prev_s to keep track of when the s changes
for t, s in enumerate(alignment_utt):
if s > prev_s:
num_to_first_alignment_appearance[s] = t
if prev_s >= 0: # dont record prev_s = -1
num_to_last_alignment_appearance[prev_s] = t - 1
prev_s = s
# add last appearance of the final s
num_to_last_alignment_appearance[prev_s] = len(alignment_utt) - 1
# update all the t_start and t_end in utt_obj
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
segment = segment_or_token
segment.t_start = num_to_first_alignment_appearance[segment.s_start] * output_timestep_duration
segment.t_end = (num_to_last_alignment_appearance[segment.s_end] + 1) * output_timestep_duration
for word_or_token in segment.words_and_tokens:
if type(word_or_token) is Word:
word = word_or_token
word.t_start = num_to_first_alignment_appearance[word.s_start] * output_timestep_duration
word.t_end = (num_to_last_alignment_appearance[word.s_end] + 1) * output_timestep_duration
for token in word.tokens:
if token.s_start in num_to_first_alignment_appearance:
token.t_start = num_to_first_alignment_appearance[token.s_start] * output_timestep_duration
else:
token.t_start = -1
if token.s_end in num_to_last_alignment_appearance:
token.t_end = (
num_to_last_alignment_appearance[token.s_end] + 1
) * output_timestep_duration
else:
token.t_end = -1
else:
token = word_or_token
if token.s_start in num_to_first_alignment_appearance:
token.t_start = num_to_first_alignment_appearance[token.s_start] * output_timestep_duration
else:
token.t_start = -1
if token.s_end in num_to_last_alignment_appearance:
token.t_end = (num_to_last_alignment_appearance[token.s_end] + 1) * output_timestep_duration
else:
token.t_end = -1
else:
token = segment_or_token
if token.s_start in num_to_first_alignment_appearance:
token.t_start = num_to_first_alignment_appearance[token.s_start] * output_timestep_duration
else:
token.t_start = -1
if token.s_end in num_to_last_alignment_appearance:
token.t_end = (num_to_last_alignment_appearance[token.s_end] + 1) * output_timestep_duration
else:
token.t_end = -1
return utt_obj
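# Worked example (values taken from the unit test for this function): with output_timestep_duration = 0.04
# and an alignment whose first occurrence of s=1 is at timestep 0 and whose last occurrence of s=3 is at
# timestep 3, a word with s_start=1 and s_end=3 gets t_start = 0 * 0.04 = 0.0 s and t_end = (3 + 1) * 0.04 = 0.16 s.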
def get_batch_variables(
manifest_lines_batch,
model,
separator,
align_using_pred_text,
audio_filepath_parts_in_utt_id,
output_timestep_duration,
simulate_cache_aware_streaming=False,
use_buffered_chunked_streaming=False,
buffered_chunk_params={},
):
"""
Returns:
log_probs, y, T, U (y and U are s.t. every other token is a blank) - these are the tensors we will need
during Viterbi decoding.
utt_obj_batch: a list of Utterance objects for every utterance in the batch.
output_timestep_duration: a float indicating the duration of a single output timestep from
the ASR Model.
"""
# get hypotheses by calling 'transcribe'
# we will use the output log_probs, the duration of the log_probs,
# and (optionally) the predicted ASR text from the hypotheses
audio_filepaths_batch = [line["audio_filepath"] for line in manifest_lines_batch]
B = len(audio_filepaths_batch)
log_probs_list_batch = []
T_list_batch = []
pred_text_batch = []
if not use_buffered_chunked_streaming:
if not simulate_cache_aware_streaming:
with torch.no_grad():
hypotheses = model.transcribe(audio_filepaths_batch, return_hypotheses=True, batch_size=B)
else:
with torch.no_grad():
hypotheses = model.transcribe_simulate_cache_aware_streaming(
audio_filepaths_batch, return_hypotheses=True, batch_size=B
)
# if hypotheses form a tuple (from Hybrid model), extract just "best" hypothesis
if type(hypotheses) == tuple and len(hypotheses) == 2:
hypotheses = hypotheses[0]
for hypothesis in hypotheses:
log_probs_list_batch.append(hypothesis.y_sequence)
T_list_batch.append(hypothesis.y_sequence.shape[0])
pred_text_batch.append(hypothesis.text)
else:
delay = buffered_chunk_params["delay"]
model_stride_in_secs = buffered_chunk_params["model_stride_in_secs"]
tokens_per_chunk = buffered_chunk_params["tokens_per_chunk"]
for l in tqdm(audio_filepaths_batch, desc="Sample:"):
model.reset()
model.read_audio_file(l, delay, model_stride_in_secs)
hyp, logits = model.transcribe(tokens_per_chunk, delay, keep_logits=True)
log_probs_list_batch.append(logits)
T_list_batch.append(logits.shape[0])
pred_text_batch.append(hyp)
# we loop over every line in the manifest that is in our current batch,
# and record the y (list of tokens, including blanks), U (list of lengths of y) and
# token_info_batch, word_info_batch, segment_info_batch
y_list_batch = []
U_list_batch = []
utt_obj_batch = []
for i_line, line in enumerate(manifest_lines_batch):
if align_using_pred_text:
gt_text_for_alignment = " ".join(pred_text_batch[i_line].split())
else:
gt_text_for_alignment = line["text"]
utt_obj = get_utt_obj(
gt_text_for_alignment,
model,
separator,
T_list_batch[i_line],
audio_filepaths_batch[i_line],
_get_utt_id(audio_filepaths_batch[i_line], audio_filepath_parts_in_utt_id),
)
# update utt_obj.pred_text or utt_obj.text
if align_using_pred_text:
utt_obj.pred_text = pred_text_batch[i_line]
if len(utt_obj.pred_text) == 0:
logging.info(
f"'pred_text' of utterance {utt_obj.utt_id} is empty - we will not generate"
" any output alignment files for this utterance"
)
if "text" in line:
utt_obj.text = line["text"] # keep the text as we will save it in the output manifest
else:
utt_obj.text = line["text"]
if len(utt_obj.text) == 0:
logging.info(
f"'text' of utterance {utt_obj.utt_id} is empty - we will not generate"
" any output alignment files for this utterance"
)
y_list_batch.append(utt_obj.token_ids_with_blanks)
U_list_batch.append(len(utt_obj.token_ids_with_blanks))
utt_obj_batch.append(utt_obj)
# turn log_probs, y, T, U into dense tensors for fast computation during Viterbi decoding
T_max = max(T_list_batch)
U_max = max(U_list_batch)
# V = the number of tokens in the vocabulary + 1 for the blank token.
if hasattr(model, 'tokenizer'):
V = len(model.tokenizer.vocab) + 1
else:
V = len(model.decoder.vocabulary) + 1
T_batch = torch.tensor(T_list_batch)
U_batch = torch.tensor(U_list_batch)
# make log_probs_batch tensor of shape (B x T_max x V)
log_probs_batch = V_NEGATIVE_NUM * torch.ones((B, T_max, V))
for b, log_probs_utt in enumerate(log_probs_list_batch):
t = log_probs_utt.shape[0]
log_probs_batch[b, :t, :] = log_probs_utt
# make y tensor of shape (B x U_max)
# populate it initially with all 'V' numbers so that the 'V's will remain in the areas that
    # are 'padding'. This will be useful for when we make 'log_probs_reordered' during Viterbi decoding
# in a different function.
y_batch = V * torch.ones((B, U_max), dtype=torch.int64)
for b, y_utt in enumerate(y_list_batch):
U_utt = U_batch[b]
y_batch[b, :U_utt] = torch.tensor(y_utt)
# calculate output_timestep_duration if it is None
if output_timestep_duration is None:
        if 'window_stride' not in model.cfg.preprocessor:
raise ValueError(
"Don't have attribute 'window_stride' in 'model.cfg.preprocessor' => cannot calculate "
" model_downsample_factor => stopping process"
)
        if 'sample_rate' not in model.cfg.preprocessor:
raise ValueError(
"Don't have attribute 'sample_rate' in 'model.cfg.preprocessor' => cannot calculate start "
" and end time of segments => stopping process"
)
with sf.SoundFile(audio_filepaths_batch[0]) as f:
audio_dur = f.frames / f.samplerate
n_input_frames = audio_dur / model.cfg.preprocessor.window_stride
model_downsample_factor = round(n_input_frames / int(T_batch[0]))
output_timestep_duration = (
model.preprocessor.featurizer.hop_length * model_downsample_factor / model.cfg.preprocessor.sample_rate
)
logging.info(
f"Calculated that the model downsample factor is {model_downsample_factor}"
f" and therefore the ASR model output timestep duration is {output_timestep_duration}"
" -- will use this for all batches"
)
return (
log_probs_batch,
y_batch,
T_batch,
U_batch,
utt_obj_batch,
output_timestep_duration,
)
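# A worked example of the output_timestep_duration arithmetic above. The numbers below are
# illustrative assumptions rather than values read from any particular model config: with a
# featurizer hop_length of 160 samples, a 16 kHz sample rate and a model downsample factor of 8,
# each ASR output frame covers 160 * 8 / 16000 = 0.08 seconds, so output frame index t maps to
# roughly t * 0.08 seconds in the original audio.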
|
NeMo-main
|
tools/nemo_forced_aligner/utils/data_prep.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains functions for making ASS-format subtitle files based on the generated alignment.
ASS files can be generated highlighting token-level alignments or word-level alignments.
In both cases, 'segment' boundaries will be used to determine which parts of the text will appear
at the same time.
For the token-level ASS files, the text will be highlighted token-by-token, with the timings determined
by the NFA alignments.
For the word-level ASS files, the text will be highlighted word-by-word, with the timings determined
by the NFA alignments.
"""
import math
import os
import soundfile as sf
from utils.constants import BLANK_TOKEN, SPACE_TOKEN
from utils.data_prep import Segment, Token, Word
PLAYERRESX = 384
PLAYERRESY = 288
MARGINL = 10
MARGINR = 10
MARGINV = 20
def seconds_to_ass_format(seconds_float):
seconds_float = float(seconds_float)
mm, ss_decimals = divmod(seconds_float, 60)
hh, mm = divmod(mm, 60)
hh = str(round(hh))
if len(hh) == 1:
hh = '0' + hh
mm = str(round(mm))
if len(mm) == 1:
mm = '0' + mm
ss_decimals = f"{ss_decimals:.2f}"
if len(ss_decimals.split(".")[0]) == 1:
ss_decimals = "0" + ss_decimals
srt_format_time = f"{hh}:{mm}:{ss_decimals}"
return srt_format_time
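# For instance (illustrative inputs): seconds_to_ass_format(3725.5) returns "01:02:05.50" and
# seconds_to_ass_format(0) returns "00:00:00.00", i.e. the HH:MM:SS.cc timing format used in the
# "Dialogue" lines written further below.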
def rgb_list_to_hex_bgr(rgb_list):
r, g, b = rgb_list
return f"{b:x}{g:x}{r:x}"
def make_ass_files(
utt_obj, output_dir_root, ass_file_config,
):
# don't try to make files if utt_obj.segments_and_tokens is empty, which will happen
# in the case of the ground truth text being empty or the number of tokens being too large vs audio duration
if not utt_obj.segments_and_tokens:
return utt_obj
if ass_file_config.resegment_text_to_fill_space:
utt_obj = resegment_utt_obj(utt_obj, ass_file_config)
# get duration of the utterance, so we know the final timestamp of the final set of subtitles,
# which we will keep showing until the end
with sf.SoundFile(utt_obj.audio_filepath) as f:
audio_dur = f.frames / f.samplerate
utt_obj = make_word_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur)
utt_obj = make_token_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur)
return utt_obj
def _get_word_n_chars(word):
n_chars = 0
for token in word.tokens:
if token.text != BLANK_TOKEN:
n_chars += len(token.text)
return n_chars
def _get_segment_n_chars(segment):
n_chars = 0
for word_or_token in segment.words_and_tokens:
if word_or_token.text == SPACE_TOKEN:
n_chars += 1
elif word_or_token.text != BLANK_TOKEN:
n_chars += len(word_or_token.text)
return n_chars
def resegment_utt_obj(utt_obj, ass_file_config):
# get list of just all words and tokens
all_words_and_tokens = []
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
all_words_and_tokens.extend(segment_or_token.words_and_tokens)
else:
all_words_and_tokens.append(segment_or_token)
# figure out how many chars will fit into one 'slide' and thus should be the max
# size of a segment
approx_chars_per_line = (PLAYERRESX - MARGINL - MARGINR) / (
ass_file_config.fontsize * 0.6
) # assume chars 0.6 as wide as they are tall
approx_lines_per_segment = (PLAYERRESY - MARGINV) / (
ass_file_config.fontsize * 1.15
) # assume line spacing is 1.15
if approx_lines_per_segment > ass_file_config.max_lines_per_segment:
approx_lines_per_segment = ass_file_config.max_lines_per_segment
max_chars_per_segment = int(approx_chars_per_line * approx_lines_per_segment)
new_segments_and_tokens = []
all_words_and_tokens_pointer = 0
for word_or_token in all_words_and_tokens:
if type(word_or_token) is Token:
new_segments_and_tokens.append(word_or_token)
all_words_and_tokens_pointer += 1
else:
break
new_segments_and_tokens.append(Segment())
while all_words_and_tokens_pointer < len(all_words_and_tokens):
word_or_token = all_words_and_tokens[all_words_and_tokens_pointer]
if type(word_or_token) is Word:
# if this is going to be the first word in the segment, we definitely want
# to add it to the segment
if not new_segments_and_tokens[-1].words_and_tokens:
new_segments_and_tokens[-1].words_and_tokens.append(word_or_token)
else:
# if not the first word, check what the new length of the segment will be
# if short enough - add this word to this segment;
# if too long - add to a new segment
this_word_n_chars = _get_word_n_chars(word_or_token)
segment_so_far_n_chars = _get_segment_n_chars(new_segments_and_tokens[-1])
if this_word_n_chars + segment_so_far_n_chars < max_chars_per_segment:
new_segments_and_tokens[-1].words_and_tokens.append(word_or_token)
else:
new_segments_and_tokens.append(Segment())
new_segments_and_tokens[-1].words_and_tokens.append(word_or_token)
else: # i.e. word_or_token is a token
# currently this breaks the convention of tokens at the end/beginning
# of segments being listed as separate tokens in segment.words_and_tokens
# TODO: change code so we follow this convention
new_segments_and_tokens[-1].words_and_tokens.append(word_or_token)
all_words_and_tokens_pointer += 1
utt_obj.segments_and_tokens = new_segments_and_tokens
return utt_obj
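# A worked example of the resegmentation arithmetic above (the fontsize and max_lines_per_segment
# values here are illustrative, not defaults): with fontsize 20 and max_lines_per_segment 2,
# approx_chars_per_line = (384 - 10 - 10) / (20 * 0.6) ~= 30.3 and
# approx_lines_per_segment = (288 - 20) / (20 * 1.15) ~= 11.7, which is capped to 2, giving
# max_chars_per_segment = int(30.3 * 2) = 60 characters per subtitle 'slide'.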
def make_word_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur):
default_style_dict = {
"Name": "Default",
"Fontname": "Arial",
"Fontsize": str(ass_file_config.fontsize),
"PrimaryColour": "&Hffffff",
"SecondaryColour": "&Hffffff",
"OutlineColour": "&H0",
"BackColour": "&H0",
"Bold": "0",
"Italic": "0",
"Underline": "0",
"StrikeOut": "0",
"ScaleX": "100",
"ScaleY": "100",
"Spacing": "0",
"Angle": "0",
"BorderStyle": "1",
"Outline": "1",
"Shadow": "0",
"Alignment": None, # will specify below
"MarginL": str(MARGINL),
"MarginR": str(MARGINR),
"MarginV": str(MARGINV),
"Encoding": "0",
}
if ass_file_config.vertical_alignment == "top":
default_style_dict["Alignment"] = "8" # text will be 'center-justified' and in the top of the screen
elif ass_file_config.vertical_alignment == "center":
default_style_dict["Alignment"] = "5" # text will be 'center-justified' and in the middle of the screen
elif ass_file_config.vertical_alignment == "bottom":
default_style_dict["Alignment"] = "2" # text will be 'center-justified' and in the bottom of the screen
else:
raise ValueError(f"got an unexpected value for ass_file_config.vertical_alignment")
output_dir = os.path.join(output_dir_root, "ass", "words")
os.makedirs(output_dir, exist_ok=True)
output_file = os.path.join(output_dir, f"{utt_obj.utt_id}.ass")
already_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_already_spoken_rgb) + r"&}"
being_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_being_spoken_rgb) + r"&}"
not_yet_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_not_yet_spoken_rgb) + r"&}"
with open(output_file, 'w') as f:
default_style_top_line = "Format: " + ", ".join(default_style_dict.keys())
default_style_bottom_line = "Style: " + ",".join(default_style_dict.values())
f.write(
(
"[Script Info]\n"
"ScriptType: v4.00+\n"
f"PlayResX: {PLAYERRESX}\n"
f"PlayResY: {PLAYERRESY}\n"
"\n"
"[V4+ Styles]\n"
f"{default_style_top_line}\n"
f"{default_style_bottom_line}\n"
"\n"
"[Events]\n"
"Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n\n"
)
)
# write first set of subtitles for text before speech starts to be spoken
words_in_first_segment = []
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
first_segment = segment_or_token
for word_or_token in first_segment.words_and_tokens:
if type(word_or_token) is Word:
words_in_first_segment.append(word_or_token)
break
text_before_speech = not_yet_spoken_color_code + " ".join([x.text for x in words_in_first_segment]) + r"{\r}"
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(0)},{seconds_to_ass_format(words_in_first_segment[0].t_start)},Default,,0,0,0,,"
+ text_before_speech.rstrip()
)
f.write(subtitle_text + '\n')
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
segment = segment_or_token
words_in_segment = []
for word_or_token in segment.words_and_tokens:
if type(word_or_token) is Word:
words_in_segment.append(word_or_token)
for word_i, word in enumerate(words_in_segment):
text_before = " ".join([x.text for x in words_in_segment[:word_i]])
if text_before != "":
text_before += " "
text_before = already_spoken_color_code + text_before + r"{\r}"
if word_i < len(words_in_segment) - 1:
text_after = " " + " ".join([x.text for x in words_in_segment[word_i + 1 :]])
else:
text_after = ""
text_after = not_yet_spoken_color_code + text_after + r"{\r}"
aligned_text = being_spoken_color_code + word.text + r"{\r}"
aligned_text_off = already_spoken_color_code + word.text + r"{\r}"
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(word.t_start)},{seconds_to_ass_format(word.t_end)},Default,,0,0,0,,"
+ text_before
+ aligned_text
+ text_after.rstrip()
)
f.write(subtitle_text + '\n')
# add subtitles without word-highlighting for when words are not being spoken
if word_i < len(words_in_segment) - 1:
last_word_end = float(words_in_segment[word_i].t_end)
next_word_start = float(words_in_segment[word_i + 1].t_start)
if next_word_start - last_word_end > 0.001:
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(last_word_end)},{seconds_to_ass_format(next_word_start)},Default,,0,0,0,,"
+ text_before
+ aligned_text_off
+ text_after.rstrip()
)
f.write(subtitle_text + '\n')
# write final set of subtitles for text after speech has been spoken
words_in_final_segment = []
for segment_or_token in utt_obj.segments_and_tokens[::-1]:
if type(segment_or_token) is Segment:
final_segment = segment_or_token
for word_or_token in final_segment.words_and_tokens:
if type(word_or_token) is Word:
words_in_final_segment.append(word_or_token)
break
text_after_speech = already_spoken_color_code + " ".join([x.text for x in words_in_final_segment]) + r"{\r}"
# note: for now doing some extra padding with math.ceil(audio_dur) + 1 to account for the fact that the video with subtitles can become
# longer than the original audio during the MP4 creation stage.
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(words_in_final_segment[-1].t_end)},{seconds_to_ass_format(math.ceil(audio_dur)+1)},Default,,0,0,0,,"
+ text_after_speech.rstrip()
)
f.write(subtitle_text + '\n')
utt_obj.saved_output_files[f"words_level_ass_filepath"] = output_file
return utt_obj
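# An illustrative word-level Dialogue event as written above (timings and colour codes here are
# made-up values, not the config defaults): for the word "cat" aligned to 1.0-1.5 s inside the
# segment "the cat sat", the emitted line is roughly
# Dialogue: 0,00:00:01.00,00:00:01.50,Default,,0,0,0,,{\c&H808080&}the {\r}{\c&H00ffff&}cat{\r}{\c&Hffffff&} sat{\r}
# i.e. already-spoken text, the word being spoken and not-yet-spoken text each get their own
# colour override, reset with {\r}.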
def make_token_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur):
default_style_dict = {
"Name": "Default",
"Fontname": "Arial",
"Fontsize": str(ass_file_config.fontsize),
"PrimaryColour": "&Hffffff",
"SecondaryColour": "&Hffffff",
"OutlineColour": "&H0",
"BackColour": "&H0",
"Bold": "0",
"Italic": "0",
"Underline": "0",
"StrikeOut": "0",
"ScaleX": "100",
"ScaleY": "100",
"Spacing": "0",
"Angle": "0",
"BorderStyle": "1",
"Outline": "1",
"Shadow": "0",
"Alignment": None, # will specify below
"MarginL": str(MARGINL),
"MarginR": str(MARGINR),
"MarginV": str(MARGINV),
"Encoding": "0",
}
if ass_file_config.vertical_alignment == "top":
default_style_dict["Alignment"] = "8" # text will be 'center-justified' and in the top of the screen
elif ass_file_config.vertical_alignment == "center":
default_style_dict["Alignment"] = "5" # text will be 'center-justified' and in the middle of the screen
elif ass_file_config.vertical_alignment == "bottom":
default_style_dict["Alignment"] = "2" # text will be 'center-justified' and in the bottom of the screen
else:
raise ValueError(f"got an unexpected value for ass_file_config.vertical_alignment")
output_dir = os.path.join(output_dir_root, "ass", "tokens")
os.makedirs(output_dir, exist_ok=True)
output_file = os.path.join(output_dir, f"{utt_obj.utt_id}.ass")
already_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_already_spoken_rgb) + r"&}"
being_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_being_spoken_rgb) + r"&}"
not_yet_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_not_yet_spoken_rgb) + r"&}"
with open(output_file, 'w') as f:
default_style_top_line = "Format: " + ", ".join(default_style_dict.keys())
default_style_bottom_line = "Style: " + ",".join(default_style_dict.values())
f.write(
(
"[Script Info]\n"
"ScriptType: v4.00+\n"
f"PlayResX: {PLAYERRESX}\n"
f"PlayResY: {PLAYERRESY}\n"
"ScaledBorderAndShadow: yes\n"
"\n"
"[V4+ Styles]\n"
f"{default_style_top_line}\n"
f"{default_style_bottom_line}\n"
"\n"
"[Events]\n"
"Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n\n"
)
)
# write first set of subtitles for text before speech starts to be spoken
tokens_in_first_segment = []
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
for word_or_token in segment_or_token.words_and_tokens:
if type(word_or_token) is Token:
if word_or_token.text != BLANK_TOKEN:
tokens_in_first_segment.append(word_or_token)
else:
for token in word_or_token.tokens:
if token.text != BLANK_TOKEN:
tokens_in_first_segment.append(token)
break
for token in tokens_in_first_segment:
token.text_cased = token.text_cased.replace(
"▁", " "
) # replace underscores used in subword tokens with spaces
token.text_cased = token.text_cased.replace(SPACE_TOKEN, " ") # space token with actual space
text_before_speech = (
not_yet_spoken_color_code + "".join([x.text_cased for x in tokens_in_first_segment]) + r"{\r}"
)
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(0)},{seconds_to_ass_format(tokens_in_first_segment[0].t_start)},Default,,0,0,0,,"
+ text_before_speech.rstrip()
)
f.write(subtitle_text + '\n')
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
segment = segment_or_token
tokens_in_segment = [] # make list of (non-blank) tokens
for word_or_token in segment.words_and_tokens:
if type(word_or_token) is Token:
if word_or_token.text != BLANK_TOKEN:
tokens_in_segment.append(word_or_token)
else:
for token in word_or_token.tokens:
if token.text != BLANK_TOKEN:
tokens_in_segment.append(token)
for token in tokens_in_segment:
token.text_cased = token.text_cased.replace(
"▁", " "
) # replace underscores used in subword tokens with spaces
token.text_cased = token.text_cased.replace(SPACE_TOKEN, " ") # space token with actual space
for token_i, token in enumerate(tokens_in_segment):
text_before = "".join([x.text_cased for x in tokens_in_segment[:token_i]])
text_before = already_spoken_color_code + text_before + r"{\r}"
if token_i < len(tokens_in_segment) - 1:
text_after = "".join([x.text_cased for x in tokens_in_segment[token_i + 1 :]])
else:
text_after = ""
text_after = not_yet_spoken_color_code + text_after + r"{\r}"
aligned_text = being_spoken_color_code + token.text_cased + r"{\r}"
aligned_text_off = already_spoken_color_code + token.text_cased + r"{\r}"
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(token.t_start)},{seconds_to_ass_format(token.t_end)},Default,,0,0,0,,"
+ text_before
+ aligned_text
+ text_after.rstrip()
)
f.write(subtitle_text + '\n')
# add subtitles without token-highlighting for when tokens are not being spoken
if token_i < len(tokens_in_segment) - 1:
last_token_end = float(tokens_in_segment[token_i].t_end)
next_token_start = float(tokens_in_segment[token_i + 1].t_start)
if next_token_start - last_token_end > 0.001:
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(last_token_end)},{seconds_to_ass_format(next_token_start)},Default,,0,0,0,,"
+ text_before
+ aligned_text_off
+ text_after.rstrip()
)
f.write(subtitle_text + '\n')
# Write final set of subtitles for text after speech has been spoken.
# To do this, we need to collect 'tokens_in_final_segment' so that we know what the final line is.
tokens_in_final_segment = []
for segment_or_token in utt_obj.segments_and_tokens[::-1]:
# Collect tokens from final segment - will 'break' so we only look at the final one.
if type(segment_or_token) is Segment:
# 'segment_or_token' is known to be Segment, which has attribute 'words_and_tokens'
for word_or_token in segment_or_token.words_and_tokens:
if type(word_or_token) is Token:
if word_or_token.text != BLANK_TOKEN:
tokens_in_final_segment.append(word_or_token)
else:
# 'word_or_token' is known to be a Word, which has attribute 'tokens'
for token in word_or_token.tokens:
if token.text != BLANK_TOKEN:
tokens_in_final_segment.append(token)
break
for token in tokens_in_final_segment:
token.text_cased = token.text_cased.replace(
"▁", " "
) # replace underscores used in subword tokens with spaces
token.text_cased = token.text_cased.replace(SPACE_TOKEN, " ") # space token with actual space
text_after_speech = (
already_spoken_color_code + "".join([x.text_cased for x in tokens_in_final_segment]) + r"{\r}"
)
# note: for now doing some extra padding with math.ceil(audio_dur) + 1 to account for the fact that the video with subtitles can become
# longer than the original audio during the MP4 creation stage.
subtitle_text = (
f"Dialogue: 0,{seconds_to_ass_format(tokens_in_final_segment[-1].t_end)},{seconds_to_ass_format(math.ceil(audio_dur)+1)},Default,,0,0,0,,"
+ text_after_speech.rstrip()
)
f.write(subtitle_text + '\n')
utt_obj.saved_output_files[f"tokens_level_ass_filepath"] = output_file
return utt_obj
|
NeMo-main
|
tools/nemo_forced_aligner/utils/make_ass_files.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import soundfile as sf
from utils.constants import BLANK_TOKEN, SPACE_TOKEN
from utils.data_prep import Segment, Word
def make_ctm_files(
utt_obj, output_dir_root, ctm_file_config,
):
"""
Function to save CTM files for all the utterances in the incoming batch.
"""
# don't try to make files if utt_obj.segments_and_tokens is empty, which will happen
# in the case of the ground truth text being empty or the number of tokens being too large vs audio duration
if not utt_obj.segments_and_tokens:
return utt_obj
# get audio file duration if we will need it later
if ctm_file_config.minimum_timestamp_duration > 0:
with sf.SoundFile(utt_obj.audio_filepath) as f:
audio_file_duration = f.frames / f.samplerate
else:
audio_file_duration = None
utt_obj = make_ctm("tokens", utt_obj, output_dir_root, audio_file_duration, ctm_file_config,)
utt_obj = make_ctm("words", utt_obj, output_dir_root, audio_file_duration, ctm_file_config,)
utt_obj = make_ctm("segments", utt_obj, output_dir_root, audio_file_duration, ctm_file_config,)
return utt_obj
def make_ctm(
alignment_level, utt_obj, output_dir_root, audio_file_duration, ctm_file_config,
):
output_dir = os.path.join(output_dir_root, "ctm", alignment_level)
os.makedirs(output_dir, exist_ok=True)
boundary_info_utt = []
for segment_or_token in utt_obj.segments_and_tokens:
if type(segment_or_token) is Segment:
segment = segment_or_token
if alignment_level == "segments":
boundary_info_utt.append(segment)
for word_or_token in segment.words_and_tokens:
if type(word_or_token) is Word:
word = word_or_token
if alignment_level == "words":
boundary_info_utt.append(word)
for token in word.tokens:
if alignment_level == "tokens":
boundary_info_utt.append(token)
else:
token = word_or_token
if alignment_level == "tokens":
boundary_info_utt.append(token)
else:
token = segment_or_token
if alignment_level == "tokens":
boundary_info_utt.append(token)
with open(os.path.join(output_dir, f"{utt_obj.utt_id}.ctm"), "w") as f_ctm:
for boundary_info_ in boundary_info_utt: # loop over every token/word/segment
# skip if t_start = t_end = negative number because we used it as a marker to skip some blank tokens
if not (boundary_info_.t_start < 0 or boundary_info_.t_end < 0):
text = boundary_info_.text
start_time = boundary_info_.t_start
end_time = boundary_info_.t_end
if (
ctm_file_config.minimum_timestamp_duration > 0
and ctm_file_config.minimum_timestamp_duration > end_time - start_time
):
# make the predicted duration of the token/word/segment longer, growing it outwards equal
# amounts from the predicted center of the token/word/segment
token_mid_point = (start_time + end_time) / 2
start_time = max(token_mid_point - ctm_file_config.minimum_timestamp_duration / 2, 0)
end_time = min(
token_mid_point + ctm_file_config.minimum_timestamp_duration / 2, audio_file_duration
)
if not (
text == BLANK_TOKEN and ctm_file_config.remove_blank_tokens
): # don't save blanks if we don't want to
# replace any spaces with <space> so we don't introduce extra space characters to our CTM files
text = text.replace(" ", SPACE_TOKEN)
f_ctm.write(f"{utt_obj.utt_id} 1 {start_time:.2f} {end_time - start_time:.2f} {text}\n")
utt_obj.saved_output_files[f"{alignment_level}_level_ctm_filepath"] = os.path.join(
output_dir, f"{utt_obj.utt_id}.ctm"
)
return utt_obj
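# The CTM lines written above follow "<utt_id> <channel> <start> <duration> <text>", e.g.
# (illustrative values) "utt_0001 1 1.20 0.30 cat". If minimum_timestamp_duration were 0.5 s,
# that 0.30 s entry would be grown symmetrically around its midpoint at 1.35 s to start 1.10
# and duration 0.50, clamped to [0, audio_file_duration].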
|
NeMo-main
|
tools/nemo_forced_aligner/utils/make_ctm_files.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import set_start_method
from nemo.collections.asr.data.data_simulation import MultiSpeakerSimulator, RIRMultiSpeakerSimulator
from nemo.core.config import hydra_runner
"""
This script creates a synthetic diarization session using the provided audio dataset with ctm files.
Usage:
python <NEMO_ROOT>/tools/speech_data_simulator/multispeaker_simulator.py \
num_workers=10 \
data_simulator.random_seed=42 \
data_simulator.manifest_filepath=manifest_with_alignment_file.json \
data_simulator.outputs.output_dir=./simulated_data \
data_simulator.outputs.output_filename=sim_spk2_sess20 \
data_simulator.session_config.num_sessions=1000 \
data_simulator.session_config.num_speakers=2 \
data_simulator.session_config.session_length=20 \
data_simulator.background_noise.add_bg=False \
data_simulator.background_noise.background_manifest=background_noise.json \
data_simulator.background_noise.snr=40 \
Check out parameters in ./conf/data_simulator.yaml.
"""
@hydra_runner(config_path="conf", config_name="data_simulator.yaml")
def main(cfg):
if cfg.data_simulator.rir_generation.use_rir:
simulator = RIRMultiSpeakerSimulator(cfg=cfg)
else:
simulator = MultiSpeakerSimulator(cfg=cfg)
set_start_method('spawn', force=True)
simulator.generate_sessions()
if __name__ == "__main__":
main()
|
NeMo-main
|
tools/speech_data_simulator/multispeaker_simulator.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
from glob import glob
from typing import List, Optional
import regex
from joblib import Parallel, delayed
from normalization_helpers import LATIN_TO_RU, RU_ABBREVIATIONS
from num2words import num2words
from sox import Transformer
from tqdm import tqdm
from nemo.collections.asr.models import ASRModel
from nemo.utils import model_utils
try:
from nemo_text_processing.text_normalization.normalize import Normalizer
NEMO_NORMALIZATION_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
NEMO_NORMALIZATION_AVAILABLE = False
parser = argparse.ArgumentParser(description="Prepares text and audio files for segmentation")
parser.add_argument("--in_text", type=str, default=None, help="Path to a text file or a directory with .txt files")
parser.add_argument("--output_dir", type=str, required=True, help="Path to output directory")
parser.add_argument("--audio_dir", type=str, help="Path to folder with .mp3 or .wav audio files")
parser.add_argument("--sample_rate", type=int, default=16000, help="Sampling rate used during ASR model training, Hz")
parser.add_argument("--bit_depth", type=int, default=16, help="Bit depth to use for processed audio files")
parser.add_argument("--n_jobs", default=-2, type=int, help="The maximum number of concurrently running jobs")
parser.add_argument(
"--language",
type=str,
default="en",
choices=["en", "ru", "de", "es", 'other'],
help='Add target language based on the num2words list of supported languages',
)
parser.add_argument(
"--cut_prefix", type=int, default=0, help="Number of seconds to cut from the beginning of the audio files.",
)
parser.add_argument(
"--model", type=str, default="QuartzNet15x5Base-En", help="Pre-trained model name or path to model checkpoint"
)
parser.add_argument(
"--max_length", type=int, default=40, help="Max number of words of the text segment for alignment."
)
parser.add_argument(
"--additional_split_symbols",
type=str,
default="",
help="Additional symbols to use for \
sentence split if the end-of-sentence split resulted in sequences longer than --max_length. "
"Use '|' as a separator between symbols, for example: ';|:'. Use '/s' to split by space.",
)
parser.add_argument(
"--use_nemo_normalization",
action="store_true",
help="Set to True to use NeMo Normalization tool to convert numbers from written to spoken format.",
)
parser.add_argument(
"--batch_size", type=int, default=100, help="Batch size for NeMo Normalization tool.",
)
def process_audio(
in_file: str, wav_file: str = None, cut_prefix: int = 0, sample_rate: int = 16000, bit_depth: int = 16
):
"""Process audio file: .mp3 to .wav conversion and cut a few seconds from the beginning of the audio
Args:
in_file: path to the .mp3 or .wav file for processing
wav_file: path to the output .wav file
cut_prefix: number of seconds to cut from the beginning of the audio file
sample_rate: target sampling rate
bit_depth: target bit_depth
"""
try:
if not os.path.exists(in_file):
raise ValueError(f'{in_file} not found')
tfm = Transformer()
tfm.convert(samplerate=sample_rate, n_channels=1, bitdepth=bit_depth)
tfm.trim(cut_prefix)
tfm.build(input_filepath=in_file, output_filepath=wav_file)
except Exception as e:
print(f'{in_file} skipped - {e}')
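# A minimal usage sketch (the file names below are placeholders, not files shipped with this
# script): process_audio("lecture.mp3", "lecture.wav", cut_prefix=2, sample_rate=16000, bit_depth=16)
# would write a 16 kHz, 16-bit mono lecture.wav with the first 2 seconds trimmed off.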
def split_text(
in_file: str,
out_file: str,
vocabulary: List[str],
language="en",
remove_brackets: bool = True,
do_lower_case: bool = True,
max_length: int = 100,
additional_split_symbols: Optional[str] = None,
use_nemo_normalization: bool = False,
n_jobs: Optional[int] = 1,
batch_size: Optional[int] = 1,
):
"""
Breaks down the in_file roughly into sentences. Each sentence will be on a separate line.
Written form of the numbers will be converted to its spoken equivalent, OOV punctuation will be removed.
Args:
in_file: path to original transcript
out_file: path to the output file
vocabulary: ASR model vocabulary
language: text language
remove_brackets: Set to True if square [] and curly {} brackets should be removed from text.
Text in square/curly brackets often contains inaudible fragments like notes or translations
do_lower_case: flag that determines whether to apply lower case to the in_file text
max_length: Max number of words of the text segment for alignment
additional_split_symbols: Additional symbols to use for sentence split if eos sentence split resulted in
segments longer than --max_length
use_nemo_normalization: Set to True to use NeMo normalization tool to convert numbers from written to spoken
format. Normalization using num2words will be applied afterwards to make sure there are no numbers present
in the text, otherwise they will be replaced with a space and that could deteriorate segmentation results.
n_jobs (if use_nemo_normalization=True): the maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used.
batch_size (if use_nemo_normalization=True): Number of examples for each process
"""
print(f"Splitting text in {in_file} into sentences.")
with open(in_file, "r") as f:
transcript = f.read()
# remove some symbols for better split into sentences
transcript = (
transcript.replace("\n", " ")
.replace("\t", " ")
.replace("…", "...")
.replace("\\", " ")
.replace("--", " -- ")
.replace(". . .", "...")
)
# end of quoted speech - to be able to split sentences by full stop
transcript = re.sub(r"([\.\?\!])([\"\'”])", r"\g<2>\g<1> ", transcript)
# remove extra space
transcript = re.sub(r" +", " ", transcript)
if remove_brackets:
transcript = re.sub(r'(\[.*?\])', ' ', transcript)
# remove text in curly brackets
transcript = re.sub(r'(\{.*?\})', ' ', transcript)
lower_case_unicode = ''
upper_case_unicode = ''
if language == "ru":
lower_case_unicode = '\u0430-\u04FF'
upper_case_unicode = '\u0410-\u042F'
elif language not in ["ru", "en"]:
print(f"Consider using {language} unicode letters for better sentence split.")
# remove space in the middle of the lower case abbreviation to avoid splitting into separate sentences
matches = re.findall(r'[a-z' + lower_case_unicode + ']\.\s[a-z' + lower_case_unicode + ']\.', transcript)
for match in matches:
transcript = transcript.replace(match, match.replace('. ', '.'))
# find phrases in quotes
with_quotes = re.finditer(r'“[A-Za-z ?]+.*?”', transcript)
sentences = []
last_idx = 0
for m in with_quotes:
match = m.group()
match_idx = m.start()
if last_idx < match_idx:
sentences.append(transcript[last_idx:match_idx])
sentences.append(match)
last_idx = m.end()
sentences.append(transcript[last_idx:])
sentences = [s.strip() for s in sentences if s.strip()]
# Read and split transcript by utterance (roughly, sentences)
split_pattern = f"(?<!\w\.\w.)(?<![A-Z{upper_case_unicode}][a-z{lower_case_unicode}]\.)(?<![A-Z{upper_case_unicode}]\.)(?<=\.|\?|\!|\.”|\?”|\!”)\s"
new_sentences = []
for sent in sentences:
new_sentences.extend(regex.split(split_pattern, sent))
sentences = [s.strip() for s in new_sentences if s.strip()]
def additional_split(sentences, split_on_symbols):
if len(split_on_symbols) == 0:
return sentences
split_on_symbols = split_on_symbols.split("|")
def _split(sentences, delimiter):
result = []
for sent in sentences:
split_sent = sent.split(delimiter)
# keep the delimiter
split_sent = [(s + delimiter).strip() for s in split_sent[:-1]] + [split_sent[-1]]
if "," in delimiter:
# split based on comma usually results in too short utterance, combine sentences
# that result in a single word split. It's usually not recommended to do that for other delimiters.
comb = []
for s in split_sent:
MIN_LEN = 2
# if the previous sentence is too short, combine it with the current sentence
if len(comb) > 0 and (len(comb[-1].split()) <= MIN_LEN or len(s.split()) <= MIN_LEN):
comb[-1] = comb[-1] + " " + s
else:
comb.append(s)
result.extend(comb)
else:
result.extend(split_sent)
return result
another_sent_split = []
for sent in sentences:
split_sent = [sent]
for delimiter in split_on_symbols:
if len(delimiter) == 0:
continue
split_sent = _split(split_sent, delimiter + " " if delimiter != " " else delimiter)
another_sent_split.extend(split_sent)
sentences = [s.strip() for s in another_sent_split if s.strip()]
return sentences
additional_split_symbols = additional_split_symbols.replace("/s", " ")
sentences = additional_split(sentences, additional_split_symbols)
vocabulary_symbols = []
for x in vocabulary:
if x != "<unk>":
# for BPE models
vocabulary_symbols.extend([x for x in x.replace("##", "").replace("▁", "")])
vocabulary_symbols = list(set(vocabulary_symbols))
vocabulary_symbols += [x.upper() for x in vocabulary_symbols]
# check to make sure there will be no utterances for segmentation with only OOV symbols
vocab_no_space_with_digits = set(vocabulary_symbols + [str(i) for i in range(10)])
if " " in vocab_no_space_with_digits:
vocab_no_space_with_digits.remove(" ")
sentences = [
s.strip() for s in sentences if len(vocab_no_space_with_digits.intersection(set(s.lower()))) > 0 and s.strip()
]
# when no punctuation marks present in the input text, split based on max_length
if len(sentences) == 1:
sent = sentences[0].split()
sentences = []
for i in range(0, len(sent), max_length):
sentences.append(" ".join(sent[i : i + max_length]))
sentences = [s.strip() for s in sentences if s.strip()]
# save split text with original punctuation and case
out_dir, out_file_name = os.path.split(out_file)
with open(os.path.join(out_dir, out_file_name[:-4] + "_with_punct.txt"), "w") as f:
f.write(re.sub(r' +', ' ', "\n".join(sentences)))
# substitute common abbreviations before applying lower case
if language == "ru":
for k, v in RU_ABBREVIATIONS.items():
sentences = [s.replace(k, v) for s in sentences]
# replace Latin characters with Russian
for k, v in LATIN_TO_RU.items():
sentences = [s.replace(k, v) for s in sentences]
if language == "en" and use_nemo_normalization:
if not NEMO_NORMALIZATION_AVAILABLE:
raise ValueError("NeMo normalization tool is not installed.")
print("Using NeMo normalization tool...")
normalizer = Normalizer(input_case="cased", cache_dir=os.path.join(os.path.dirname(out_file), "en_grammars"))
sentences_norm = normalizer.normalize_list(
sentences, verbose=False, punct_post_process=True, n_jobs=n_jobs, batch_size=batch_size
)
if len(sentences_norm) != len(sentences):
raise ValueError("Normalization failed, number of sentences does not match.")
else:
sentences = sentences_norm
sentences = '\n'.join(sentences)
# replace numbers with num2words
try:
p = re.compile("\d+")
new_text = ""
match_end = 0
for i, m in enumerate(p.finditer(sentences)):
match = m.group()
match_start = m.start()
if i == 0:
new_text = sentences[:match_start]
else:
new_text += sentences[match_end:match_start]
match_end = m.end()
new_text += sentences[match_start:match_end].replace(match, num2words(match, lang=language))
new_text += sentences[match_end:]
sentences = new_text
except NotImplementedError:
print(
f"{language} might be missing in 'num2words' package. Add required language to the choices for the"
f"--language argument."
)
raise
sentences = re.sub(r' +', ' ', sentences)
with open(os.path.join(out_dir, out_file_name[:-4] + "_with_punct_normalized.txt"), "w") as f:
f.write(sentences)
if do_lower_case:
sentences = sentences.lower()
symbols_to_remove = ''.join(set(sentences).difference(set(vocabulary_symbols + ["\n", " "])))
sentences = sentences.translate(''.maketrans(symbols_to_remove, len(symbols_to_remove) * " "))
# remove extra space
sentences = re.sub(r' +', ' ', sentences)
with open(out_file, "w") as f:
f.write(sentences)
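# Illustrative effect of the pre-processing above (toy strings, not taken from any dataset):
# the quote re-ordering step turns '"Stop." He left.' into '"Stop". He left.' so that the
# sentence-split pattern can match the closing full stop, and the lower-case abbreviation fix
# turns 'p. m.' into 'p.m.' so that the abbreviation is not split into two sentences.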
if __name__ == "__main__":
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
text_files = []
if args.in_text:
if args.model is None:
raise ValueError(f"ASR model must be provided to extract vocabulary for text processing")
elif os.path.exists(args.model):
model_cfg = ASRModel.restore_from(restore_path=args.model, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: ASRModel
print(f"Restoring model : {imported_class.__name__}")
asr_model = imported_class.restore_from(restore_path=args.model) # type: ASRModel
model_name = os.path.splitext(os.path.basename(args.model))[0]
else:
# restore model by name
asr_model = ASRModel.from_pretrained(model_name=args.model) # type: ASRModel
model_name = args.model
vocabulary = asr_model.cfg.decoder.vocabulary
if os.path.isdir(args.in_text):
text_files = glob(f"{args.in_text}/*.txt")
else:
text_files.append(args.in_text)
for text in text_files:
base_name = os.path.basename(text)[:-4]
out_text_file = os.path.join(args.output_dir, base_name + ".txt")
split_text(
text,
out_text_file,
vocabulary=vocabulary,
language=args.language,
max_length=args.max_length,
additional_split_symbols=args.additional_split_symbols,
use_nemo_normalization=args.use_nemo_normalization,
n_jobs=args.n_jobs,
batch_size=args.batch_size,
)
print(f"Processed text saved at {args.output_dir}")
if args.audio_dir:
if not os.path.exists(args.audio_dir):
raise ValueError(f"{args.audio_dir} not found. '--audio_dir' should contain .mp3 or .wav files.")
audio_paths = glob(f"{args.audio_dir}/*")
Parallel(n_jobs=args.n_jobs)(
delayed(process_audio)(
audio_paths[i],
os.path.join(args.output_dir, os.path.splitext(os.path.basename(audio_paths[i]))[0] + ".wav"),
args.cut_prefix,
args.sample_rate,
args.bit_depth,
)
for i in tqdm(range(len(audio_paths)))
)
print("Data preparation is complete.")
|
NeMo-main
|
tools/ctc_segmentation/scripts/prepare_data.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from glob import glob
import editdistance
from joblib import Parallel, delayed
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.utils import logging
parser = argparse.ArgumentParser("Calculate metrics and filters out samples based on thresholds")
parser.add_argument(
"--manifest", required=True, help="Path .json manifest file with ASR predictions saved at `pred_text` field.",
)
parser.add_argument(
"--edge_len", type=int, help="Number of characters to use for CER calculation at the edges", default=5
)
parser.add_argument("--audio_dir", type=str, help="Path to original .wav files", default=None)
parser.add_argument("--max_cer", type=int, help="Threshold CER value, %", default=30)
parser.add_argument("--max_wer", type=int, help="Threshold WER value, %", default=75)
parser.add_argument(
"--max_len_diff_ratio",
type=float,
help="Threshold for len diff ratio between reference text "
"length and predicted text length with respect to "
"the reference text length (length measured "
"in number of characters)",
default=0.3,
)
parser.add_argument("--max_edge_cer", type=int, help="Threshold edge CER value, %", default=60)
parser.add_argument("--max_duration", type=int, help="Max duration of a segment, seconds", default=-1)
parser.add_argument("--min_duration", type=int, help="Min duration of a segment, seconds", default=1)
parser.add_argument(
"--num_jobs",
default=-2,
type=int,
help="The maximum number of concurrently running jobs, `-2` - all CPUs but one are used",
)
parser.add_argument(
"--only_filter",
action="store_true",
help="Set to True to perform only filtering (when transcripts" "are already available)",
)
def _calculate(line: dict, edge_len: int):
"""
Calculates metrics for every entry on manifest.json.
Args:
line - line of manifest.json (dict)
edge_len - number of characters for edge Character Error Rate (CER) calculations
Returns:
line - line of manifest.json (dict) with the following metrics added:
WER - word error rate
CER - character error rate
start_CER - CER at the beginning of the audio sample considering first 'edge_len' characters
end_CER - CER at the end of the audio sample considering last 'edge_len' characters
len_diff_ratio - ratio between reference text length and predicted text length with respect to
the reference text length (length measured in number of words)
"""
eps = 1e-9
text = line["text"].split()
pred_text = line["pred_text"].split()
num_words = max(len(text), eps)
word_dist = editdistance.eval(text, pred_text)
line["WER"] = word_dist / num_words * 100.0
num_chars = max(len(line["text"]), eps)
char_dist = editdistance.eval(line["text"], line["pred_text"])
line["CER"] = char_dist / num_chars * 100.0
line["start_CER"] = editdistance.eval(line["text"][:edge_len], line["pred_text"][:edge_len]) / edge_len * 100
line["end_CER"] = editdistance.eval(line["text"][-edge_len:], line["pred_text"][-edge_len:]) / edge_len * 100
line["len_diff_ratio"] = 1.0 * abs(len(text) - len(pred_text)) / max(len(text), eps)
return line
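# A worked example with toy strings: for text "the cat sat" and pred_text "the hat sat" with
# edge_len=5, the word edit distance is 1 over 3 words, so WER = 33.33; the character edit
# distance is 1 over 11 characters, so CER = 9.09; start_CER compares "the c" with "the h"
# (20.0), end_CER compares "t sat" with "t sat" (0.0), and len_diff_ratio is 0.0 since both
# texts contain 3 words.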
def get_metrics(manifest, manifest_out):
"""Calculate metrics for sample in manifest and saves the results to manifest_out"""
with open(manifest, "r") as f:
lines = f.readlines()
lines = Parallel(n_jobs=args.num_jobs)(
delayed(_calculate)(json.loads(line), edge_len=args.edge_len) for line in tqdm(lines)
)
with open(manifest_out, "w") as f_out:
for line in lines:
f_out.write(json.dumps(line) + "\n")
logging.info(f"Metrics save at {manifest_out}")
def _apply_filters(
manifest,
manifest_out,
max_cer,
max_wer,
max_edge_cer,
max_len_diff_ratio,
max_dur=-1,
min_dur=1,
original_duration=0,
):
""" Filters out samples that do not satisfy specified threshold values and saves remaining samples to manifest_out"""
remaining_duration = 0
segmented_duration = 0
with open(manifest, "r") as f, open(manifest_out, "w") as f_out:
for line in f:
item = json.loads(line)
cer = item["CER"]
wer = item["WER"]
len_diff_ratio = item["len_diff_ratio"]
duration = item["duration"]
segmented_duration += duration
if (
cer <= max_cer
and wer <= max_wer
and len_diff_ratio <= max_len_diff_ratio
and item["end_CER"] <= max_edge_cer
and item["start_CER"] <= max_edge_cer
and (max_dur == -1 or (max_dur > -1 and duration < max_dur))
and duration > min_dur
):
remaining_duration += duration
f_out.write(json.dumps(item) + "\n")
logging.info("-" * 50)
logging.info("Threshold values:")
logging.info(f"max WER, %: {max_wer}")
logging.info(f"max CER, %: {max_cer}")
logging.info(f"max edge CER, %: {max_edge_cer}")
logging.info(f"max Word len diff: {max_len_diff_ratio}")
logging.info(f"max Duration, s: {max_dur}")
logging.info("-" * 50)
remaining_duration = remaining_duration / 60
original_duration = original_duration / 60
segmented_duration = segmented_duration / 60
logging.info(f"Original audio dur: {round(original_duration, 2)} min")
logging.info(
f"Segmented duration: {round(segmented_duration, 2)} min ({round(100 * segmented_duration / original_duration, 2)}% of original audio)"
)
logging.info(
f"Retained {round(remaining_duration, 2)} min ({round(100*remaining_duration/original_duration, 2)}% of original or {round(100 * remaining_duration / segmented_duration, 2)}% of segmented audio)."
)
logging.info(f"Retained data saved to {manifest_out}")
def filter(manifest):
"""
Filters out samples that do not satisfy specified threshold values.
Args:
manifest: path to .json manifest
"""
original_duration = 0
if args.audio_dir:
audio_files = glob(f"{os.path.abspath(args.audio_dir)}/*")
for audio in audio_files:
try:
audio_data = AudioSegment.from_file(audio)
duration = len(audio_data._samples) / audio_data._sample_rate
original_duration += duration
except Exception as e:
logging.info(f"Skipping {audio} -- {e}")
_apply_filters(
manifest=manifest,
manifest_out=manifest.replace(".json", "_filtered.json"),
max_cer=args.max_cer,
max_wer=args.max_wer,
max_edge_cer=args.max_edge_cer,
max_len_diff_ratio=args.max_len_diff_ratio,
max_dur=args.max_duration,
min_dur=args.min_duration,
original_duration=original_duration,
)
if __name__ == "__main__":
args = parser.parse_args()
if not args.only_filter:
manifest_with_metrics = args.manifest.replace(".json", "_metrics.json")
get_metrics(args.manifest, manifest_with_metrics)
else:
manifest_with_metrics = args.manifest
filter(manifest_with_metrics)
|
NeMo-main
|
tools/ctc_segmentation/scripts/get_metrics_and_filter.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["LATIN_TO_RU", "RU_ABBREVIATIONS"]
LATIN_TO_RU = {
"a": "а",
"b": "б",
"c": "к",
"d": "д",
"e": "е",
"f": "ф",
"g": "г",
"h": "х",
"i": "и",
"j": "ж",
"k": "к",
"l": "л",
"m": "м",
"n": "н",
"o": "о",
"p": "п",
"q": "к",
"r": "р",
"s": "с",
"t": "т",
"u": "у",
"v": "в",
"w": "в",
"x": "к",
"y": "у",
"z": "з",
"à": "а",
"è": "е",
"é": "е",
"ß": "в",
"ä": "а",
"ö": "о",
"ü": "у",
"є": "е",
"ç": "с",
"ê": "е",
"ó": "о",
}
RU_ABBREVIATIONS = {
" р.": " рублей",
" к.": " копеек",
" коп.": " копеек",
" копек.": " копеек",
" т.д.": " так далее",
" т. д.": " так далее",
" т.п.": " тому подобное",
" т. п.": " тому подобное",
" т.е.": " то есть",
" т. е.": " то есть",
" стр. ": " страница ",
}
|
NeMo-main
|
tools/ctc_segmentation/scripts/normalization_helpers.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
from pathlib import Path
import pandas as pd
parser = argparse.ArgumentParser(description="Compare alignment segments generated with different window sizes")
parser.add_argument(
"--base_dir",
default="output",
type=str,
required=True,
help="Path to directory with 'logs' and 'segments' folders generated during the segmentation step",
)
if __name__ == "__main__":
args = parser.parse_args()
segments_dir = os.path.join(args.base_dir, "segments")
if not os.path.exists(segments_dir):
raise ValueError(f"'segments' directory was not found at {args.base_dir}.")
all_files = Path(segments_dir).glob("*_segments.txt")
all_alignment_files = {}
for file in all_files:
base_name = re.sub(r"^\d+_", "", file.name)
if base_name not in all_alignment_files:
all_alignment_files[base_name] = []
all_alignment_files[base_name].append(file)
verified_dir = os.path.join(args.base_dir, "verified_segments")
os.makedirs(verified_dir, exist_ok=True)
def readlines(file):
with open(file, "r") as f:
lines = f.readlines()
return lines
stats = {}
for part, alignment_files in all_alignment_files.items():
stats[part] = {}
num_alignment_files = len(alignment_files)
all_alignments = []
for alignment in alignment_files:
all_alignments.append(readlines(alignment))
with open(os.path.join(verified_dir, part), "w") as f:
num_segments = len(all_alignments[0])
stats[part]["Original number of segments"] = num_segments
stats[part]["Verified segments"] = 0
stats[part]["Original Duration, min"] = 0
stats[part]["Verified Duration, min"] = 0
for i in range(num_segments):
line = all_alignments[0][i]
valid_line = True
if i == 0:
duration = 0
else:
info = line.split("|")[0].split()
duration = (float(info[1]) - float(info[0])) / 60
stats[part]["Original Duration, min"] += duration
for alignment in all_alignments:
if line != alignment[i]:
valid_line = False
if valid_line:
f.write(line)
stats[part]["Verified segments"] += 1
stats[part]["Verified Duration, min"] += duration
stats = pd.DataFrame.from_dict(stats, orient="index").reset_index()
stats["Number dropped"] = stats["Original number of segments"] - stats["Verified segments"]
stats["Duration of dropped, min"] = round(stats["Original Duration, min"] - stats["Verified Duration, min"])
stats["% dropped, min"] = round(stats["Duration of dropped, min"] / stats["Original number of segments"] * 100)
stats["Misalignment present"] = stats["Number dropped"] > 0
stats["Original Duration, min"] = round(stats["Original Duration, min"])
stats["Verified Duration, min"] = round(stats["Verified Duration, min"])
stats.loc["Total"] = stats.sum()
stats_file = os.path.join(args.base_dir, "alignment_summary.csv")
stats.to_csv(stats_file, index=False)
print(stats)
print(f"Alignment summary saved to {stats_file}")
|
NeMo-main
|
tools/ctc_segmentation/scripts/verify_segments.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from glob import glob
import numpy as np
from scipy.io import wavfile
from tqdm import tqdm
parser = argparse.ArgumentParser(description="Cut audio on the segments based on segments")
parser.add_argument("--output_dir", type=str, help="Path to output directory", required=True)
parser.add_argument(
"--alignment",
type=str,
required=True,
help="Path to a data directory with alignments or a single .txt file with timestamps - result of the ctc-segmentation",
)
parser.add_argument("--threshold", type=float, default=-5, help="Minimum score value accepted")
parser.add_argument("--offset", type=int, default=0, help="Offset, s")
parser.add_argument("--batch_size", type=int, default=64, help="Batch size for inference")
parser.add_argument(
"--edge_duration",
type=float,
help="Duration of audio for mean absolute value calculation at the edges, s",
default=0.05,
)
parser.add_argument("--sample_rate", type=int, help="Sample rate, Hz", default=16000)
parser.add_argument(
"--max_duration",
type=int,
help="Maximum audio duration (seconds). Samples that are longer will be dropped",
default=60,
)
def process_alignment(alignment_file: str, manifest: str, clips_dir: str, args):
""" Cut original audio file into audio segments based on alignment_file
Args:
alignment_file: path to the file with segmented text and corresponding time stamps.
The first line of the file contains the path to the original audio file
manifest: path to .json manifest to save segments metadata
clips_dir: path to a directory to save audio clips
args: main script args
"""
if not os.path.exists(alignment_file):
raise ValueError(f"{alignment_file} not found")
base_name = os.path.basename(alignment_file).replace("_segments.txt", "")
# read the segments, note the first line contains the path to the original audio
segments = []
ref_text_processed = []
ref_text_no_preprocessing = []
ref_text_normalized = []
with open(alignment_file, "r") as f:
for line in f:
line = line.split("|")
# read audio file name from the first line
if len(line) == 1:
audio_file = line[0].strip()
continue
ref_text_processed.append(line[1].strip())
ref_text_no_preprocessing.append(line[2].strip())
ref_text_normalized.append(line[3].strip())
line = line[0].split()
segments.append((float(line[0]) + args.offset / 1000, float(line[1]) + args.offset / 1000, float(line[2])))
# cut the audio into segments and save the final manifests at output_dir
sampling_rate, signal = wavfile.read(audio_file)
original_duration = len(signal) / sampling_rate
num_samples = int(args.edge_duration * args.sample_rate)
low_score_dur = 0
high_score_dur = 0
with open(manifest, "a", encoding="utf8") as f:
for i, (st, end, score) in enumerate(segments):
segment = signal[round(st * sampling_rate) : round(end * sampling_rate)]
duration = len(segment) / sampling_rate
if duration > args.max_duration:
continue
if duration > 0:
text_processed = ref_text_processed[i].strip()
text_no_preprocessing = ref_text_no_preprocessing[i].strip()
text_normalized = ref_text_normalized[i].strip()
if score >= args.threshold:
high_score_dur += duration
audio_filepath = os.path.join(clips_dir, f"{base_name}_{i:04}.wav")
wavfile.write(audio_filepath, sampling_rate, segment)
assert len(signal.shape) == 1 and sampling_rate == args.sample_rate, "check sampling rate"
info = {
"audio_filepath": audio_filepath,
"duration": duration,
"text": text_processed,
"text_no_preprocessing": text_no_preprocessing,
"text_normalized": text_normalized,
"score": round(score, 2),
"start_abs": float(np.mean(np.abs(segment[:num_samples]))),
"end_abs": float(np.mean(np.abs(segment[-num_samples:]))),
}
json.dump(info, f, ensure_ascii=False)
f.write("\n")
else:
low_score_dur += duration
# keep track of duration of the deleted segments
del_duration = 0
begin = 0
for i, (st, end, _) in enumerate(segments):
if st - begin > 0.01:
segment = signal[int(begin * sampling_rate) : int(st * sampling_rate)]
duration = len(segment) / sampling_rate
del_duration += duration
begin = end
segment = signal[int(begin * sampling_rate) :]
duration = len(segment) / sampling_rate
del_duration += duration
stats = (
args.output_dir,
base_name,
round(original_duration),
round(high_score_dur),
round(low_score_dur),
round(del_duration),
)
return stats
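# The *_segments.txt files parsed above look roughly like this (paths, timings and scores are
# illustrative):
#   /data/audio/lecture_01.wav
#   12.40 15.85 -1.23 | processed text | original text with punctuation | normalized text
# i.e. the first line holds the source audio path and each following line holds
# "<start> <end> <score> | <processed> | <raw> | <normalized>" with times in seconds.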
if __name__ == "__main__":
args = parser.parse_args()
print("Splitting audio files into segments...")
if os.path.isdir(args.alignment):
alignment_files = glob(f"{args.alignment}/*_segments.txt")
else:
alignment_files = [args.alignment]
# create a directory to store segments with alignment confidence score above the threshold
args.output_dir = os.path.abspath(args.output_dir)
clips_dir = os.path.join(args.output_dir, "clips")
manifest_dir = os.path.join(args.output_dir, "manifests")
os.makedirs(clips_dir, exist_ok=True)
os.makedirs(manifest_dir, exist_ok=True)
manifest = os.path.join(manifest_dir, "manifest.json")
if os.path.exists(manifest):
os.remove(manifest)
stats_file = os.path.join(args.output_dir, "stats.tsv")
with open(stats_file, "w") as f:
f.write("Folder\tSegment\tOriginal dur (s)\tHigh quality dur (s)\tLow quality dur (s)\tDeleted dur (s)\n")
high_score_dur = 0
low_score_dur = 0
del_duration = 0
original_dur = 0
for alignment_file in tqdm(alignment_files):
stats = process_alignment(alignment_file, manifest, clips_dir, args)
original_dur += stats[-4]
high_score_dur += stats[-3]
low_score_dur += stats[-2]
del_duration += stats[-1]
stats = "\t".join([str(t) for t in stats]) + "\n"
f.write(stats)
f.write(f"Total\t\t{round(high_score_dur)}\t{round(low_score_dur)}\t{del_duration}")
print(f"Original duration : {round(original_dur / 60)}min")
print(f"High score segments: {round(high_score_dur / 60)}min ({round(high_score_dur/original_dur*100)}%)")
print(f"Low score segments : {round(low_score_dur / 60)}min ({round(low_score_dur/original_dur*100)}%)")
print(f"Deleted segments : {round(del_duration / 60)}min ({round(del_duration/original_dur*100)}%)")
print(f"Stats saved at {stats_file}")
print(f"Manifest saved at {manifest}")
|
NeMo-main
|
tools/ctc_segmentation/scripts/cut_audio.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.handlers
import math
import os
import sys
from pathlib import PosixPath
from typing import List, Tuple, Union
import ctc_segmentation as cs
import numpy as np
from tqdm import tqdm
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
def get_segments(
log_probs: np.ndarray,
path_wav: Union[PosixPath, str],
transcript_file: Union[PosixPath, str],
output_file: str,
vocabulary: List[str],
tokenizer: SentencePieceTokenizer,
bpe_model: bool,
index_duration: float,
window_size: int = 8000,
log_file: str = "log.log",
debug: bool = False,
) -> None:
"""
    Segments the audio and saves segment timings to a file
Args:
log_probs: Log probabilities for the original audio from an ASR model, shape T * |vocabulary|.
values for blank should be at position 0
path_wav: path to the audio .wav file
        transcript_file: path to the transcript text file to align with the audio
output_file: path to the file to save timings for segments
vocabulary: vocabulary used to train the ASR model, note blank is at position len(vocabulary) - 1
tokenizer: ASR model tokenizer (for BPE models, None for char-based models)
bpe_model: Indicates whether the model uses BPE
        index_duration: corresponding time duration of one CTC output index (in seconds)
        window_size: window size (in CTC output frames) used by the segmentation algorithm; each utterance should fit into this window
        log_file: path to the log file
        debug: flag to enable debug-level logging
"""
level = "DEBUG" if debug else "INFO"
file_handler = logging.FileHandler(filename=log_file)
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(handlers=handlers, level=level)
try:
with open(transcript_file, "r") as f:
text = f.readlines()
text = [t.strip() for t in text if t.strip()]
# add corresponding original text without pre-processing
transcript_file_no_preprocessing = transcript_file.replace(".txt", "_with_punct.txt")
if not os.path.exists(transcript_file_no_preprocessing):
raise ValueError(f"{transcript_file_no_preprocessing} not found.")
with open(transcript_file_no_preprocessing, "r") as f:
text_no_preprocessing = f.readlines()
text_no_preprocessing = [t.strip() for t in text_no_preprocessing if t.strip()]
# add corresponding normalized original text
transcript_file_normalized = transcript_file.replace(".txt", "_with_punct_normalized.txt")
if not os.path.exists(transcript_file_normalized):
raise ValueError(f"{transcript_file_normalized} not found.")
with open(transcript_file_normalized, "r") as f:
text_normalized = f.readlines()
text_normalized = [t.strip() for t in text_normalized if t.strip()]
if len(text_no_preprocessing) != len(text):
raise ValueError(f"{transcript_file} and {transcript_file_no_preprocessing} do not match")
if len(text_normalized) != len(text):
raise ValueError(f"{transcript_file} and {transcript_file_normalized} do not match")
config = cs.CtcSegmentationParameters()
config.char_list = vocabulary
config.min_window_size = window_size
config.index_duration = index_duration
if bpe_model:
ground_truth_mat, utt_begin_indices = _prepare_tokenized_text_for_bpe_model(text, tokenizer, vocabulary, 0)
else:
config.excluded_characters = ".,-?!:»«;'›‹()"
config.blank = vocabulary.index(" ")
ground_truth_mat, utt_begin_indices = cs.prepare_text(config, text)
_print(ground_truth_mat, config.char_list)
        # set the blank index to 0 only after prepare_text() has been called
config.blank = 0
logging.debug(f"Syncing {transcript_file}")
logging.debug(
f"Audio length {os.path.basename(path_wav)}: {log_probs.shape[0]}. "
f"Text length {os.path.basename(transcript_file)}: {len(ground_truth_mat)}"
)
timings, char_probs, char_list = cs.ctc_segmentation(config, log_probs, ground_truth_mat)
_print(ground_truth_mat, vocabulary)
segments = determine_utterance_segments(config, utt_begin_indices, char_probs, timings, text, char_list)
write_output(output_file, path_wav, segments, text, text_no_preprocessing, text_normalized)
# Also writes labels in audacity format
output_file_audacity = output_file[:-4] + "_audacity.txt"
write_labels_for_audacity(output_file_audacity, segments, text_no_preprocessing)
logging.info(f"Label file for Audacity written to {output_file_audacity}.")
for i, (word, segment) in enumerate(zip(text, segments)):
if i < 5:
logging.debug(f"{segment[0]:.2f} {segment[1]:.2f} {segment[2]:3.4f} {word}")
logging.info(f"segmentation of {transcript_file} complete.")
except Exception as e:
logging.info(f"{e} -- segmentation of {transcript_file} failed")
def _prepare_tokenized_text_for_bpe_model(text: List[str], tokenizer, vocabulary: List[str], blank_idx: int = 0):
""" Creates a transition matrix for BPE-based models"""
space_idx = vocabulary.index("▁")
ground_truth_mat = [[-1, -1]]
utt_begin_indices = []
for uttr in text:
ground_truth_mat += [[blank_idx, space_idx]]
utt_begin_indices.append(len(ground_truth_mat))
token_ids = tokenizer.text_to_ids(uttr)
# blank token is moved from the last to the first (0) position in the vocabulary
token_ids = [idx + 1 for idx in token_ids]
ground_truth_mat += [[t, -1] for t in token_ids]
utt_begin_indices.append(len(ground_truth_mat))
ground_truth_mat += [[blank_idx, space_idx]]
ground_truth_mat = np.array(ground_truth_mat, np.int64)
return ground_truth_mat, utt_begin_indices
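# --- Hedged illustration (not part of the original module) ---
# Toy example of the transition matrix built above. _ToyTokenizer is purely hypothetical and
# only mimics the text_to_ids() method of a SentencePiece tokenizer; the vocabulary mirrors
# how run_ctc_segmentation.py prepends the blank symbol "ε" at index 0.
class _ToyTokenizer:
    def __init__(self, vocab):
        self._vocab = vocab
    def text_to_ids(self, text):
        # map each known character to its index in the (blank-free) tokenizer vocabulary
        return [self._vocab.index(ch) for ch in text if ch in self._vocab]
def _example_bpe_ground_truth():
    vocabulary = ["ε", "▁", "a", "b"]
    tokenizer = _ToyTokenizer(vocabulary[1:])  # the tokenizer itself has no blank token
    mat, utt_begin = _prepare_tokenized_text_for_bpe_model(["ab"], tokenizer, vocabulary, 0)
    # mat rows: [-1,-1], [blank, "▁"], "a", "b", [blank, "▁"]; utt_begin == [2, 4]
    return mat, utt_begin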
def _print(ground_truth_mat, vocabulary, limit=20):
"""Prints transition matrix"""
chars = []
for row in ground_truth_mat:
chars.append([])
for ch_id in row:
if ch_id != -1:
chars[-1].append(vocabulary[int(ch_id)])
for x in chars[:limit]:
logging.debug(x)
def _get_blank_spans(char_list, blank="ε"):
"""
Returns a list of tuples:
(start index, end index (exclusive), count)
    ignores blank symbols at the beginning and end of the char_list,
    since those are not suitable positions to split on
"""
blanks = []
start = None
end = None
for i, ch in enumerate(char_list):
if ch == blank:
if start is None:
start, end = i, i
else:
end = i
else:
if start is not None:
# ignore blank tokens at the beginning
if start > 0:
end += 1
blanks.append((start, end, end - start))
start = None
end = None
return blanks
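# --- Hedged illustration (not part of the original module) ---
# _get_blank_spans() reports only inner runs of blanks; leading and trailing blanks are skipped.
def _example_blank_spans():
    spans = _get_blank_spans(["ε", "a", "ε", "ε", "b", "ε"])
    # spans == [(2, 4, 2)]: one inner run of two blanks starting at index 2 (end index exclusive)
    return spans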
def _compute_time(index, align_type, timings):
"""Compute start and end time of utterance.
Adapted from https://github.com/lumaku/ctc-segmentation
Args:
index: frame index value
align_type: one of ["begin", "end"]
Return:
start/end time of utterance in seconds
"""
middle = (timings[index] + timings[index - 1]) / 2
if align_type == "begin":
return max(timings[index + 1] - 0.5, middle)
elif align_type == "end":
return min(timings[index - 1] + 0.5, middle)
def determine_utterance_segments(config, utt_begin_indices, char_probs, timings, text, char_list):
"""Utterance-wise alignments from char-wise alignments.
Adapted from https://github.com/lumaku/ctc-segmentation
Args:
config: an instance of CtcSegmentationParameters
utt_begin_indices: list of time indices of utterance start
char_probs: character positioned probabilities obtained from backtracking
timings: mapping of time indices to seconds
text: list of utterances
Return:
segments, a list of: utterance start and end [s], and its confidence score
"""
segments = []
min_prob = np.float64(-10000000000.0)
for i in tqdm(range(len(text))):
start = _compute_time(utt_begin_indices[i], "begin", timings)
end = _compute_time(utt_begin_indices[i + 1], "end", timings)
start_t = start / config.index_duration_in_seconds
start_t_floor = math.floor(start_t)
        # look for the left-most blank symbol and split in the middle to fix the utterance start boundary
if char_list[start_t_floor] == config.char_list[config.blank]:
start_blank = None
j = start_t_floor - 1
while char_list[j] == config.char_list[config.blank] and j > start_t_floor - 20:
start_blank = j
j -= 1
if start_blank:
start_t = int(round(start_blank + (start_t_floor - start_blank) / 2))
else:
start_t = start_t_floor
start = start_t * config.index_duration_in_seconds
else:
start_t = int(round(start_t))
end_t = int(round(end / config.index_duration_in_seconds))
# Compute confidence score by using the min mean probability after splitting into segments of L frames
n = config.score_min_mean_over_L
if end_t <= start_t:
min_avg = min_prob
elif end_t - start_t <= n:
min_avg = char_probs[start_t:end_t].mean()
else:
min_avg = np.float64(0.0)
for t in range(start_t, end_t - n):
min_avg = min(min_avg, char_probs[t : t + n].mean())
segments.append((start, end, min_avg))
return segments
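# --- Hedged numeric illustration (not part of the original module) ---
# The confidence score above is the worst (minimum) mean of char_probs over windows of
# L = config.score_min_mean_over_L consecutive frames, mirroring the loop in
# determine_utterance_segments().
def _example_segment_confidence():
    char_probs = np.array([-1.0, -1.0, -5.0, -1.0])
    n = 2  # window length L
    min_avg = np.float64(0.0)
    for t in range(0, len(char_probs) - n):
        min_avg = min(min_avg, char_probs[t : t + n].mean())
    return min_avg  # -> -3.0 (the window covering the -5.0 frame dominates)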
def write_output(
out_path: str,
path_wav: str,
segments: List[Tuple[float]],
text: str,
text_no_preprocessing: str,
text_normalized: str,
):
"""
Write the segmentation output to a file
out_path: Path to output file
path_wav: Path to the original audio file
segments: Segments include start, end and alignment score
text: Text used for alignment
text_no_preprocessing: Reference txt without any pre-processing
text_normalized: Reference text normalized
"""
# Uses char-wise alignments to get utterance-wise alignments and writes them into the given file
with open(str(out_path), "w") as outfile:
outfile.write(str(path_wav) + "\n")
for i, segment in enumerate(segments):
if isinstance(segment, list):
for j, x in enumerate(segment):
start, end, score = x
outfile.write(
f"{start} {end} {score} | {text[i][j]} | {text_no_preprocessing[i][j]} | {text_normalized[i][j]}\n"
)
else:
start, end, score = segment
outfile.write(
f"{start} {end} {score} | {text[i]} | {text_no_preprocessing[i]} | {text_normalized[i]}\n"
)
def write_labels_for_audacity(
out_path: str, segments: List[Tuple[float]], text_no_preprocessing: str,
):
"""
Write the segmentation output to a file ready to be imported in Audacity with the unprocessed text as labels
out_path: Path to output file
segments: Segments include start, end and alignment score
text_no_preprocessing: Reference txt without any pre-processing
"""
# Audacity uses tab to separate each field (start end text)
    TAB_CHAR = "\t"
# Uses char-wise alignments to get utterance-wise alignments and writes them into the given file
with open(str(out_path), "w") as outfile:
for i, segment in enumerate(segments):
if isinstance(segment, list):
for j, x in enumerate(segment):
start, end, _ = x
outfile.write(f"{start}{TAB_CHAR}{end}{TAB_CHAR}{text_no_preprocessing[i][j]} \n")
else:
start, end, _ = segment
outfile.write(f"{start}{TAB_CHAR}{end}{TAB_CHAR}{text_no_preprocessing[i]} \n")
|
NeMo-main
|
tools/ctc_segmentation/scripts/utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import time
from pathlib import Path
import numpy as np
import scipy.io.wavfile as wav
import torch
from joblib import Parallel, delayed
from tqdm import tqdm
from utils import get_segments
import nemo.collections.asr as nemo_asr
parser = argparse.ArgumentParser(description="CTC Segmentation")
parser.add_argument("--output_dir", default="output", type=str, help="Path to output directory")
parser.add_argument(
"--data",
type=str,
required=True,
help="Path to directory with audio files and associated transcripts (same respective names only formats are "
"different or path to wav file (transcript should have the same base name and be located in the same folder"
"as the wav file.",
)
parser.add_argument("--window_len", type=int, default=8000, help="Window size for ctc segmentation algorithm")
parser.add_argument("--sample_rate", type=int, default=16000, help="Sampling rate, Hz")
parser.add_argument(
"--model", type=str, default="QuartzNet15x5Base-En", help="Path to model checkpoint or pre-trained model name",
)
parser.add_argument("--debug", action="store_true", help="Flag to enable debugging messages")
parser.add_argument(
"--num_jobs",
default=-2,
type=int,
help="The maximum number of concurrently running jobs, `-2` - all CPUs but one are used",
)
logger = logging.getLogger("ctc_segmentation") # use module name
if __name__ == "__main__":
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
# setup logger
log_dir = os.path.join(args.output_dir, "logs")
os.makedirs(log_dir, exist_ok=True)
log_file = os.path.join(log_dir, f"ctc_segmentation_{args.window_len}.log")
if os.path.exists(log_file):
os.remove(log_file)
level = "DEBUG" if args.debug else "INFO"
logger = logging.getLogger("CTC")
file_handler = logging.FileHandler(filename=log_file)
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(handlers=handlers, level=level)
if os.path.exists(args.model):
asr_model = nemo_asr.models.EncDecCTCModel.restore_from(args.model)
elif args.model in nemo_asr.models.EncDecCTCModel.get_available_model_names():
asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(args.model, strict=False)
else:
try:
asr_model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained(args.model)
        except Exception:
raise ValueError(
f"Provide path to the pretrained checkpoint or choose from {nemo_asr.models.EncDecCTCModel.get_available_model_names()}"
)
bpe_model = isinstance(asr_model, nemo_asr.models.EncDecCTCModelBPE)
# get tokenizer used during training, None for char based models
if bpe_model:
tokenizer = asr_model.tokenizer
else:
tokenizer = None
# extract ASR vocabulary and add blank symbol
vocabulary = ["ε"] + list(asr_model.cfg.decoder.vocabulary)
logging.debug(f"ASR Model vocabulary: {vocabulary}")
data = Path(args.data)
output_dir = Path(args.output_dir)
if os.path.isdir(data):
audio_paths = data.glob("*.wav")
data_dir = data
else:
audio_paths = [Path(data)]
data_dir = Path(os.path.dirname(data))
all_log_probs = []
all_transcript_file = []
all_segment_file = []
all_wav_paths = []
segments_dir = os.path.join(args.output_dir, "segments")
os.makedirs(segments_dir, exist_ok=True)
index_duration = None
for path_audio in audio_paths:
logging.info(f"Processing {path_audio.name}...")
transcript_file = os.path.join(data_dir, path_audio.name.replace(".wav", ".txt"))
segment_file = os.path.join(
segments_dir, f"{args.window_len}_" + path_audio.name.replace(".wav", "_segments.txt")
)
if not os.path.exists(transcript_file):
logging.info(f"{transcript_file} not found. Skipping {path_audio.name}")
continue
try:
sample_rate, signal = wav.read(path_audio)
if len(signal) == 0:
logging.error(f"Skipping {path_audio.name}")
continue
assert (
sample_rate == args.sample_rate
), f"Sampling rate of the audio file {path_audio} doesn't match --sample_rate={args.sample_rate}"
original_duration = len(signal) / sample_rate
logging.debug(f"len(signal): {len(signal)}, sr: {sample_rate}")
logging.debug(f"Duration: {original_duration}s, file_name: {path_audio}")
log_probs = asr_model.transcribe(paths2audio_files=[str(path_audio)], batch_size=1, logprobs=True)[0]
# move blank values to the first column (ctc-package compatibility)
blank_col = log_probs[:, -1].reshape((log_probs.shape[0], 1))
log_probs = np.concatenate((blank_col, log_probs[:, :-1]), axis=1)
all_log_probs.append(log_probs)
all_segment_file.append(str(segment_file))
all_transcript_file.append(str(transcript_file))
all_wav_paths.append(path_audio)
if index_duration is None:
index_duration = len(signal) / log_probs.shape[0] / sample_rate
except Exception as e:
logging.error(e)
logging.error(f"Skipping {path_audio.name}")
continue
asr_model_type = type(asr_model)
del asr_model
torch.cuda.empty_cache()
if len(all_log_probs) > 0:
start_time = time.time()
normalized_lines = Parallel(n_jobs=args.num_jobs)(
delayed(get_segments)(
all_log_probs[i],
all_wav_paths[i],
all_transcript_file[i],
all_segment_file[i],
vocabulary,
tokenizer,
bpe_model,
index_duration,
args.window_len,
log_file=log_file,
debug=args.debug,
)
for i in tqdm(range(len(all_log_probs)))
)
total_time = time.time() - start_time
logger.info(f"Total execution time: ~{round(total_time/60)}min")
logger.info(f"Saving logs to {log_file}")
if os.path.exists(log_file):
with open(log_file, "r") as f:
lines = f.readlines()
|
NeMo-main
|
tools/ctc_segmentation/scripts/run_ctc_segmentation.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import glob
import os
import shutil
import time
from html import unescape
from uuid import uuid4
import model_api
import torch
import werkzeug
from flask import Flask, make_response, render_template, request, url_for
from flask_cors import CORS
from werkzeug.utils import secure_filename
from nemo.utils import logging
app = Flask(__name__)
CORS(app)
# Upload folder for audio files; models are stored in permanent cache
# which gets deleted once the container shuts down
app.config[f'UPLOAD_FOLDER'] = f"tmp/"
@app.route('/initialize_model', methods=['POST'])
def initialize_model():
"""
API Endpoint to instantiate a model
Loads ASR model by its pretrained checkpoint name or upload ASR model that is provided by the user,
then load that checkpoint into the cache.
Loading of the model into cache is done once per worker. Number of workers should be limited
so as not to exhaust the GPU memory available on device (if GPU is being used).
"""
logging.info("Starting ASR service")
if torch.cuda.is_available():
logging.info("CUDA is available. Running on GPU")
else:
logging.info("CUDA is not available. Defaulting to CPUs")
# get form fields
model_name = request.form['model_names_select']
use_gpu_if_available = request.form.get('use_gpu_ckbx', "off")
# get nemo model from user (if not none)
nemo_model_file = request.files.get('nemo_model', '')
# if nemo model is not None, upload it to model cache
if nemo_model_file != '':
model_name = _store_model(nemo_model_file)
# Alert user that model has been uploaded into the model cache,
# and they should refresh the page to access the model
result = render_template(
'toast_msg.html', toast_message=f"Model {model_name} has been uploaded. " f"Refresh page !", timeout=5000
)
else:
# Alert user that model has been loaded onto a workers memory
result = render_template(
'toast_msg.html', toast_message=f"Model {model_name} has been initialized !", timeout=2000
)
# Load model into memory cache
model_api.initialize_model(model_name=model_name)
# reset file banner
reset_nemo_model_file_script = """
<script>
document.getElementById('nemo_model_file').value = ""
</script>
"""
result = result + reset_nemo_model_file_script
result = make_response(result)
# set cookies
result.set_cookie("model_name", model_name)
result.set_cookie("use_gpu", use_gpu_if_available)
return result
def _store_model(nemo_model_file):
"""
Preserve the model supplied by user into permanent cache
This cache needs to be manually deleted (if run locally), or gets deleted automatically
(when the container gets shutdown / killed).
Args:
nemo_model_file: User path to .nemo checkpoint.
Returns:
        A file name (ending with .nemo) to signify this is an uploaded checkpoint.
"""
filename = secure_filename(nemo_model_file.filename)
file_basename = os.path.basename(filename)
model_dir = os.path.splitext(file_basename)[0]
model_store = os.path.join('models', model_dir)
if not os.path.exists(model_store):
os.makedirs(model_store)
# upload model
model_path = os.path.join(model_store, filename)
nemo_model_file.save(model_path)
return file_basename
@app.route('/upload_audio_files', methods=['POST'])
def upload_audio_files():
"""
API Endpoint to upload audio files for inference.
The uploaded files must be wav files, 16 KHz sample rate, mono-channel audio samples.
"""
# Try to get one or more files from form
try:
f = request.files.getlist('file')
except werkzeug.exceptions.BadRequestKeyError:
f = None
# If user did not select any file to upload, notify them.
if f is None or len(f) == 0:
toast = render_template('toast_msg.html', toast_message="No file has been selected to upload !", timeout=2000)
result = render_template('updates/upload_files_failed.html', pre_exec=toast, url=url_for('upload_audio_files'))
result = unescape(result)
return result
# temporary id to store data
uuid = str(uuid4())
data_store = os.path.join(app.config[f'UPLOAD_FOLDER'], uuid)
    # If the user attempts to upload another set of files without first transcribing them,
# delete the old cache of files and create a new cache entirely
_remove_older_files_if_exists()
# Save each file into this unique cache
for fn in f:
filename = secure_filename(fn.filename)
if not os.path.exists(data_store):
os.makedirs(data_store)
fn.save(os.path.join(data_store, filename))
logging.info(f"Saving file : {fn.filename}")
# Update user that N files were uploaded.
msg = f"{len(f)} file(s) uploaded. Click to upload more !"
toast = render_template('toast_msg.html', toast_message=f"{len(f)} file(s) uploaded !", timeout=2000)
result = render_template(
'updates/upload_files_successful.html', pre_exec=toast, msg=msg, url=url_for('upload_audio_files')
)
result = unescape(result)
result = make_response(result)
result.set_cookie("uuid", uuid)
return result
def _remove_older_files_if_exists():
"""
Helper method to prevent cache leakage when user attempts to upload another set of files
without first transcribing the files already uploaded.
"""
# remove old data store (if exists)
old_uuid = secure_filename(request.cookies.get('uuid', ''))
if old_uuid is not None and old_uuid != '':
# delete old data store
old_data_store = os.path.join(app.config[f'UPLOAD_FOLDER'], old_uuid)
logging.info("Tried uploading more data without using old uploaded data. Purging data cache.")
shutil.rmtree(old_data_store, ignore_errors=True)
@app.route('/remove_audio_files', methods=['POST'])
def remove_audio_files():
"""
API Endpoint for removing audio files
    # Note: Sometimes data may persist due to a particular set of circumstances:
    - User uploads audio then closes the app without transcribing anything
    In such a case, the files will be deleted when gunicorn shuts down, or when the container is stopped.
    However, the data may not be automatically deleted if the Flask server is used as is.
"""
# Get the unique cache id from cookie
uuid = secure_filename(request.cookies.get("uuid", ""))
data_store = os.path.join(app.config[f'UPLOAD_FOLDER'], uuid)
# If the data does not exist (cache is empty), notify user
if not os.path.exists(data_store) or uuid == "":
files_dont_exist = render_template(
'toast_msg.html', toast_message="No files have been uploaded !", timeout=2000
)
result = render_template(
'updates/remove_files.html', pre_exec=files_dont_exist, url=url_for('remove_audio_files')
)
result = unescape(result)
return result
else:
# delete data that exists in cache
shutil.rmtree(data_store, ignore_errors=True)
logging.info("Removed all data")
# Notify user that cache was deleted.
toast = render_template('toast_msg.html', toast_message="All files removed !", timeout=2000)
result = render_template('updates/remove_files.html', pre_exec=toast, url=url_for('remove_audio_files'))
result = unescape(result)
result = make_response(result)
result.set_cookie("uuid", '', expires=0)
return result
@app.route('/transcribe', methods=['POST'])
def transcribe():
"""
API Endpoint to transcribe a set of audio files.
    The files are sorted according to their name, so the order may not be the same as the upload order.
Utilizing the cached info inside the cookies, a model with selected name will be loaded into memory,
and maybe onto a GPU (if it is supported on the device).
Then the transcription api will be called from the model_api. If all is successful, a template is updated
    with results. If some issue occurs (out of memory, invalid file format), notify the user.
"""
# load model name from cookie
model_name = request.cookies.get('model_name')
logging.info(f"Model name : {model_name}")
# If model name is not selected via Load Model, notify user.
if model_name is None or model_name == '':
result = render_template('toast_msg.html', toast_message="Model has not been initialized !", timeout=2000)
return result
# load whether gpu should be used
use_gpu_if_available = request.cookies.get('use_gpu') == 'on'
gpu_used = torch.cuda.is_available() and use_gpu_if_available
# Load audio from paths
uuid = secure_filename(request.cookies.get("uuid", ""))
data_store = os.path.join(app.config[f'UPLOAD_FOLDER'], uuid)
files = list(glob.glob(os.path.join(data_store, "*.wav")))
# If no files found in cache, notify user
if len(files) == 0:
result = render_template('toast_msg.html', toast_message="No audio files were found !", timeout=2000)
return result
# transcribe file via model api
t1 = time.time()
transcriptions = model_api.transcribe_all(files, model_name, use_gpu_if_available=use_gpu_if_available)
t2 = time.time()
# delete all transcribed files immediately
for fp in files:
try:
os.remove(fp)
except FileNotFoundError:
logging.info(f"Failed to delete transcribed file : {os.path.basename(fp)}")
# delete temporary transcription directory
shutil.rmtree(data_store, ignore_errors=True)
# If something happened during transcription, and it failed, notify user.
if type(transcriptions) == str and transcriptions == model_api.TAG_ERROR_DURING_TRANSCRIPTION:
toast = render_template(
'toast_msg.html',
toast_message=f"Failed to transcribe files due to unknown reason. "
f"Please provide 16 KHz Monochannel wav files onle.",
timeout=5000,
)
transcriptions = ["" for _ in range(len(files))]
else:
# Transcriptions obtained successfully, notify user.
toast = render_template(
'toast_msg.html',
toast_message=f"Transcribed {len(files)} files using {model_name} (gpu={gpu_used}), "
f"in {(t2 - t1): 0.2f} s",
timeout=5000,
)
# Write results to data table
results = []
for filename, transcript in zip(files, transcriptions):
results.append(dict(filename=os.path.basename(filename), transcription=transcript))
result = render_template('transcripts.html', transcripts=results)
result = toast + result
result = unescape(result)
result = make_response(result)
result.set_cookie("uuid", "", expires=0)
return result
def remove_tmp_dir_at_exit():
"""
Helper method to attempt a deletion of audio file cache on flask api exit.
Gunicorn and Docker container (based on gunicorn) will delete any remaining files on
shutdown of the gunicorn server or the docker container.
This is a patch that might not always work for Flask server, but in general should ensure
that local audio file cache is deleted.
This does *not* impact the model cache. Flask and Gunicorn servers will *never* delete uploaded models.
Docker container will delete models *only* when the container is killed (since models are uploaded to
local storage path inside container).
"""
try:
uuid = secure_filename(request.cookies.get("uuid", ""))
        if uuid is not None and uuid != "":
cache_dir = os.path.join(os.path.join(app.config[f'UPLOAD_FOLDER'], uuid))
logging.info(f"Removing cache file for worker : {os.getpid()}")
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir, ignore_errors=True)
logging.info(f"Deleted tmp folder : {cache_dir}")
except RuntimeError:
# Working outside of request context (probably shutdown)
# simply delete entire tmp folder
shutil.rmtree(app.config[f'UPLOAD_FOLDER'], ignore_errors=True)
@app.route('/')
def main():
"""
API Endpoint for ASR Service.
"""
nemo_model_names, local_model_names = model_api.get_model_names()
model_names = []
model_names.extend(local_model_names) # prioritize local models
model_names.extend(nemo_model_names) # attach all other pretrained models
# page initializations
result = render_template('main.html', model_names=model_names)
result = make_response(result)
# Reset cookies
result.set_cookie("model_name", '', expires=0) # model name from pretrained model list
result.set_cookie("use_gpu", '', expires=0) # flag to use gpu (if available)
result.set_cookie("uuid", '', expires=0) # session id
return result
# Register hook to delete file cache (for flask server only)
atexit.register(remove_tmp_dir_at_exit)
if __name__ == '__main__':
app.run(False)
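# --- Hedged usage sketch (comments only, not part of the original app) ---
# The endpoints above are meant to be driven by the bundled HTML templates, but they can also
# be exercised manually. A hypothetical local session (cookie jar required, since the app
# tracks the selected model and the upload session via cookies; the model name is just an
# example, and the port assumes the bundled gunicorn config):
#   curl -c cookies.txt -F "model_names_select=QuartzNet15x5Base-En" -F "use_gpu_ckbx=on" \
#        http://localhost:8000/initialize_model
#   curl -b cookies.txt -c cookies.txt -F "file=@sample.wav" http://localhost:8000/upload_audio_files
#   curl -b cookies.txt -X POST http://localhost:8000/transcribe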
|
NeMo-main
|
tools/asr_webapp/asr_service.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import glob
import os
import torch
import nemo.collections.asr as nemo_asr
from nemo.utils import logging, model_utils
# setup AMP (optional)
if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
MODEL_CACHE = {}
# Special tags for fallbacks / user notifications
TAG_ERROR_DURING_TRANSCRIPTION = "<ERROR_DURING_TRANSCRIPTION>"
def get_model_names():
# Populate local copy of models
local_model_paths = glob.glob(os.path.join('models', "**", "*.nemo"), recursive=True)
local_model_names = list(sorted([os.path.basename(path) for path in local_model_paths]))
# Populate with pretrained nemo checkpoint list
nemo_model_names = set()
for model_info in nemo_asr.models.ASRModel.list_available_models():
for superclass in model_info.class_.mro():
if 'CTC' in superclass.__name__ or 'RNNT' in superclass.__name__:
if 'align' in model_info.pretrained_model_name:
continue
nemo_model_names.add(model_info.pretrained_model_name)
nemo_model_names = list(sorted(nemo_model_names))
return nemo_model_names, local_model_names
def initialize_model(model_name):
# load model
if model_name not in MODEL_CACHE:
if '.nemo' in model_name:
# use local model
model_name_no_ext = os.path.splitext(model_name)[0]
model_path = os.path.join('models', model_name_no_ext, model_name)
# Extract config
model_cfg = nemo_asr.models.ASRModel.restore_from(restore_path=model_path, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: ASRModel
logging.info(f"Restoring local model : {imported_class.__name__}")
# load model from checkpoint
model = imported_class.restore_from(restore_path=model_path, map_location='cpu') # type: ASRModel
else:
# use pretrained model
model = nemo_asr.models.ASRModel.from_pretrained(model_name, map_location='cpu')
model.freeze()
# cache model
MODEL_CACHE[model_name] = model
model = MODEL_CACHE[model_name]
return model
def transcribe_all(filepaths, model_name, use_gpu_if_available=True):
# instantiate model
if model_name in MODEL_CACHE:
model = MODEL_CACHE[model_name]
else:
model = initialize_model(model_name)
if torch.cuda.is_available() and use_gpu_if_available:
model = model.cuda()
# transcribe audio
logging.info("Begin transcribing audio...")
try:
with autocast():
with torch.no_grad():
transcriptions = model.transcribe(filepaths, batch_size=32)
except RuntimeError:
# Purge the cache to clear some memory
MODEL_CACHE.clear()
logging.info("Ran out of memory on device - performing inference on CPU for now")
try:
model = model.cpu()
with torch.no_grad():
transcriptions = model.transcribe(filepaths, batch_size=32)
except Exception as e:
logging.info(f"Exception {e} occured while attemting to transcribe audio. Returning error message")
return TAG_ERROR_DURING_TRANSCRIPTION
logging.info(f"Finished transcribing {len(filepaths)} files !")
# If RNNT models transcribe, they return a tuple (greedy, beam_scores)
if type(transcriptions[0]) == list and len(transcriptions) == 2:
# get greedy transcriptions only
transcriptions = transcriptions[0]
# Force onto CPU
model = model.cpu()
MODEL_CACHE[model_name] = model
return transcriptions
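# --- Hedged usage sketch (not part of the original module) ---
# Minimal example of how the web app drives this module; requires nemo_toolkit[asr] and, for
# the pretrained model list, network access. The audio path below is a placeholder.
if __name__ == "__main__":
    nemo_names, local_names = get_model_names()
    logging.info(f"{len(nemo_names)} pretrained and {len(local_names)} local checkpoints available")
    # transcripts = transcribe_all(["/path/to/audio.wav"], nemo_names[0], use_gpu_if_available=False)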
|
NeMo-main
|
tools/asr_webapp/model_api.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://docs.gunicorn.org/en/stable/settings.html
# NOTE: Do not import nemo / torch code here
# Gunicorn creates forked processes - and CUDA cannot be used in forked multiprocessing environment.
import shutil
# General config
bind = "0.0.0.0:8000"
workers = 2
# Worker specific config
worker_connections = 1000
timeout = 180 # 3 minutes of timeout
def on_exit(server):
# delete tmp dir
print("Shutting down server ...")
print("Deleteing tmp directory ...")
shutil.rmtree('tmp/', ignore_errors=True)
print("Tmp directory deleted !")
|
NeMo-main
|
tools/asr_webapp/gunicorn.conf.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asr_service import app
if __name__ == '__main__':
app.run()
|
NeMo-main
|
tools/asr_webapp/wsgi.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import csv
import datetime
import difflib
import io
import json
import logging
import math
import operator
import os
import pickle
from collections import defaultdict
from os.path import expanduser
from pathlib import Path
import dash
import dash_bootstrap_components as dbc
import diff_match_patch
import editdistance
import jiwer
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
import tqdm
from dash import dash_table, dcc, html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from plotly import express as px
from plotly import graph_objects as go
from plotly.subplots import make_subplots
# number of items in a table per page
DATA_PAGE_SIZE = 10
# operators for filtering items
filter_operators = {
'>=': 'ge',
'<=': 'le',
'<': 'lt',
'>': 'gt',
'!=': 'ne',
'=': 'eq',
'contains ': 'contains',
}
comparison_mode = False
# parse table filter queries
def split_filter_part(filter_part):
for op in filter_operators:
if op in filter_part:
name_part, value_part = filter_part.split(op, 1)
name = name_part[name_part.find('{') + 1 : name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if v0 == value_part[-1] and v0 in ("'", '"', '`'):
value = value_part[1:-1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
return name, filter_operators[op], value
return [None] * 3
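# --- Hedged illustration (not part of the original tool) ---
# Dash sends filter queries such as '{WER} >= 20.5 && {text} contains "hello"'; each part is
# mapped to a column name, an operator-module function name, and a parsed value.
def _example_split_filter():
    assert split_filter_part('{WER} >= 20.5') == ('WER', 'ge', 20.5)
    assert split_filter_part('{text} contains "hello"') == ('text', 'contains', 'hello')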
# standard command-line arguments parser
def parse_args():
parser = argparse.ArgumentParser(description='Speech Data Explorer')
parser.add_argument(
'manifest', help='path to JSON manifest file',
)
parser.add_argument('--vocab', help='optional vocabulary to highlight OOV words')
parser.add_argument('--port', default='8050', help='serving port for establishing connection')
parser.add_argument(
'--disable-caching-metrics', action='store_true', help='disable caching metrics for errors analysis'
)
parser.add_argument(
'--estimate-audio-metrics',
'-a',
action='store_true',
help='estimate frequency bandwidth and signal level of audio recordings',
)
parser.add_argument(
'--audio-base-path',
default=None,
type=str,
help='A base path for the relative paths in manifest. It defaults to manifest path.',
)
parser.add_argument('--debug', '-d', action='store_true', help='enable debug mode')
parser.add_argument(
'--names_compared',
'-nc',
nargs=2,
type=str,
help='names of the two fields that will be compared, example: pred_text_contextnet pred_text_conformer. "pred_text_" prefix IS IMPORTANT!',
)
parser.add_argument(
'--show_statistics',
'-shst',
type=str,
help='field name for which you want to see statistics (optional). Example: pred_text_contextnet.',
)
args = parser.parse_args()
# assume audio_filepath is relative to the directory where the manifest is stored
if args.audio_base_path is None:
args.audio_base_path = os.path.dirname(args.manifest)
    # automatically switch to comparison mode if the names_compared argument is provided
if args.names_compared is not None:
comparison_mode = True
logging.error("comparison mod set to true")
else:
comparison_mode = False
print(args, comparison_mode)
return args, comparison_mode
# estimate frequency bandwidth of signal
def eval_bandwidth(signal, sr, threshold=-50):
time_stride = 0.01
hop_length = int(sr * time_stride)
n_fft = 512
spectrogram = np.mean(
np.abs(librosa.stft(y=signal, n_fft=n_fft, hop_length=hop_length, window='blackmanharris')) ** 2, axis=1
)
power_spectrum = librosa.power_to_db(S=spectrogram, ref=np.max, top_db=100)
freqband = 0
for idx in range(len(power_spectrum) - 1, -1, -1):
if power_spectrum[idx] > threshold:
freqband = idx / n_fft * sr
break
return freqband
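# --- Hedged illustration (not part of the original tool) ---
# eval_bandwidth() reports the highest frequency bin whose average power stays above the
# threshold (in dB relative to the peak). For a synthetic pure tone the estimate lands close
# to the tone frequency:
def _example_eval_bandwidth():
    sr = 16000
    t = np.arange(sr) / sr
    tone = np.sin(2 * np.pi * 1000 * t).astype(np.float32)
    return eval_bandwidth(tone, sr)  # roughly 1 kHz for a clean 1 kHz sine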
# load data from JSON manifest file
def load_data(
data_filename,
disable_caching=False,
estimate_audio=False,
vocab=None,
audio_base_path=None,
comparison_mode=False,
names=None,
):
if comparison_mode:
if names is None:
            logging.error('Please specify the names of the compared models')
name_1, name_2 = names
if not comparison_mode:
if vocab is not None:
# load external vocab
vocabulary_ext = {}
with open(vocab, 'r') as f:
for line in f:
if '\t' in line:
# parse word from TSV file
word = line.split('\t')[0]
else:
# assume each line contains just a single word
word = line.strip()
vocabulary_ext[word] = 1
if not disable_caching:
pickle_filename = data_filename.split('.json')[0]
json_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(data_filename))
timestamp = json_mtime.strftime('%Y%m%d_%H%M')
pickle_filename += '_' + timestamp + '.pkl'
if os.path.exists(pickle_filename):
with open(pickle_filename, 'rb') as f:
data, wer, cer, wmr, mwa, num_hours, vocabulary_data, alphabet, metrics_available = pickle.load(f)
if vocab is not None:
for item in vocabulary_data:
item['OOV'] = item['word'] not in vocabulary_ext
if estimate_audio:
for item in data:
filepath = absolute_audio_filepath(item['audio_filepath'], audio_base_path)
signal, sr = librosa.load(path=filepath, sr=None)
bw = eval_bandwidth(signal, sr)
item['freq_bandwidth'] = int(bw)
item['level_db'] = 20 * np.log10(np.max(np.abs(signal)))
with open(pickle_filename, 'wb') as f:
pickle.dump(
[data, wer, cer, wmr, mwa, num_hours, vocabulary_data, alphabet, metrics_available],
f,
pickle.HIGHEST_PROTOCOL,
)
return data, wer, cer, wmr, mwa, num_hours, vocabulary_data, alphabet, metrics_available
data = []
wer_count = 0
cer_count = 0
wmr_count = 0
wer = 0
cer = 0
wmr = 0
mwa = 0
num_hours = 0
match_vocab_1 = defaultdict(lambda: 0)
match_vocab_2 = defaultdict(lambda: 0)
def append_data(
data_filename, estimate_audio, field_name='pred_text',
):
data = []
wer_dist = 0.0
wer_count = 0
cer_dist = 0.0
cer_count = 0
wmr_count = 0
wer = 0
cer = 0
wmr = 0
mwa = 0
num_hours = 0
vocabulary = defaultdict(lambda: 0)
alphabet = set()
match_vocab = defaultdict(lambda: 0)
sm = difflib.SequenceMatcher()
metrics_available = False
with open(data_filename, 'r', encoding='utf8') as f:
for line in tqdm.tqdm(f):
item = json.loads(line)
if not isinstance(item['text'], str):
item['text'] = ''
num_chars = len(item['text'])
orig = item['text'].split()
num_words = len(orig)
for word in orig:
vocabulary[word] += 1
for char in item['text']:
alphabet.add(char)
num_hours += item['duration']
if field_name in item:
metrics_available = True
pred = item[field_name].split()
measures = jiwer.compute_measures(item['text'], item[field_name])
word_dist = measures['substitutions'] + measures['insertions'] + measures['deletions']
char_dist = editdistance.eval(item['text'], item[field_name])
wer_dist += word_dist
cer_dist += char_dist
wer_count += num_words
cer_count += num_chars
sm.set_seqs(orig, pred)
for m in sm.get_matching_blocks():
for word_idx in range(m[0], m[0] + m[2]):
match_vocab[orig[word_idx]] += 1
wmr_count += measures['hits']
else:
if comparison_mode:
if field_name != 'pred_text':
if field_name == name_1:
logging.error(f"The .json file has no field with name: {name_1}")
exit()
if field_name == name_2:
logging.error(f"The .json file has no field with name: {name_2}")
exit()
data.append(
{
'audio_filepath': item['audio_filepath'],
'duration': round(item['duration'], 2),
'num_words': num_words,
'num_chars': num_chars,
'word_rate': round(num_words / item['duration'], 2),
'char_rate': round(num_chars / item['duration'], 2),
'text': item['text'],
}
)
if metrics_available:
data[-1][field_name] = item[field_name]
if num_words == 0:
num_words = 1e-9
if num_chars == 0:
num_chars = 1e-9
data[-1]['WER'] = round(word_dist / num_words * 100.0, 2)
data[-1]['CER'] = round(char_dist / num_chars * 100.0, 2)
data[-1]['WMR'] = round(measures['hits'] / num_words * 100.0, 2)
data[-1]['I'] = measures['insertions']
data[-1]['D'] = measures['deletions']
data[-1]['D-I'] = measures['deletions'] - measures['insertions']
if estimate_audio:
filepath = absolute_audio_filepath(item['audio_filepath'], data_filename)
signal, sr = librosa.load(path=filepath, sr=None)
bw = eval_bandwidth(signal, sr)
item['freq_bandwidth'] = int(bw)
item['level_db'] = 20 * np.log10(np.max(np.abs(signal)))
for k in item:
if k not in data[-1]:
data[-1][k] = item[k]
vocabulary_data = [{'word': word, 'count': vocabulary[word]} for word in vocabulary]
return (
vocabulary_data,
metrics_available,
data,
wer_dist,
wer_count,
cer_dist,
cer_count,
wmr_count,
wer,
cer,
wmr,
mwa,
num_hours,
vocabulary,
alphabet,
match_vocab,
)
(
vocabulary_data,
metrics_available,
data,
wer_dist,
wer_count,
cer_dist,
cer_count,
wmr_count,
wer,
cer,
wmr,
mwa,
num_hours,
vocabulary,
alphabet,
match_vocab,
) = append_data(data_filename, estimate_audio, field_name=fld_nm)
if comparison_mode:
(
vocabulary_data_1,
metrics_available_1,
data_1,
wer_dist_1,
wer_count_1,
cer_dist_1,
cer_count_1,
wmr_count_1,
wer_1,
cer_1,
wmr_1,
mwa_1,
num_hours_1,
vocabulary_1,
alphabet_1,
match_vocab_1,
) = append_data(data_filename, estimate_audio, field_name=name_1)
(
vocabulary_data_2,
metrics_available_2,
data_2,
wer_dist_2,
wer_count_2,
cer_dist_2,
cer_count_2,
wmr_count_2,
wer_2,
cer_2,
wmr_2,
mwa_2,
num_hours_2,
vocabulary_2,
alphabet_2,
match_vocab_2,
) = append_data(data_filename, estimate_audio, field_name=name_2)
if not comparison_mode:
if vocab is not None:
for item in vocabulary_data:
item['OOV'] = item['word'] not in vocabulary_ext
if metrics_available or comparison_mode:
if metrics_available:
wer = wer_dist / wer_count * 100.0
cer = cer_dist / cer_count * 100.0
wmr = wmr_count / wer_count * 100.0
if comparison_mode:
if metrics_available_1 and metrics_available_2:
wer_1 = wer_dist_1 / wer_count_1 * 100.0
cer_1 = cer_dist_1 / cer_count_1 * 100.0
wmr_1 = wmr_count_1 / wer_count_1 * 100.0
wer = wer_dist_2 / wer_count_2 * 100.0
cer = cer_dist_2 / cer_count_2 * 100.0
wmr = wmr_count_2 / wer_count_2 * 100.0
acc_sum_1 = 0
acc_sum_2 = 0
for item in vocabulary_data_1:
w = item['word']
word_accuracy_1 = match_vocab_1[w] / vocabulary_1[w] * 100.0
acc_sum_1 += word_accuracy_1
item['accuracy_1'] = round(word_accuracy_1, 1)
mwa_1 = acc_sum_1 / len(vocabulary_data_1)
for item in vocabulary_data_2:
w = item['word']
word_accuracy_2 = match_vocab_2[w] / vocabulary_2[w] * 100.0
acc_sum_2 += word_accuracy_2
item['accuracy_2'] = round(word_accuracy_2, 1)
mwa_2 = acc_sum_2 / len(vocabulary_data_2)
acc_sum = 0
for item in vocabulary_data:
w = item['word']
word_accuracy = match_vocab[w] / vocabulary[w] * 100.0
acc_sum += word_accuracy
item['accuracy'] = round(word_accuracy, 1)
mwa = acc_sum / len(vocabulary_data)
num_hours /= 3600.0
if not comparison_mode:
if not disable_caching:
with open(pickle_filename, 'wb') as f:
pickle.dump(
[data, wer, cer, wmr, mwa, num_hours, vocabulary_data, alphabet, metrics_available],
f,
pickle.HIGHEST_PROTOCOL,
)
if comparison_mode:
return (
data,
wer,
cer,
wmr,
mwa,
num_hours,
vocabulary_data,
alphabet,
metrics_available,
data_1,
wer_1,
cer_1,
wmr_1,
mwa_1,
num_hours_1,
vocabulary_data_1,
alphabet_1,
metrics_available_1,
data_2,
wer_2,
cer_2,
wmr_2,
mwa_2,
num_hours_2,
vocabulary_data_2,
alphabet_2,
metrics_available_2,
)
return data, wer, cer, wmr, mwa, num_hours, vocabulary_data, alphabet, metrics_available
# plot histogram of specified field in data list
def plot_histogram(data, key, label):
fig = px.histogram(
data_frame=[item[key] for item in data],
nbins=50,
log_y=True,
labels={'value': label},
opacity=0.5,
color_discrete_sequence=['green'],
height=200,
)
fig.update_layout(showlegend=False, margin=dict(l=0, r=0, t=0, b=0, pad=0))
return fig
def plot_word_accuracy(vocabulary_data):
labels = ['Unrecognized', 'Sometimes recognized', 'Always recognized']
counts = [0, 0, 0]
for word in vocabulary_data:
if word['accuracy'] == 0:
counts[0] += 1
elif word['accuracy'] < 100:
counts[1] += 1
else:
counts[2] += 1
colors = ['red', 'orange', 'green']
fig = go.Figure(
data=[
go.Bar(
x=labels,
y=counts,
marker_color=colors,
text=['{:.2%}'.format(count / sum(counts)) for count in counts],
textposition='auto',
)
]
)
fig.update_layout(
showlegend=False, margin=dict(l=0, r=0, t=0, b=0, pad=0), height=200, yaxis={'title_text': '#words'}
)
return fig
def absolute_audio_filepath(audio_filepath, audio_base_path):
"""Return absolute path to an audio file.
    Check if a file exists at audio_filepath.
If not, assume that the path is relative to audio_base_path.
"""
audio_filepath = Path(audio_filepath)
if not audio_filepath.is_file() and not audio_filepath.is_absolute():
audio_filepath = audio_base_path / audio_filepath
if audio_filepath.is_file():
filename = str(audio_filepath)
else:
filename = expanduser(audio_filepath)
else:
filename = expanduser(audio_filepath)
return filename
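# --- Hedged illustration (not part of the original tool) ---
# A hypothetical relative manifest entry resolved against the manifest directory:
#   absolute_audio_filepath("clips/utt_0001.wav", Path("/data/corpus"))
# returns "/data/corpus/clips/utt_0001.wav" (as a string) when the relative path itself does
# not point to an existing file.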
# parse the CLI arguments
args, comparison_mode = parse_args()
if args.show_statistics is not None:
fld_nm = args.show_statistics
else:
fld_nm = 'pred_text'
# parse names of compared models, if any
if comparison_mode:
name_1, name_2 = args.names_compared
print(name_1, name_2)
print('Loading data...')
if not comparison_mode:
data, wer, cer, wmr, mwa, num_hours, vocabulary, alphabet, metrics_available = load_data(
args.manifest,
args.disable_caching_metrics,
args.estimate_audio_metrics,
args.vocab,
args.audio_base_path,
comparison_mode,
args.names_compared,
)
else:
(
data,
wer,
cer,
wmr,
mwa,
num_hours,
vocabulary,
alphabet,
metrics_available,
data_1,
wer_1,
cer_1,
wmr_1,
mwa_1,
num_hours_1,
vocabulary_1,
alphabet_1,
metrics_available_1,
data_2,
wer_2,
cer_2,
wmr_2,
mwa_2,
num_hours_2,
vocabulary_2,
alphabet_2,
metrics_available_2,
) = load_data(
args.manifest,
args.disable_caching_metrics,
args.estimate_audio_metrics,
args.vocab,
args.audio_base_path,
comparison_mode,
args.names_compared,
)
print('Starting server...')
app = dash.Dash(
__name__,
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.BOOTSTRAP],
title=os.path.basename(args.manifest),
)
figures_labels = {
'duration': ['Duration', 'Duration, sec'],
'num_words': ['Number of Words', '#words'],
'num_chars': ['Number of Characters', '#chars'],
'word_rate': ['Word Rate', '#words/sec'],
'char_rate': ['Character Rate', '#chars/sec'],
'WER': ['Word Error Rate', 'WER, %'],
'CER': ['Character Error Rate', 'CER, %'],
'WMR': ['Word Match Rate', 'WMR, %'],
'I': ['# Insertions (I)', '#words'],
'D': ['# Deletions (D)', '#words'],
'D-I': ['# Deletions - # Insertions (D-I)', '#words'],
'freq_bandwidth': ['Frequency Bandwidth', 'Bandwidth, Hz'],
'level_db': ['Peak Level', 'Level, dB'],
}
figures_hist = {}
for k in data[0]:
val = data[0][k]
if isinstance(val, (int, float)) and not isinstance(val, bool):
if k in figures_labels:
ylabel = figures_labels[k][0]
xlabel = figures_labels[k][1]
else:
title = k.replace('_', ' ')
title = title[0].upper() + title[1:].lower()
ylabel = title
xlabel = title
figures_hist[k] = [ylabel + ' (per utterance)', plot_histogram(data, k, xlabel)]
if metrics_available:
figure_word_acc = plot_word_accuracy(vocabulary)
stats_layout = [
dbc.Row(dbc.Col(html.H5(children='Global Statistics'), class_name='text-secondary'), class_name='mt-3'),
dbc.Row(
[
dbc.Col(html.Div('Number of hours', className='text-secondary'), width=3, class_name='border-end'),
dbc.Col(html.Div('Number of utterances', className='text-secondary'), width=3, class_name='border-end'),
dbc.Col(html.Div('Vocabulary size', className='text-secondary'), width=3, class_name='border-end'),
dbc.Col(html.Div('Alphabet size', className='text-secondary'), width=3),
],
class_name='bg-light mt-2 rounded-top border-top border-start border-end',
),
dbc.Row(
[
dbc.Col(
html.H5(
'{:.2f} hours'.format(num_hours),
className='text-center p-1',
style={'color': 'green', 'opacity': 0.7},
),
width=3,
class_name='border-end',
),
dbc.Col(
html.H5(len(data), className='text-center p-1', style={'color': 'green', 'opacity': 0.7}),
width=3,
class_name='border-end',
),
dbc.Col(
html.H5(
'{} words'.format(len(vocabulary)),
className='text-center p-1',
style={'color': 'green', 'opacity': 0.7},
),
width=3,
class_name='border-end',
),
dbc.Col(
html.H5(
'{} chars'.format(len(alphabet)),
className='text-center p-1',
style={'color': 'green', 'opacity': 0.7},
),
width=3,
),
],
class_name='bg-light rounded-bottom border-bottom border-start border-end',
),
]
if metrics_available:
stats_layout += [
dbc.Row(
[
dbc.Col(
html.Div('Word Error Rate (WER), %', className='text-secondary'), width=3, class_name='border-end'
),
dbc.Col(
html.Div('Character Error Rate (CER), %', className='text-secondary'),
width=3,
class_name='border-end',
),
dbc.Col(
html.Div('Word Match Rate (WMR), %', className='text-secondary'), width=3, class_name='border-end',
),
dbc.Col(html.Div('Mean Word Accuracy, %', className='text-secondary'), width=3),
],
class_name='bg-light mt-2 rounded-top border-top border-start border-end',
),
dbc.Row(
[
dbc.Col(
html.H5(
'{:.2f}'.format(wer), className='text-center p-1', style={'color': 'green', 'opacity': 0.7},
),
width=3,
class_name='border-end',
),
dbc.Col(
html.H5(
'{:.2f}'.format(cer), className='text-center p-1', style={'color': 'green', 'opacity': 0.7}
),
width=3,
class_name='border-end',
),
dbc.Col(
html.H5(
'{:.2f}'.format(wmr), className='text-center p-1', style={'color': 'green', 'opacity': 0.7},
),
width=3,
class_name='border-end',
),
dbc.Col(
html.H5(
'{:.2f}'.format(mwa), className='text-center p-1', style={'color': 'green', 'opacity': 0.7},
),
width=3,
),
],
class_name='bg-light rounded-bottom border-bottom border-start border-end',
),
]
stats_layout += [
dbc.Row(dbc.Col(html.H5(children='Alphabet'), class_name='text-secondary'), class_name='mt-3'),
dbc.Row(
dbc.Col(html.Div('{}'.format(sorted(alphabet))),), class_name='mt-2 bg-light font-monospace rounded border'
),
]
for k in figures_hist:
stats_layout += [
dbc.Row(dbc.Col(html.H5(figures_hist[k][0]), class_name='text-secondary'), class_name='mt-3'),
dbc.Row(dbc.Col(dcc.Graph(id='duration-graph', figure=figures_hist[k][1]),),),
]
if metrics_available:
stats_layout += [
dbc.Row(dbc.Col(html.H5('Word accuracy distribution'), class_name='text-secondary'), class_name='mt-3'),
dbc.Row(dbc.Col(dcc.Graph(id='word-acc-graph', figure=figure_word_acc),),),
]
wordstable_columns = [{'name': 'Word', 'id': 'word'}, {'name': 'Count', 'id': 'count'}]
if 'OOV' in vocabulary[0]:
wordstable_columns.append({'name': 'OOV', 'id': 'OOV'})
if metrics_available:
wordstable_columns.append({'name': 'Accuracy, %', 'id': 'accuracy'})
stats_layout += [
dbc.Row(dbc.Col(html.H5('Vocabulary'), class_name='text-secondary'), class_name='mt-3'),
dbc.Row(
dbc.Col(
dash_table.DataTable(
id='wordstable',
columns=wordstable_columns,
filter_action='custom',
filter_query='',
sort_action='custom',
sort_mode='single',
page_action='custom',
page_current=0,
page_size=DATA_PAGE_SIZE,
cell_selectable=False,
page_count=math.ceil(len(vocabulary) / DATA_PAGE_SIZE),
sort_by=[{'column_id': 'word', 'direction': 'asc'}],
style_cell={'maxWidth': 0, 'textAlign': 'left'},
style_header={'color': 'text-primary'},
css=[{'selector': '.dash-filter--case', 'rule': 'display: none'},],
),
),
class_name='m-2',
),
dbc.Row(dbc.Col([html.Button('Download Vocabulary', id='btn_csv'), dcc.Download(id='download-vocab-csv'),]),),
]
@app.callback(
Output('download-vocab-csv', 'data'),
[Input('btn_csv', 'n_clicks'), State('wordstable', 'sort_by'), State('wordstable', 'filter_query')],
prevent_initial_call=True,
)
def download_vocabulary(n_clicks, sort_by, filter_query):
vocabulary_view = vocabulary
filtering_expressions = filter_query.split(' && ')
for filter_part in filtering_expressions:
col_name, op, filter_value = split_filter_part(filter_part)
if op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
vocabulary_view = [x for x in vocabulary_view if getattr(operator, op)(x[col_name], filter_value)]
elif op == 'contains':
vocabulary_view = [x for x in vocabulary_view if filter_value in str(x[col_name])]
if len(sort_by):
col = sort_by[0]['column_id']
descending = sort_by[0]['direction'] == 'desc'
vocabulary_view = sorted(vocabulary_view, key=lambda x: x[col], reverse=descending)
with open('sde_vocab.csv', encoding='utf-8', mode='w', newline='') as fo:
writer = csv.writer(fo)
writer.writerow(vocabulary_view[0].keys())
for item in vocabulary_view:
writer.writerow([str(item[k]) for k in item])
return dcc.send_file("sde_vocab.csv")
@app.callback(
[Output('wordstable', 'data'), Output('wordstable', 'page_count')],
[Input('wordstable', 'page_current'), Input('wordstable', 'sort_by'), Input('wordstable', 'filter_query')],
)
def update_wordstable(page_current, sort_by, filter_query):
vocabulary_view = vocabulary
filtering_expressions = filter_query.split(' && ')
for filter_part in filtering_expressions:
col_name, op, filter_value = split_filter_part(filter_part)
if op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
vocabulary_view = [x for x in vocabulary_view if getattr(operator, op)(x[col_name], filter_value)]
elif op == 'contains':
vocabulary_view = [x for x in vocabulary_view if filter_value in str(x[col_name])]
if len(sort_by):
col = sort_by[0]['column_id']
descending = sort_by[0]['direction'] == 'desc'
vocabulary_view = sorted(vocabulary_view, key=lambda x: x[col], reverse=descending)
if page_current * DATA_PAGE_SIZE >= len(vocabulary_view):
page_current = len(vocabulary_view) // DATA_PAGE_SIZE
return [
vocabulary_view[page_current * DATA_PAGE_SIZE : (page_current + 1) * DATA_PAGE_SIZE],
math.ceil(len(vocabulary_view) / DATA_PAGE_SIZE),
]
samples_layout = [
dbc.Row(dbc.Col(html.H5('Data'), class_name='text-secondary'), class_name='mt-3'),
html.Hr(),
dbc.Row(
dbc.Col(
dash_table.DataTable(
id='datatable',
columns=[{'name': k.replace('_', ' '), 'id': k, 'hideable': True} for k in data[0]],
filter_action='custom',
filter_query='',
sort_action='custom',
sort_mode='single',
sort_by=[],
row_selectable='single',
selected_rows=[0],
page_action='custom',
page_current=0,
page_size=DATA_PAGE_SIZE,
page_count=math.ceil(len(data) / DATA_PAGE_SIZE),
style_cell={'overflow': 'hidden', 'textOverflow': 'ellipsis', 'maxWidth': 0, 'textAlign': 'center'},
style_header={
'color': 'text-primary',
'text_align': 'center',
'height': 'auto',
'whiteSpace': 'normal',
},
css=[
{'selector': '.dash-spreadsheet-menu', 'rule': 'position:absolute; bottom: 8px'},
{'selector': '.dash-filter--case', 'rule': 'display: none'},
{'selector': '.column-header--hide', 'rule': 'display: none'},
],
),
)
),
] + [
dbc.Row(
[
dbc.Col(
html.Div(children=k.replace('_', ' ')),
width=2,
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
dbc.Col(html.Div(id='_' + k), class_name='mt-1 bg-light font-monospace text-break small rounded border'),
]
)
for k in data[0]
]
if metrics_available:
samples_layout += [
dbc.Row(
[
dbc.Col(
html.Div(children='text diff'),
width=2,
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
dbc.Col(
html.Iframe(
id='_diff',
sandbox='',
srcDoc='',
style={'border': 'none', 'width': '100%', 'height': '100%'},
className='bg-light font-monospace text-break small',
),
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
]
)
]
samples_layout += [
dbc.Row(dbc.Col(html.Audio(id='player', controls=True),), class_name='mt-3 '),
dbc.Row(dbc.Col(dcc.Graph(id='signal-graph')), class_name='mt-3'),
]
# columns for the word-level vocabulary table shown in the comparison tool
wordstable_columns_tool = [{'name': 'Word', 'id': 'word'}, {'name': 'Count', 'id': 'count'}]
wordstable_columns_tool.append({'name': 'Accuracy_1, %', 'id': 'accuracy_1'})
wordstable_columns_tool.append({'name': 'Accuracy_2, %', 'id': 'accuracy_2'})
if comparison_mode:
model_name_1, model_name_2 = name_1, name_2
for i in range(len(vocabulary_1)):
vocabulary_1[i].update(vocabulary_2[i])
def _wer_(grnd, pred):
grnd_words = grnd.split()
pred_words = pred.split()
edit_distance = editdistance.eval(grnd_words, pred_words)
wer = edit_distance / len(grnd_words)
return wer
def metric(a, b, met=None):
cer = editdistance.distance(a, b) / len(a)
wer = _wer_(a, b)
return round(float(wer) * 100, 2), round(float(cer) * 100, 2)
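    # metric() above returns (WER %, CER %); illustrative example derived from the code:
    # metric("the cat sat", "the cat sad") -> (33.33, 9.09), i.e. one substituted word out of
    # three (WER 33.33%) and one substituted character out of eleven (CER 9.09%).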
def write_metrics(data, Ox, Oy):
da = pd.DataFrame.from_records(data)
gt = da['text']
tt_1 = da[Ox]
tt_2 = da[Oy]
wer_tt1_c, cer_tt1_c = [], []
wer_tt2_c, cer_tt2_c = [], []
for j in range(len(gt)):
wer_tt1, cer_tt1 = metric(gt[j], tt_1[j]) # first model
wer_tt2, cer_tt2 = metric(gt[j], tt_2[j]) # second model
wer_tt1_c.append(wer_tt1)
cer_tt1_c.append(cer_tt1)
wer_tt2_c.append(wer_tt2)
cer_tt2_c.append(cer_tt2)
da['wer_' + Ox] = pd.Series(wer_tt1_c, index=da.index)
da['wer_' + Oy] = pd.Series(wer_tt2_c, index=da.index)
da['cer_' + Ox] = pd.Series(cer_tt1_c, index=da.index)
da['cer_' + Oy] = pd.Series(cer_tt2_c, index=da.index)
return da.to_dict('records')
data_with_metrics = write_metrics(data, model_name_1, model_name_2)
if args.show_statistics is not None:
textdiffstyle = {'border': 'none', 'width': '100%', 'height': '100%'}
else:
textdiffstyle = {'border': 'none', 'width': '1%', 'height': '1%', 'display': 'none'}
def prepare_data(df, name1=model_name_1, name2=model_name_2):
res = pd.DataFrame()
tmp = df['word']
res.insert(0, 'word', tmp)
res.insert(1, 'count', [float(i) for i in df['count']])
res.insert(2, 'accuracy_model_' + name1, df['accuracy_1'])
res.insert(3, 'accuracy_model_' + name2, df['accuracy_2'])
res.insert(4, 'accuracy_diff ' + '(' + name1 + ' - ' + name2 + ')', df['accuracy_1'] - df['accuracy_2'])
res.insert(2, 'count^(-1)', 1 / df['count'])
return res
for_col_names = pd.DataFrame()
for_col_names.insert(0, 'word', ['a'])
for_col_names.insert(1, 'count', [0])
for_col_names.insert(2, 'accuracy_model_' + model_name_1, [0])
for_col_names.insert(3, 'accuracy_model_' + model_name_2, [0])
for_col_names.insert(4, 'accuracy_diff ' + '(' + model_name_1 + ' - ' + model_name_2 + ')', [0])
for_col_names.insert(5, 'count^(-1)', [0])
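    # for_col_names is a single-row placeholder frame; only its column names are used to
    # populate the axis/color/size dropdowns of the word-level comparison scatter plot.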
@app.callback(
Output('voc_graph', 'figure'),
[
Input('xaxis-column', 'value'),
Input('yaxis-column', 'value'),
Input('color-column', 'value'),
Input('size-column', 'value'),
Input("datatable-advanced-filtering", "derived_virtual_data"),
Input("dot_spacing", 'value'),
Input("radius", 'value'),
],
prevent_initial_call=False,
)
def draw_vocab(Ox, Oy, color, size, data, dot_spacing='no', rad=0.01):
import math
import random
import pandas as pd
df = pd.DataFrame.from_records(data)
res = prepare_data(df)
res_spacing = res.copy(deep=True)
if dot_spacing == 'yes':
rad = float(rad)
            if Ox[0] in ('a', 'c'):  # only jitter numeric accuracy/count axes
tmp = []
for i in range(len(res[Ox])):
tmp.append(
res[Ox][i]
+ rad
* random.randrange(1, 10)
* math.cos(random.randrange(1, len(res[Ox])) * 2 * math.pi / len(res[Ox]))
)
res_spacing[Ox] = tmp
            if Oy[0] in ('a', 'c'):  # only jitter numeric accuracy/count axes
tmp = []
for i in range(len(res[Oy])):
tmp.append(
res[Oy][i]
+ rad
* random.randrange(1, 10)
* math.sin(random.randrange(1, len(res[Oy])) * 2 * math.pi / len(res[Oy]))
)
res_spacing[Oy] = tmp
res = res_spacing
fig = px.scatter(
res,
x=Ox,
y=Oy,
color=color,
size=size,
hover_data={'word': True, Ox: True, Oy: True, 'count': True},
width=1300,
height=1000,
)
if (Ox == 'accuracy_model_' + model_name_1 and Oy == 'accuracy_model_' + model_name_2) or (
Oy == 'accuracy_model_' + model_name_1 and Ox == 'accuracy_model_' + model_name_2
):
fig.add_shape(
type="line", x0=0, y0=0, x1=100, y1=100, line=dict(color="MediumPurple", width=1, dash="dot",)
)
return fig
@app.callback(
Output('filter-query-input', 'style'),
Output('filter-query-output', 'style'),
Input('filter-query-read-write', 'value'),
)
def query_input_output(val):
input_style = {'width': '100%'}
output_style = {}
input_style.update(display='inline-block')
output_style.update(display='none')
return input_style, output_style
@app.callback(Output('datatable-advanced-filtering', 'filter_query'), Input('filter-query-input', 'value'))
def write_query(query):
if query is None:
return ''
return query
@app.callback(Output('filter-query-output', 'children'), Input('datatable-advanced-filtering', 'filter_query'))
def read_query(query):
if query is None:
return "No filter query"
return dcc.Markdown('`filter_query = "{}"`'.format(query))
############
@app.callback(
Output('filter-query-input-2', 'style'),
Output('filter-query-output-2', 'style'),
Input('filter-query-read-write', 'value'),
)
def query_input_output(val):
input_style = {'width': '100%'}
output_style = {}
input_style.update(display='inline-block')
output_style.update(display='none')
return input_style, output_style
@app.callback(Output('datatable-advanced-filtering-2', 'filter_query'), Input('filter-query-input-2', 'value'))
def write_query(query):
if query is None:
return ''
return query
@app.callback(Output('filter-query-output-2', 'children'), Input('datatable-advanced-filtering-2', 'filter_query'))
def read_query(query):
if query is None:
return "No filter query"
return dcc.Markdown('`filter_query = "{}"`'.format(query))
############
def display_query(query):
if query is None:
return ''
return html.Details(
[
html.Summary('Derived filter query structure'),
html.Div(
dcc.Markdown(
'''```json
{}
```'''.format(
json.dumps(query, indent=4)
)
)
),
]
)
comparison_layout = [
html.Div(
[
dcc.Markdown("model 1:" + ' ' + model_name_1[10:]),
dcc.Markdown("model 2:" + ' ' + model_name_2[10:]),
dcc.Dropdown(
['word level', 'utterance level'],
'word level',
placeholder="choose comparison lvl",
id='lvl_choose',
),
]
),
html.Hr(),
html.Div(
[
html.Div(
[
dcc.Dropdown(for_col_names.columns[::], 'accuracy_model_' + model_name_1, id='xaxis-column'),
dcc.Dropdown(for_col_names.columns[::], 'accuracy_model_' + model_name_2, id='yaxis-column'),
dcc.Dropdown(
for_col_names.select_dtypes(include='number').columns[::],
placeholder='Select what will encode color of points',
id='color-column',
),
dcc.Dropdown(
for_col_names.select_dtypes(include='number').columns[::],
placeholder='Select what will encode size of points',
id='size-column',
),
dcc.Dropdown(
['yes', 'no'],
placeholder='if you want to enable dot spacing',
id='dot_spacing',
style={'width': '200%'},
),
dcc.Input(id='radius', placeholder='Enter radius of spacing (std is 0.01)'),
html.Hr(),
dcc.Input(id='filter-query-input', placeholder='Enter filter query',),
],
style={'width': '200%', 'display': 'inline-block', 'float': 'middle'},
),
html.Hr(),
html.Div(id='filter-query-output'),
dash_table.DataTable(
id='datatable-advanced-filtering',
columns=wordstable_columns_tool,
data=vocabulary_1,
editable=False,
page_action='native',
page_size=5,
filter_action="native",
),
html.Hr(),
html.Div(id='datatable-query-structure', style={'whitespace': 'pre'}),
html.Hr(),
dbc.Row(dbc.Col(dcc.Graph(id='voc_graph'),),),
html.Hr(),
],
id='wrd_lvl',
style={'display': 'block'},
),
html.Div(
[
html.Div(
[
dcc.Dropdown(['WER', 'CER'], 'WER', placeholder="Choose metric", id="choose_metric"),
dbc.Row(dbc.Col(html.H5('Data'), class_name='text-secondary'), class_name='mt-3'),
html.Hr(),
html.Hr(),
dcc.Input(
id='filter-query-input-2', placeholder='Enter filter query', style={'width': '100%'}
),
html.Div(id='filter-query-output-2'),
dbc.Row(
dbc.Col(
[
dash_table.DataTable(
id='datatable-advanced-filtering-2',
columns=[
{'name': k.replace('_', ' '), 'id': k, 'hideable': True}
for k in data_with_metrics[0]
],
data=data_with_metrics,
editable=False,
page_action='native',
page_size=5,
row_selectable='single',
selected_rows=[0],
page_current=0,
filter_action="native",
style_cell={
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 0,
'textAlign': 'center',
},
style_header={
'color': 'text-primary',
'text_align': 'center',
'height': 'auto',
'whiteSpace': 'normal',
},
css=[
{
'selector': '.dash-spreadsheet-menu',
'rule': 'position:absolute; bottom: 8px',
},
{'selector': '.dash-filter--case', 'rule': 'display: none'},
{'selector': '.column-header--hide', 'rule': 'display: none'},
],
),
dbc.Row(dbc.Col(html.Audio(id='player-1', controls=True),), class_name='mt-3'),
]
)
),
]
+ [
dbc.Row(
[
dbc.Col(
html.Div(children=k.replace('_', '-')),
width=2,
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
dbc.Col(
html.Div(id='__' + k),
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
]
)
for k in data_with_metrics[0]
]
),
],
id='unt_lvl',
),
] + [
html.Div(
[
html.Div(
[
dbc.Row(dbc.Col(dcc.Graph(id='utt_graph'),),),
html.Hr(),
dcc.Input(id='clicked_aidopath', style={'width': '100%'}),
html.Hr(),
                dcc.Input(id='my-output-1', style={'display': 'none'}),  # hidden input relaying the selected row index between callbacks
]
),
html.Div([dbc.Row(dbc.Col(dcc.Graph(id='signal-graph-1')), class_name='mt-3'),]),
],
id='down_thing',
style={'display': 'block'},
)
]
if args.show_statistics is not None:
comparison_layout += [
html.Div(
[
dbc.Row(
[
dbc.Col(
html.Div(children='text diff'),
width=2,
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
dbc.Col(
html.Iframe(
id='__diff',
sandbox='',
srcDoc='',
style=textdiffstyle,
className='bg-light font-monospace text-break small',
),
class_name='mt-1 bg-light font-monospace text-break small rounded border',
),
],
id="text_diff_div",
)
],
id='mid_thing',
style={'display': 'block'},
),
]
@app.callback(
[
Output(component_id='wrd_lvl', component_property='style'),
Output(component_id='unt_lvl', component_property='style'),
Output(component_id='mid_thing', component_property='style'),
Output(component_id='down_thing', component_property='style'),
Input(component_id='lvl_choose', component_property='value'),
]
)
def show_hide_element(visibility_state):
if visibility_state == 'word level':
return (
{'width': '50%', 'display': 'inline-block', 'float': 'middle'},
{'width': '50%', 'display': 'none', 'float': 'middle'},
{'display': 'none'},
{'display': 'none'},
)
else:
return (
{'width': '100%', 'display': 'none', 'float': 'middle'},
{'width': '100%', 'display': 'inline-block', 'float': 'middle'},
{'display': 'block'},
{'display': 'block'},
)
if args.show_statistics is None:
@app.callback(
[
Output(component_id='wrd_lvl', component_property='style'),
Output(component_id='unt_lvl', component_property='style'),
Output(component_id='down_thing', component_property='style'),
Input(component_id='lvl_choose', component_property='value'),
]
)
def show_hide_element(visibility_state):
if args.show_statistics is not None:
a = {'border': 'none', 'width': '100%', 'height': '100%', 'display': 'block'}
else:
a = {'border': 'none', 'width': '100%', 'height': '100%', 'display': 'none'}
if visibility_state == 'word level':
return (
{'width': '50%', 'display': 'inline-block', 'float': 'middle'},
{'width': '50%', 'display': 'none', 'float': 'middle'},
{'display': 'none'},
)
else:
return (
{'width': '100%', 'display': 'none', 'float': 'middle'},
{'width': '100%', 'display': 'inline-block', 'float': 'middle'},
{'display': 'block'},
)
store = []
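    # Clicking a point in the utterance-level scatter plot selects the matching table row:
    # the callback below locates the row by its audio_filepath and jumps to its page, and a
    # second callback applies the row selection via the hidden 'my-output-1' input.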
@app.callback(
[Output('datatable-advanced-filtering-2', 'page_current'), Output('my-output-1', 'value')],
[Input('utt_graph', 'clickData'),],
)
def real_select_click(hoverData):
if hoverData is not None:
path = str(hoverData['points'][0]['customdata'][-1])
for t in range(len(data_with_metrics)):
if data_with_metrics[t]['audio_filepath'] == path:
ind = t
                    s = t
sel = s
pg = math.ceil(ind // 5)
return pg, sel
else:
return 0, 0
@app.callback(
[Output('datatable-advanced-filtering-2', 'selected_rows')], [Input('my-output-1', 'value')],
)
def real_select_click(num):
s = num
return [[s]]
CALCULATED_METRIC = [False, False]
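    # Utterance-level comparison plot: per-sample WER (or CER) of model 1 vs model 2,
    # with a dotted y = x reference line; points are colored by the number of words.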
@app.callback(
[
Output('utt_graph', 'figure'),
Output('clicked_aidopath', 'value'),
Input('choose_metric', 'value'),
Input('utt_graph', 'clickData'),
Input('datatable-advanced-filtering-2', 'derived_virtual_data'),
],
)
def draw_table_with_metrics(met, hoverData, data_virt):
Ox = name_1
Oy = name_2
if met == "WER":
cerower = 'wer_'
else:
cerower = 'cer_'
da = pd.DataFrame.from_records(data_virt)
c = da
fig = px.scatter(
c,
x=cerower + Ox,
y=cerower + Oy,
width=1000,
height=900,
color='num_words',
hover_data={
'text': True,
Ox: True,
Oy: True,
'wer_' + Ox: True,
'wer_' + Oy: True,
'cer_' + Ox: True,
'cer_' + Oy: True,
'audio_filepath': True,
},
) #'numwords': True,
fig.add_shape(type="line", x0=0, y0=0, x1=100, y1=100, line=dict(color="Red", width=1, dash="dot",))
fig.update_layout(clickmode='event+select')
fig.update_traces(marker_size=10)
path = None
if hoverData is not None:
path = str(hoverData['points'][0]['customdata'][-1])
return fig, path
@app.callback(
[Output('datatable', 'data'), Output('datatable', 'page_count')],
[Input('datatable', 'page_current'), Input('datatable', 'sort_by'), Input('datatable', 'filter_query')],
)
def update_datatable(page_current, sort_by, filter_query):
data_view = data
filtering_expressions = filter_query.split(' && ')
for filter_part in filtering_expressions:
col_name, op, filter_value = split_filter_part(filter_part)
if op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
data_view = [x for x in data_view if getattr(operator, op)(x[col_name], filter_value)]
elif op == 'contains':
data_view = [x for x in data_view if filter_value in str(x[col_name])]
if len(sort_by):
col = sort_by[0]['column_id']
descending = sort_by[0]['direction'] == 'desc'
data_view = sorted(data_view, key=lambda x: x[col], reverse=descending)
if page_current * DATA_PAGE_SIZE >= len(data_view):
page_current = len(data_view) // DATA_PAGE_SIZE
return [
data_view[page_current * DATA_PAGE_SIZE : (page_current + 1) * DATA_PAGE_SIZE],
math.ceil(len(data_view) / DATA_PAGE_SIZE),
]
if comparison_mode:
app.layout = html.Div(
[
dcc.Location(id='url', refresh=False),
dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink('Statistics', id='stats_link', href='/', active=True)),
dbc.NavItem(dbc.NavLink('Samples', id='samples_link', href='/samples')),
dbc.NavItem(dbc.NavLink('Comparison tool', id='comp_tool', href='/comparison')),
],
brand='Speech Data Explorer',
sticky='top',
color='green',
dark=True,
),
dbc.Container(id='page-content'),
]
)
else:
app.layout = html.Div(
[
dcc.Location(id='url', refresh=False),
dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink('Statistics', id='stats_link', href='/', active=True)),
dbc.NavItem(dbc.NavLink('Samples', id='samples_link', href='/samples')),
],
brand='Speech Data Explorer',
sticky='top',
color='green',
dark=True,
),
dbc.Container(id='page-content'),
]
)
if comparison_mode:
@app.callback(
[
Output('page-content', 'children'),
Output('stats_link', 'active'),
Output('samples_link', 'active'),
Output('comp_tool', 'active'),
],
[Input('url', 'pathname')],
)
def nav_click(url):
if url == '/samples':
return [samples_layout, False, True, False]
elif url == '/comparison':
return [comparison_layout, False, False, True]
else:
return [stats_layout, True, False, False]
else:
@app.callback(
[Output('page-content', 'children'), Output('stats_link', 'active'), Output('samples_link', 'active'),],
[Input('url', 'pathname')],
)
def nav_click(url):
if url == '/samples':
return [samples_layout, False, True]
else:
return [stats_layout, True, False]
@app.callback(
[Output('_' + k, 'children') for k in data[0]], [Input('datatable', 'selected_rows'), Input('datatable', 'data')]
)
def show_item(idx, data):
if len(idx) == 0:
raise PreventUpdate
return [data[idx[0]][k] for k in data[0]]
if comparison_mode:
@app.callback(
[Output('__' + k, 'children') for k in data_with_metrics[0]],
[Input('datatable-advanced-filtering-2', 'selected_rows'), Input('datatable-advanced-filtering-2', 'data')],
)
def show_item(idx, data):
if len(idx) == 0:
raise PreventUpdate
return [data[idx[0]][k] for k in data_with_metrics[0]]
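# Word-level diff for the selected sample: each word is placed on its own line so that
# diff_match_patch's line-mode diff (diff_linesToChars) effectively compares word sequences;
# the result is rendered as HTML and displayed in the sandboxed iframe.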
@app.callback(Output('_diff', 'srcDoc'), [Input('datatable', 'selected_rows'), Input('datatable', 'data'),])
def show_diff(
idx, data,
):
if len(idx) == 0:
raise PreventUpdate
orig_words = data[idx[0]]['text']
orig_words = '\n'.join(orig_words.split()) + '\n'
pred_words = data[idx[0]][fld_nm]
pred_words = '\n'.join(pred_words.split()) + '\n'
diff = diff_match_patch.diff_match_patch()
diff.Diff_Timeout = 0
orig_enc, pred_enc, enc = diff.diff_linesToChars(orig_words, pred_words)
diffs = diff.diff_main(orig_enc, pred_enc, False)
diff.diff_charsToLines(diffs, enc)
diffs_post = []
for d in diffs:
diffs_post.append((d[0], d[1].replace('\n', ' ')))
diff_html = diff.diff_prettyHtml(diffs_post)
return diff_html
@app.callback(
Output('__diff', 'srcDoc'),
[Input('datatable-advanced-filtering-2', 'selected_rows'), Input('datatable-advanced-filtering-2', 'data'),],
)
def show_diff(
idx, data,
):
if len(idx) == 0:
raise PreventUpdate
orig_words = data[idx[0]]['text']
orig_words = '\n'.join(orig_words.split()) + '\n'
pred_words = data[idx[0]][fld_nm]
pred_words = '\n'.join(pred_words.split()) + '\n'
diff = diff_match_patch.diff_match_patch()
diff.Diff_Timeout = 0
orig_enc, pred_enc, enc = diff.diff_linesToChars(orig_words, pred_words)
diffs = diff.diff_main(orig_enc, pred_enc, False)
diff.diff_charsToLines(diffs, enc)
diffs_post = []
for d in diffs:
diffs_post.append((d[0], d[1].replace('\n', ' ')))
diff_html = diff.diff_prettyHtml(diffs_post)
return diff_html
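# Waveform and spectrogram preview for the selected sample: the audio is loaded (honoring an
# optional offset/duration), a linear-scale STFT spectrogram is computed, and both are shown
# as stacked subplots.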
@app.callback(Output('signal-graph', 'figure'), [Input('datatable', 'selected_rows'), Input('datatable', 'data')])
def plot_signal(idx, data):
if len(idx) == 0:
raise PreventUpdate
figs = make_subplots(rows=2, cols=1, subplot_titles=('Waveform', 'Spectrogram'))
try:
filename = absolute_audio_filepath(data[idx[0]]['audio_filepath'], args.audio_base_path)
audio, fs = librosa.load(path=filename, sr=None)
if 'offset' in data[idx[0]]:
audio = audio[
int(data[idx[0]]['offset'] * fs) : int((data[idx[0]]['offset'] + data[idx[0]]['duration']) * fs)
]
time_stride = 0.01
hop_length = int(fs * time_stride)
n_fft = 512
# linear scale spectrogram
s = librosa.stft(y=audio, n_fft=n_fft, hop_length=hop_length)
s_db = librosa.power_to_db(S=np.abs(s) ** 2, ref=np.max, top_db=100)
figs.add_trace(
go.Scatter(
x=np.arange(audio.shape[0]) / fs,
y=audio,
line={'color': 'green'},
name='Waveform',
hovertemplate='Time: %{x:.2f} s<br>Amplitude: %{y:.2f}<br><extra></extra>',
),
row=1,
col=1,
)
figs.add_trace(
go.Heatmap(
z=s_db,
colorscale=[[0, 'rgb(30,62,62)'], [0.5, 'rgb(30,128,128)'], [1, 'rgb(30,255,30)'],],
colorbar=dict(yanchor='middle', lenmode='fraction', y=0.2, len=0.5, ticksuffix=' dB'),
dx=time_stride,
dy=fs / n_fft / 1000,
name='Spectrogram',
hovertemplate='Time: %{x:.2f} s<br>Frequency: %{y:.2f} kHz<br>Magnitude: %{z:.2f} dB<extra></extra>',
),
row=2,
col=1,
)
figs.update_layout({'margin': dict(l=0, r=0, t=20, b=0, pad=0), 'height': 500})
figs.update_xaxes(title_text='Time, s', row=1, col=1)
figs.update_yaxes(title_text='Amplitude', row=1, col=1)
figs.update_xaxes(title_text='Time, s', row=2, col=1)
figs.update_yaxes(title_text='Frequency, kHz', row=2, col=1)
except Exception as ex:
app.logger.error(f'ERROR in plot signal: {ex}')
return figs
@app.callback(
Output('signal-graph-1', 'figure'),
[Input('datatable-advanced-filtering-2', 'selected_rows'), Input('datatable-advanced-filtering-2', 'data')],
)
def plot_signal(idx, data):
if len(idx) == 0:
raise PreventUpdate
figs = make_subplots(rows=2, cols=1, subplot_titles=('Waveform', 'Spectrogram'))
try:
filename = absolute_audio_filepath(data[idx[0]]['audio_filepath'], args.audio_base_path)
audio, fs = librosa.load(path=filename, sr=None)
if 'offset' in data[idx[0]]:
audio = audio[
int(data[idx[0]]['offset'] * fs) : int((data[idx[0]]['offset'] + data[idx[0]]['duration']) * fs)
]
time_stride = 0.01
hop_length = int(fs * time_stride)
n_fft = 512
# linear scale spectrogram
s = librosa.stft(y=audio, n_fft=n_fft, hop_length=hop_length)
s_db = librosa.power_to_db(S=np.abs(s) ** 2, ref=np.max, top_db=100)
figs.add_trace(
go.Scatter(
x=np.arange(audio.shape[0]) / fs,
y=audio,
line={'color': 'green'},
name='Waveform',
hovertemplate='Time: %{x:.2f} s<br>Amplitude: %{y:.2f}<br><extra></extra>',
),
row=1,
col=1,
)
figs.add_trace(
go.Heatmap(
z=s_db,
colorscale=[[0, 'rgb(30,62,62)'], [0.5, 'rgb(30,128,128)'], [1, 'rgb(30,255,30)'],],
colorbar=dict(yanchor='middle', lenmode='fraction', y=0.2, len=0.5, ticksuffix=' dB'),
dx=time_stride,
dy=fs / n_fft / 1000,
name='Spectrogram',
hovertemplate='Time: %{x:.2f} s<br>Frequency: %{y:.2f} kHz<br>Magnitude: %{z:.2f} dB<extra></extra>',
),
row=2,
col=1,
)
figs.update_layout({'margin': dict(l=0, r=0, t=20, b=0, pad=0), 'height': 500})
figs.update_xaxes(title_text='Time, s', row=1, col=1)
figs.update_yaxes(title_text='Amplitude', row=1, col=1)
figs.update_xaxes(title_text='Time, s', row=2, col=1)
figs.update_yaxes(title_text='Frequency, kHz', row=2, col=1)
except Exception as ex:
app.logger.error(f'ERROR in plot signal: {ex}')
return figs
@app.callback(Output('player', 'src'), [Input('datatable', 'selected_rows'), Input('datatable', 'data')])
def update_player(idx, data):
if len(idx) == 0:
raise PreventUpdate
try:
filename = absolute_audio_filepath(data[idx[0]]['audio_filepath'], args.audio_base_path)
signal, sr = librosa.load(path=filename, sr=None)
if 'offset' in data[idx[0]]:
signal = signal[
int(data[idx[0]]['offset'] * sr) : int((data[idx[0]]['offset'] + data[idx[0]]['duration']) * sr)
]
with io.BytesIO() as buf:
# convert to PCM .wav
sf.write(buf, signal, sr, format='WAV')
buf.seek(0)
encoded = base64.b64encode(buf.read())
return 'data:audio/wav;base64,{}'.format(encoded.decode())
except Exception as ex:
app.logger.error(f'ERROR in audio player: {ex}')
return ''
@app.callback(
Output('player-1', 'src'),
[Input('datatable-advanced-filtering-2', 'selected_rows'), Input('datatable-advanced-filtering-2', 'data')],
)
def update_player(idx, data):
if len(idx) == 0:
raise PreventUpdate
try:
filename = absolute_audio_filepath(data[idx[0]]['audio_filepath'], args.audio_base_path)
signal, sr = librosa.load(path=filename, sr=None)
if 'offset' in data[idx[0]]:
signal = signal[
int(data[idx[0]]['offset'] * sr) : int((data[idx[0]]['offset'] + data[idx[0]]['duration']) * sr)
]
with io.BytesIO() as buf:
# convert to PCM .wav
sf.write(buf, signal, sr, format='WAV')
buf.seek(0)
encoded = base64.b64encode(buf.read())
return 'data:audio/wav;base64,{}'.format(encoded.decode())
except Exception as ex:
app.logger.error(f'ERROR in audio player: {ex}')
return ''
if __name__ == '__main__':
app.run_server(host='0.0.0.0', port=args.port, debug=args.debug)
|
NeMo-main
|
tools/speech_data_explorer/data_explorer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
tools/customization_dataset_preparation/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The NeMo LLM Customization service requires data in the form of a .jsonl file where each line has only two fields (namely prompt and completion).
However, your data might not already be in this format (or even this file type).
This script will help you convert what you have into what you need, quickly and easily.
You will need your data file (as a .jsonl, .json, .csv, .tsv or .xlsx).
Each row should contain one sample.
Make sure that the directory your file is in is readable and writeable.
Otherwise, please change it using chmod. Don't worry, we will not overwrite your existing file.
With close to a dozen factors that make training optimal, there might just be something you overlook (we all do!).
To check whether a dataset has been prepared correctly:
!python customization_dataset_preparation.py --filename <filename>
To format a dataset from an alternative jsonl/json/csv/tsv/xlsx column structure (the example here is for a Question Answering task):
For instance, if you are working on a Question Answering task, you would typically have the columns `context`, `question` and `answer`
!python customization_dataset_preparation.py --filename <filename> --prompt_template "Context: {context} Question: {question} Answer:" --completion_template "{answer}"
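For illustration (hypothetical values, not taken from any real dataset), a prepared line in the resulting .jsonl would look like:
{"prompt": "Context: Paris is the capital of France. Question: What is the capital of France? Answer:", "completion": "Paris"}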
Other flags that can be set
1. `--drop_duplicates` : Use this flag to drop rows that are exactly the same for both prompt and completion
2. `--split_train_validation` : Use this flag to split one file into separate train and validation files.
3. `--val_proportion 0.1`: Use a float (default 0.1) between 0 and 1 to control how much of the dataset to allocate to the validation set and the remaining for the train dataset.
4. `--short_context_model`: Use this flag to prepare data for use with models that have shorter context length of 2048 tokens (e.g. 5B and 20B models)
What to expect
After running this script, you will see a list of suggestions under ACTIONABLE MESSAGES as well as some insights into your dataset under INFORMATIONAL MESSAGES.
We suggest you prioritize the changes suggested under ACTIONABLE MESSAGES, but also look at the INFORMATIONAL MESSAGES to make sure the changes were applied as expected.
"""
import argparse
import math
import os
import pathlib
from collections import Counter
import numpy as np
import pandas as pd
def load_file_into_df(filename):
message = None
if not os.path.isfile(filename):
raise ValueError(f"File {filename} does not exist")
if filename.lower().endswith(".jsonl"):
df = pd.read_json(filename, lines=True, dtype=str).fillna("")
elif filename.lower().endswith(".json"):
df = pd.read_json(filename, dtype=str).fillna("")
elif filename.lower().endswith(".xlsx"):
df = pd.read_excel(filename, dtype=str).fillna("")
message = "Note only the first sheet in your Excel file will be read."
elif filename.lower().endswith(".csv"):
df = pd.read_csv(filename, sep=",", dtype=str).fillna("")
elif filename.lower().endswith(".tsv"):
df = pd.read_csv(filename, sep="\t", dtype=str).fillna("")
else:
raise ValueError(
f"Filename {filename} does not have the acceptable extension of .jsonl, .json, .xlsx, .csv or .tsv"
)
return df, message
def recommend_hyperparameters_human_readable(recommended_hyperparameters):
message = 'TODO: Recommended hyperparameters\n'
for param, param_value in recommended_hyperparameters.items():
message += f'{param}: {param_value}\n'
return message
def recommend_hyperparameters(df, model=None):
"""
    Recommends training hyperparameters (batch size, learning rate, number of epochs, max sequence length, encoder hidden size) based on the dataset size and sample lengths
"""
potential_batch_sizes = [2, 4, 8, 12, 16, 32, 64, 128]
max_bs = 128
if len(df) < 128:
max_bs = 2
for potential_bs in potential_batch_sizes:
if potential_bs < len(df) * 0.9:
max_bs = potential_bs
bs = min(max_bs, 32)
df_char_length = df.apply(lambda x: len(x.prompt) + len(x.completion), axis=1)
length_by_chars = sorted(list(df_char_length))
n_samples_under_99p5_limit = math.ceil(len(df_char_length) * 0.995)
char_length_99p5 = length_by_chars[n_samples_under_99p5_limit - 1]
mean_char_length = np.mean(length_by_chars)
std_char_length = np.std(length_by_chars)
# filter out only outliers that are >2 std above mean
max_char_length = max(min(mean_char_length + 2 * std_char_length, length_by_chars[-1]), char_length_99p5)
# every token is around 4 chars + 100 for extra capacity
max_seq_length = max_char_length // 4 + 100
if len(df) <= 100:
encoder_hidden_size = 1024
elif len(df) <= 1000:
encoder_hidden_size = 2048
else:
encoder_hidden_size = 4096
if len(df) <= 100:
lr = 5e-3
elif len(df) <= 1000:
lr = 1e-3
elif len(df) <= 10000:
lr = 5e-4
else:
lr = 1e-4
return {
'batch_size': bs,
'max_batch_size': max_bs,
'num_virtual_tokens': 10,
'lr': lr,
'epochs': 10,
'max_seq_length': max_seq_length,
'encoder_hidden_size': encoder_hidden_size,
}
def estimating_customization_job_time(df, recommended_hyperparameters):
recommended_batch_size = recommended_hyperparameters['batch_size']
size = df.memory_usage(index=True, deep=True).sum()
time_in_seconds_per_epoch = size / recommended_batch_size * 0.0025
if time_in_seconds_per_epoch < 60:
time_per_epoch = f"{round(time_in_seconds_per_epoch, 2)} seconds"
elif time_in_seconds_per_epoch < 3600:
time_per_epoch = f"{round(time_in_seconds_per_epoch/60, 2)} minutes"
else:
time_per_epoch = f"{round(time_in_seconds_per_epoch/3600, 2)} hours"
message = f"TODO: Training will take around {time_per_epoch} for each epoch for gpt20b model and around half of that for gpt5b. Please set no. of epochs accordingly to ensure that the limit of 8h total is not exceeded."
return message
def warn_completion_is_not_empty(df):
message = None
field = "completion"
empty_rows = (df[field] == "") | (df[field].isnull())
empty_indexes = df.reset_index().index[empty_rows].tolist()
if len(empty_indexes) == len(df):
message = (
"TODO: Note all completion fields are empty. This is possibly expected for inference but not for training"
)
elif len(empty_indexes) != 0:
message = f"""TODO: completion contains {len(empty_indexes)} empty values at rows ({empty_indexes})
Please check the original file that the fields for prompt template are
not empty and rerun dataset validation"""
return message
def warn_imbalanced_completion(df):
completions = df["completion"].tolist()
completions_counter = Counter(completions)
message = None
# low variety of unique completions relative to completions
# suggesting it is a classification set up
if len(completions_counter) < len(completions) / 3:
message = f"There are {len(completions_counter)} unique completions over {len(completions)} samples.\nThe five most common completions are:"
for completion, n in completions_counter.most_common(5):
message += f"\n {n} samples ({round(100*n/len(completions),0)}%) with completion: {completion}"
return message
def get_common_suffix(series):
common_suffix = ""
while True:
candidate_common_suffixes = series.str[-(len(common_suffix) + 1) :]
if candidate_common_suffixes.nunique() != 1:
# candidate_common_suffixes contains more than one value
# therefore, it is no longer a common suffix
break
elif common_suffix == candidate_common_suffixes.values[0]:
# candidate is the same as previous common_suffix
# therefore values in series are too short to move back by one char
break
else:
common_suffix = candidate_common_suffixes.values[0]
return common_suffix
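# Worked example (illustrative): for a series ["Q: a Answer:", "Q: b Answer:"] the longest
# shared suffix is " Answer:", so get_common_suffix returns " Answer:".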
def warn_missing_suffix(df):
message = ''
for field in ["prompt", "completion"]:
if not get_common_suffix(df[field]):
message += f"TODO: {field} does not have common suffix, please add one (e.g. \\n) at the end of {field}_template\n"
return message if message else None
def validate_template(template):
template_with_only_brackets = [i for i in template if i in ["{", "}"]]
    error_msg = (
        "Your template ("
        + template
        + ") is not in the correct format. "
        "A template contains zero or more fields, each specified as {field}. "
        "For instance, it can be 'Context: {context} Question: {question} Answer:'"
    )
if len(template_with_only_brackets) % 2 != 0:
raise ValueError(error_msg)
for i in range(0, len(template_with_only_brackets), 2):
if not (template_with_only_brackets[i] == "{" and template_with_only_brackets[i + 1] == "}"):
raise ValueError(error_msg)
return None
def parse_template(template):
field_names = []
i = 0
in_field = False
while i < len(template):
if template[i] == "{":
field_names.append("")
in_field = True
elif template[i] == "}":
in_field = False
elif in_field:
field_names[-1] += template[i]
else:
pass
i += 1
return field_names
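# Example: parse_template("Context: {context} Question: {question} Answer:") returns
# ['context', 'question']; the field names are later substituted from the matching columns.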
def warn_duplicated_rows(df):
message = None
duplicated_rows = df.duplicated()
duplicated_indices = df.reset_index().index[duplicated_rows].tolist()
if len(duplicated_indices) > 0:
message = f"TODO: There are {len(duplicated_indices)} duplicated rows "
message += f"at rows ({duplicated_indices}) \n"
message += "Please check the original file to make sure that is expected\n"
message += "If it is not, please add the argument --drop_duplicate"
return message
def drop_duplicated_rows(df):
duplicated_rows = df.duplicated()
duplicated_indices = df.reset_index().index[duplicated_rows].tolist()
message = None
if len(duplicated_indices) > 0:
df = df.drop_duplicates()
message = f"There are {len(duplicated_indices)} duplicated rows\n"
message += f"Removed {len(duplicated_indices)} duplicate rows"
return df, message
def template_mapper(row, field_names, template):
for field_name in field_names:
template = template.replace("{" + field_name + "}", row[field_name])
return template
def drop_unrequired_fields(df, required_fields=["prompt", "completion"]):
for column in df.columns:
if column not in required_fields:
df = df.drop(column, axis=1)
return df
def convert_into_template(df, template, prompt_or_completion="prompt"):
validate_template(template)
template = template.replace("\\n", "\n")
field_names = parse_template(template)
for field_name in field_names:
if field_name not in df.columns:
raise ValueError(
f"Field {field_name} requested in {prompt_or_completion}_template ({template}) but not found in file columns, which contains {list(df.columns)}"
)
df[prompt_or_completion] = df.apply(lambda row: template_mapper(row, field_names, template), axis=1)
return df
def convert_into_prompt_completion_only(df, prompt_template="{prompt}", completion_template="{completion}"):
df = convert_into_template(df, prompt_template, prompt_or_completion="prompt")
df = convert_into_template(df, completion_template, prompt_or_completion="completion")
df = drop_unrequired_fields(df)
return df
def warn_and_drop_long_samples(df, max_total_char_length):
long_examples = df.apply(lambda x: len(x.prompt) + len(x.completion) > max_total_char_length, axis=1)
indices_of_long_examples = df.reset_index().index[long_examples].tolist()
message = None
if len(indices_of_long_examples) > 0:
message = f"""TODO: There are {len(indices_of_long_examples)} / {len(df)}
samples that have its prompt and completion too long
(over {max_total_char_length} chars), which have been dropped."""
df = df.drop(indices_of_long_examples).reset_index()
df = df.drop('index', axis=1)
return df, message
def warn_low_n_samples(df, min_samples=64):
if len(df) < min_samples:
return f"""TODO: We would recommend having more samples (>{min_samples}) if possible but current_file only contains {len(df)} samples. """
return None
def show_first_example_in_df(df):
message = ''
for column in df.columns:
        # show '\n' literally instead of rendering an actual newline in the printed example
column_value = df[column][0].replace('\n', '\\n')
message += f"-->Column {column}:\n{column_value}\n"
return message
def get_prepared_filename(filename, split_train_validation=False):
message = ""
file_extension = pathlib.Path(filename).suffix
if not split_train_validation:
new_filename = filename.replace(file_extension, "_prepared.jsonl")
retry = 0
while os.path.isfile(new_filename):
message += f"File {new_filename} exists. Trying next available filename increment\n"
retry += 1
new_filename = filename.replace(file_extension, f"_prepared{retry}.jsonl")
return new_filename, message if message else None
else:
train_filename = filename.replace(file_extension, "_prepared_train.jsonl")
val_filename = filename.replace(file_extension, "_prepared_val.jsonl")
retry = 0
while os.path.isfile(train_filename) or os.path.isfile(val_filename):
message += f"File {train_filename} or {val_filename} exists. Trying next available filename increment\n"
retry += 1
train_filename = filename.replace(file_extension, f"_prepared_train{retry}.jsonl")
val_filename = filename.replace(file_extension, f"_prepared_val{retry}.jsonl")
return [train_filename, val_filename], message if message else None
def split_into_train_validation(df, val_proportion=0.1):
n_val = int(val_proportion * len(df))
df_val = df.sample(n=n_val, random_state=42)
df_train = df.drop(df_val.index)
return df_train, df_val
def write_df_to_jsonl(df, filename):
df.to_json(filename, lines=True, orient="records", force_ascii=False)
return f"File {filename} written"
def print_select_messages(title, select_messages):
print("*" * 40)
print(title)
print("*" * 40)
for idx, message in enumerate(select_messages):
print(f"{idx+1}.")
print(message)
def print_all_messages(messages):
messages = [message for message in messages if message]
info_messages = [message for message in messages if not message.startswith("TODO")]
to_do_messages = [message for message in messages if message.startswith("TODO")]
print_select_messages("ACTIONABLE MESSAGES", to_do_messages)
print_select_messages("INFORMATIONAL MESSAGES", info_messages)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepares data for NeMoLLM Customization Service")
parser.add_argument("--filename", "-f", required=True)
parser.add_argument("--prompt_template", "-pt", default="{prompt}")
parser.add_argument("--completion_template", "-ct", default="{completion}")
parser.add_argument("--drop_duplicates", "-dd", action="store_true")
parser.add_argument("--split_train_validation", "-stv", action="store_true")
parser.add_argument(
"--short_context_model",
"-scm",
action="store_true",
help="Specifies if using models with shorter context length of 2048 tokens e.g. 5B and 20B models",
)
parser.add_argument(
"--val_proportion",
"-vp",
default=0.1,
type=float,
        help="A float between 0 and 1 (default 0.1) giving the proportion of samples to put "
        "into the validation set; only used when --split_train_validation is set",
)
args = parser.parse_args()
messages = []
messages.append(str(args))
if args.short_context_model:
MAX_TOKEN_LENGTH = 2048
else:
MAX_TOKEN_LENGTH = 4096
# every token is around 4 chars
MAX_TOTAL_CHAR_LENGTH = 4 * MAX_TOKEN_LENGTH
df, message = load_file_into_df(args.filename)
messages.append(message)
messages.append("-------Before converting into prompt and completion template------ \n")
messages[-1] += show_first_example_in_df(df)
df = convert_into_prompt_completion_only(
df, prompt_template=args.prompt_template, completion_template=args.completion_template
)
messages.append("-------After converting into prompt and completion template------ \n")
messages[-1] += show_first_example_in_df(df)
if args.drop_duplicates:
df, message = drop_duplicated_rows(df)
messages.append(message)
else:
messages.append(warn_duplicated_rows(df))
messages.append(warn_missing_suffix(df))
messages.append(warn_completion_is_not_empty(df))
messages.append(warn_imbalanced_completion(df))
messages.append(warn_low_n_samples(df))
df, message = warn_and_drop_long_samples(df, MAX_TOTAL_CHAR_LENGTH)
messages.append(message)
recommended_hyperparameters = recommend_hyperparameters(df)
recommend_hyperparameters_message = recommend_hyperparameters_human_readable(recommended_hyperparameters)
messages.append(recommend_hyperparameters_message)
messages.append(estimating_customization_job_time(df, recommended_hyperparameters))
prepared_filename, message = get_prepared_filename(
args.filename, split_train_validation=args.split_train_validation
)
messages.append(message)
if args.split_train_validation:
df_train, df_val = split_into_train_validation(df, val_proportion=args.val_proportion)
messages.append(write_df_to_jsonl(df_train, prepared_filename[0]))
messages.append(write_df_to_jsonl(df_val, prepared_filename[1]))
else:
messages.append(write_df_to_jsonl(df, prepared_filename))
print_all_messages(messages)
|
NeMo-main
|
tools/customization_dataset_preparation/customization_dataset_preparation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
from ..customization_dataset_preparation import (
convert_into_prompt_completion_only,
convert_into_template,
drop_duplicated_rows,
drop_unrequired_fields,
get_common_suffix,
get_prepared_filename,
parse_template,
recommend_hyperparameters,
show_first_example_in_df,
split_into_train_validation,
template_mapper,
validate_template,
warn_and_drop_long_samples,
warn_completion_is_not_empty,
warn_duplicated_rows,
warn_imbalanced_completion,
warn_low_n_samples,
warn_missing_suffix,
)
def test_recommend_hyperparameters():
df_100 = pd.DataFrame({'prompt': ['prompt'] * 100, 'completion': ['completion'] * 100})
assert recommend_hyperparameters(df_100) == {
'batch_size': 32,
'max_batch_size': 64,
'num_virtual_tokens': 10,
'encoder_hidden_size': 1024,
'lr': 0.005,
'epochs': 10,
'max_seq_length': 104,
}
df_1000 = pd.DataFrame({'prompt': ['prompt'] * 1000, 'completion': ['completion'] * 1000})
assert recommend_hyperparameters(df_1000) == {
'batch_size': 32,
'max_batch_size': 128,
'num_virtual_tokens': 10,
'encoder_hidden_size': 2048,
'lr': 0.001,
'epochs': 10,
'max_seq_length': 104,
}
df_10000 = pd.DataFrame({'prompt': ['prompt'] * 10000, 'completion': ['completion'] * 10000})
assert recommend_hyperparameters(df_10000) == {
'batch_size': 32,
'max_batch_size': 128,
'num_virtual_tokens': 10,
'encoder_hidden_size': 4096,
'lr': 0.0005,
'epochs': 10,
'max_seq_length': 104,
}
df_100000 = pd.DataFrame({'prompt': ['prompt'] * 100000, 'completion': ['completion'] * 100000})
assert recommend_hyperparameters(df_100000) == {
'batch_size': 32,
'max_batch_size': 128,
'num_virtual_tokens': 10,
'encoder_hidden_size': 4096,
'lr': 0.0001,
'epochs': 10,
'max_seq_length': 104,
}
def test_warn_completion_is_not_empty():
df_all_empty = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': [''] * 2})
msg_all_empty = (
"TODO: Note all completion fields are empty. This is possibly expected for inference but not for training"
)
assert warn_completion_is_not_empty(df_all_empty) == msg_all_empty
df_some_empty = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': ['', 'completion']})
msg_some_empty = f"""TODO: completion contains {1} empty values at rows ({[0]})
Please check the original file that the fields for prompt template are
not empty and rerun dataset validation"""
assert warn_completion_is_not_empty(df_some_empty) == msg_some_empty
df_no_empty = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': ['completion'] * 2})
assert warn_completion_is_not_empty(df_no_empty) is None
def test_warn_imbalanced_completion():
df_generation = pd.DataFrame(
{'prompt': [f'prompt{i}' for i in range(100)], 'completion': [f'completion{i}' for i in range(100)]}
)
assert warn_imbalanced_completion(df_generation) is None
df_classification_balanced = pd.DataFrame(
{'prompt': [f'prompt{i}' for i in range(100)], 'completion': [f'completion{i}' for i in range(5)] * 20}
)
msg_classification_balanced = (
f"There are {5} unique completions over {100} samples.\nThe five most common completions are:"
)
for i in range(5):
msg_classification_balanced += f"\n {20} samples ({20.0}%) with completion: completion{i}"
assert warn_imbalanced_completion(df_classification_balanced) == msg_classification_balanced
df_classification_imbalanced = pd.DataFrame(
{
'prompt': [f'prompt{i}' for i in range(100)],
'completion': ['completion0'] * 95 + [f'completion{i}' for i in range(5)],
}
)
msg_classification_imbalanced = (
f"There are {5} unique completions over {100} samples.\nThe five most common completions are:"
)
msg_classification_imbalanced += f"\n {96} samples ({96.0}%) with completion: completion0"
for i in range(1, 5):
msg_classification_imbalanced += f"\n {1} samples ({1.0}%) with completion: completion{i}"
assert warn_imbalanced_completion(df_classification_imbalanced) == msg_classification_imbalanced
def test_get_common_suffix():
df = pd.DataFrame(
{
'prompt': [f'prompt{i} answer:' for i in range(100)],
'completion': [f'completion{i}' for i in range(100)],
'empty_completion': [''] * 100,
'some_empty_completion': ['', 'completion'] * 50,
}
)
assert get_common_suffix(df.prompt) == " answer:"
assert get_common_suffix(df.completion) == ""
assert get_common_suffix(df.empty_completion) == ""
assert get_common_suffix(df.some_empty_completion) == ""
def test_warn_missing_suffix():
df_no_common = pd.DataFrame(
{'prompt': [f'prompt{i}' for i in range(100)], 'completion': [f'completion{i}' for i in range(100)],}
)
message = f"TODO: prompt does not have common suffix, please add one (e.g. \\n) at the end of prompt_template\n"
message += (
f"TODO: completion does not have common suffix, please add one (e.g. \\n) at the end of completion_template\n"
)
assert warn_missing_suffix(df_no_common) == message
df_common = pd.DataFrame(
{'prompt': [f'prompt{i} answer:' for i in range(100)], 'completion': [f'completion{i}\n' for i in range(100)],}
)
assert warn_missing_suffix(df_common) is None
def test_parse_template():
template_qa_prompt = "Context: {context}, Question: {question} Answer:"
template_qa_completion = "{answer}"
template_prompt = "{prompt}"
template_completion = "{completion}"
assert parse_template(template_qa_prompt) == ['context', 'question']
assert parse_template(template_qa_completion) == ['answer']
assert parse_template(template_prompt) == ['prompt']
assert parse_template(template_completion) == ['completion']
def test_validate_template():
template = "{prompt}"
template_missing_left = "prompt}"
template_missing_right = "{prompt"
template_twice = "{{prompt}}"
template_enclosed = "{prompt{enclosed}}"
assert validate_template(template) is None
with pytest.raises(ValueError):
validate_template(template_missing_left)
with pytest.raises(ValueError):
validate_template(template_missing_right)
with pytest.raises(ValueError):
validate_template(template_twice)
with pytest.raises(ValueError):
validate_template(template_enclosed)
def test_warn_duplicated_rows():
df_duplicated = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': ['completion'] * 2})
message_duplicated = f"TODO: There are {1} duplicated rows "
message_duplicated += f"at rows ([1]) \n"
message_duplicated += "Please check the original file to make sure that is expected\n"
message_duplicated += "If it is not, please add the argument --drop_duplicate"
assert warn_duplicated_rows(df_duplicated) == message_duplicated
df_unique = pd.DataFrame({'prompt': ['prompt', 'prompt1'], 'completion': ['completion', 'completion1']})
assert warn_duplicated_rows(df_unique) is None
df_only_prompt_duplicated = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': ['completion', 'completion1']})
assert warn_duplicated_rows(df_only_prompt_duplicated) is None
def test_drop_duplicated_rows():
df_deduplicated = pd.DataFrame({'prompt': ['prompt'], 'completion': ['completion']})
df_duplicated = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': ['completion'] * 2})
message_duplicated = "There are 1 duplicated rows\n"
message_duplicated += "Removed 1 duplicate rows"
assert drop_duplicated_rows(df_duplicated)[0].equals(df_deduplicated)
assert drop_duplicated_rows(df_duplicated)[1] == message_duplicated
df_unique = pd.DataFrame({'prompt': ['prompt', 'prompt1'], 'completion': ['completion', 'completion1']})
assert drop_duplicated_rows(df_unique) == (df_unique, None)
df_only_prompt_duplicated = pd.DataFrame({'prompt': ['prompt'] * 2, 'completion': ['completion', 'completion1']})
assert drop_duplicated_rows(df_only_prompt_duplicated) == (df_only_prompt_duplicated, None)
def test_template_mapper():
df = pd.DataFrame({'prompt': ['prompt sample'],})
template = "{prompt}"
field_names = ['prompt']
assert template_mapper(df.iloc[0], field_names, template) == 'prompt sample'
df_qa = pd.DataFrame({'question': ['question sample'], 'context': ['context sample']})
template_qa = "Context: {context} Question: {question} Answer:"
field_names_qa = ['context', 'question']
assert (
template_mapper(df_qa.iloc[0], field_names_qa, template_qa)
== "Context: context sample Question: question sample Answer:"
)
def test_drop_unrequired_fields():
df = pd.DataFrame(
{'question': ['question'], 'context': ['context'], 'prompt': ['prompt'], 'completion': ['completion']}
)
df_dropped_unnecessary_fields = pd.DataFrame({'prompt': ['prompt'], 'completion': ['completion']})
assert df_dropped_unnecessary_fields.equals(drop_unrequired_fields(df))
def test_convert_into_template():
df_non_existant_field_name = pd.DataFrame({'question': ['question']})
template = "Context: {context} Question: {question} Answer:"
with pytest.raises(ValueError):
convert_into_template(df_non_existant_field_name, template)
df = pd.DataFrame({'question': ['question sample'], 'context': ['context sample'],})
df_prompt = pd.DataFrame(
{
'question': ['question sample'],
'context': ['context sample'],
'prompt': ["Context: context sample Question: question sample Answer:"],
}
)
assert convert_into_template(df, template).equals(df_prompt)
def test_convert_into_prompt_completion_only():
df = pd.DataFrame({'question': ['question sample'], 'context': ['context sample'], 'answer': ['answer sample']})
df_prompt = pd.DataFrame(
{'prompt': ["Context: context sample Question: question sample Answer:"], 'completion': ["answer sample"]}
)
prompt_template = "Context: {context} Question: {question} Answer:"
completion_template = "{answer}"
assert df_prompt.equals(
convert_into_prompt_completion_only(
df, prompt_template=prompt_template, completion_template=completion_template
)
)
assert df_prompt.equals(convert_into_prompt_completion_only(df_prompt))
def get_indexes_of_long_examples(df, max_total_char_length):
long_examples = df.apply(lambda x: len(x.prompt) + len(x.completion) > max_total_char_length, axis=1)
return df.reset_index().index[long_examples].tolist()
def test_warn_and_drop_long_samples():
df = pd.DataFrame({'prompt': ['a' * 12000, 'a' * 9000, 'a'], 'completion': ['b' * 12000, 'b' * 2000, 'b']})
expected_df = pd.DataFrame({'prompt': ['a'], 'completion': ['b']})
message = f"""TODO: There are {2} / {3}
samples that have its prompt and completion too long
(over {10000} chars), which have been dropped."""
assert expected_df.equals(warn_and_drop_long_samples(df, 10000)[0])
assert warn_and_drop_long_samples(df, 10000)[1] == message
df_short = pd.DataFrame({'prompt': ['a'] * 2, 'completion': ['b'] * 2})
assert warn_and_drop_long_samples(df_short, 10000) == (df_short, None)
def test_warn_low_n_samples():
df_low = pd.DataFrame({'prompt': ['a'] * 10, 'completion': ['b'] * 10})
df_high = pd.DataFrame({'prompt': ['a'] * 100, 'completion': ['b'] * 100})
message = (
"TODO: We would recommend having more samples (>64) if possible but current_file only contains 10 samples. "
)
assert warn_low_n_samples(df_low) == message
assert warn_low_n_samples(df_high) is None
def test_show_first_example_in_df():
df = pd.DataFrame({'question': ['question sample'], 'context': ['context sample'], 'answer': ['answer sample']})
message = f"-->Column question:\nquestion sample\n"
message += f"-->Column context:\ncontext sample\n"
message += f"-->Column answer:\nanswer sample\n"
assert message == show_first_example_in_df(df)
def test_get_prepared_filename():
filename = "tmp/sample.jsonl"
prepared_filename = "tmp/sample_prepared.jsonl"
prepared_train_filename = "tmp/sample_prepared_train.jsonl"
prepared_val_filename = "tmp/sample_prepared_val.jsonl"
assert get_prepared_filename(filename) == (prepared_filename, None)
assert get_prepared_filename(filename, split_train_validation=True) == (
[prepared_train_filename, prepared_val_filename,],
None,
)
csv_filename = "tmp/sample.csv"
prepared_filename = "tmp/sample_prepared.jsonl"
assert get_prepared_filename(csv_filename) == (prepared_filename, None)
def test_split_into_train_validation():
df = pd.DataFrame({'prompt': ['a'] * 10, 'completion': ['b'] * 10})
df_train, df_val = split_into_train_validation(df, val_proportion=0.1)
assert len(df_train) == 9
assert len(df_val) == 1
df_train, df_val = split_into_train_validation(df, val_proportion=0.2)
assert len(df_train) == 8
assert len(df_val) == 2
|
NeMo-main
|
tools/customization_dataset_preparation/tests/test_customization_dataset_preparation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
tools/customization_dataset_preparation/tests/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.data.data_simulation import RIRMixGenerator
from nemo.core.config import hydra_runner
"""
This script creates a corpus of signals using RIRs, speech and noise.
Usage:
python rir_mix_generator.py --config-path PATH_TO_CONFIG_DIR --config-name CONFIG_NAME output_dir=OUTPUT_DIR
Details of the configuration can be found in the example configuration files in conf/*
"""
@hydra_runner(config_path="conf", config_name="rir_mix.yaml")
def main(cfg):
rir_mix = RIRMixGenerator(cfg=cfg)
rir_mix.generate()
if __name__ == "__main__":
main()
|
NeMo-main
|
tools/rir_corpus_generator/rir_mix_generator.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.data.data_simulation import RIRCorpusGenerator
from nemo.core.config import hydra_runner
"""
This script creates a corpus of room impulse responses.
Usage:
python rir_corpus_generator.py --config-path PATH_TO_CONFIG_DIR --config-name CONFIG_NAME output_dir=OUTPUT_DIR
Details of the configuration can be found in the example configuration files in conf/*
"""
@hydra_runner(config_path="conf", config_name="rir_corpus.yaml")
def main(cfg):
room_corpus = RIRCorpusGenerator(cfg=cfg)
room_corpus.generate()
if __name__ == "__main__":
main()
|
NeMo-main
|
tools/rir_corpus_generator/rir_corpus_generator.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import flask
import torch
from flask import Flask, json, request
from flask_cors import CORS
import nemo.collections.nlp as nemo_nlp
from nemo.utils import logging
MODELS_DICT = {}
model = None
api = Flask(__name__)
CORS(api)
def initialize(config_file_path: str):
"""
Loads 'language-pair to NMT model mapping'
"""
__MODELS_DICT = None
logging.info("Starting NMT service")
logging.info(f"I will attempt to load all the models listed in {config_file_path}.")
logging.info(f"Edit {config_file_path} to disable models you don't need.")
if torch.cuda.is_available():
logging.info("CUDA is available. Running on GPU")
else:
logging.info("CUDA is not available. Defaulting to CPUs")
# read config
with open(config_file_path) as f:
__MODELS_DICT = json.load(f)
if __MODELS_DICT is not None:
for key, value in __MODELS_DICT.items():
logging.info(f"Loading model for {key} from file: {value}")
if value.startswith("NGC/"):
model = nemo_nlp.models.machine_translation.MTEncDecModel.from_pretrained(model_name=value[4:])
else:
model = nemo_nlp.models.machine_translation.MTEncDecModel.restore_from(restore_path=value)
if torch.cuda.is_available():
model = model.cuda()
MODELS_DICT[key] = model
else:
raise ValueError("Did not find the config.json or it was empty")
logging.info("NMT service started")
@api.route('/translate', methods=['GET', 'POST', 'OPTIONS'])
def get_translation():
try:
time_s = time.time()
langpair = request.args["langpair"]
src = request.args["text"]
do_moses = request.args.get('do_moses', False)
if langpair in MODELS_DICT:
if do_moses:
result = MODELS_DICT[langpair].translate(
[src], source_lang=langpair.split('-')[0], target_lang=langpair.split('-')[1]
)
else:
result = MODELS_DICT[langpair].translate([src])
duration = time.time() - time_s
logging.info(
f"Translated in {duration}. Input was: {request.args['text']} <############> Translation was: {result[0]}"
)
res = {'translation': result[0]}
response = flask.jsonify(res)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
else:
logging.error(f"Got the following langpair: {langpair} which was not found")
res = {'translation': f"Unsupported language pair: {langpair}"}
response = flask.jsonify(res)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
except Exception as ex:
res = {'translation': str(ex)}
response = flask.jsonify(res)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == '__main__':
initialize('config.json')
api.run(host='0.0.0.0')
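# Illustrative request against the running service (Flask defaults to port 5000; parameter names match the handler above):
#   curl "http://localhost:5000/translate?langpair=en-es&text=Hello%20world"
# Expected response shape: {"translation": "..."}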
|
NeMo-main
|
tools/nmt_webapp/nmt_service.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import git
from omegaconf import OmegaConf, open_dict
from utils import cal_target_metadata_wer, run_asr_inference
from nemo.collections.asr.parts.utils.eval_utils import cal_write_wer
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
This script serves as evaluator of ASR models
Usage:
python asr_evaluator.py \
engine.pretrained_name="stt_en_conformer_transducer_large" \
engine.inference.mode="offline" \
engine.test_ds.augmentor.noise.manifest_path=<manifest file for noise data> \
.....
Check out parameters in ./conf/eval.yaml
"""
@hydra_runner(config_path="conf", config_name="eval.yaml")
def main(cfg):
report = {}
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
# Store git hash for reproducibility
if cfg.env.save_git_hash:
repo = git.Repo(search_parent_directories=True)
report['git_hash'] = repo.head.object.hexsha
## Engine
# Could skip run_asr_inference and use the generated manifest by
# specifying analyst.metric_calculator.exist_pred_manifest
if cfg.analyst.metric_calculator.exist_pred_manifest is None:
# If need to change more parameters for ASR inference, change it in
# 1) shell script in utils.py
# 2) TranscriptionConfig on top of the executed scripts such as transcribe_speech.py in examples/asr
# Note: we SKIP calculating WER during the asr_inference stage (calculate_wer=False) and instead compute WER per sample below,
# which gives more flexibility and avoids potentially redundant inference cost.
cfg.engine = run_asr_inference(cfg=cfg.engine)
else:
logging.info(
f"Use generated prediction manifest {cfg.analyst.metric_calculator.exist_pred_manifest} and skip enigneer"
)
with open_dict(cfg):
cfg.engine.output_filename = cfg.analyst.metric_calculator.exist_pred_manifest
## Analyst
output_manifest_w_wer, total_res, eval_metric = cal_write_wer(
pred_manifest=cfg.engine.output_filename,
clean_groundtruth_text=cfg.analyst.metric_calculator.clean_groundtruth_text,
langid=cfg.analyst.metric_calculator.langid,
use_cer=cfg.analyst.metric_calculator.use_cer,
output_filename=cfg.analyst.metric_calculator.output_filename,
)
with open_dict(cfg):
cfg.analyst.metric_calculator.output_filename = output_manifest_w_wer
report.update({"res": total_res})
for target in cfg.analyst.metadata:
if cfg.analyst.metadata[target].enable:
occ_avg_wer = cal_target_metadata_wer(
manifest=cfg.analyst.metric_calculator.output_filename,
target=target,
meta_cfg=cfg.analyst.metadata[target],
eval_metric=eval_metric,
)
report[target] = occ_avg_wer
config_engine = OmegaConf.to_object(cfg.engine)
report.update(config_engine)
config_metric_calculator = OmegaConf.to_object(cfg.analyst.metric_calculator)
report.update(config_metric_calculator)
pretty = json.dumps(report, indent=4)
res = "%.3f" % (report["res"][eval_metric] * 100)
logging.info(pretty)
logging.info(f"Overall {eval_metric} is {res} %")
## Writer
report_file = "report.json"
if "report_filename" in cfg.writer and cfg.writer.report_filename:
report_file = cfg.writer.report_filename
with open(report_file, "a") as fout:
json.dump(report, fout)
fout.write('\n')
fout.flush()
if __name__ == "__main__":
main()
|
NeMo-main
|
tools/asr_evaluator/asr_evaluator.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import subprocess
import tempfile
from pathlib import Path
from typing import Tuple
from omegaconf import DictConfig, OmegaConf, open_dict
from nemo.utils import logging
def run_asr_inference(cfg: DictConfig) -> DictConfig:
"""
Execute ASR inference based on input mode and parameters.
"""
if (cfg.model_path and cfg.pretrained_name) or (not cfg.model_path and not cfg.pretrained_name):
raise ValueError("Please specify either cfg.model_path or cfg.pretrained_name!")
if cfg.inference.decoder_type not in [None, 'ctc', 'rnnt']:
raise ValueError("decoder_type could only be null, ctc or rnnt")
if cfg.inference.mode == "offline":
cfg = run_offline_inference(cfg)
elif cfg.inference.mode == "chunked":
if (
"total_buffer_in_secs" not in cfg.inference
or "chunk_len_in_secs" not in cfg.inference
or not cfg.inference.total_buffer_in_secs
or not cfg.inference.chunk_len_in_secs
):
raise ValueError(f"Please specify both total_buffer_in_secs and chunk_len_in_secs for chunked inference")
cfg = run_chunked_inference(cfg)
elif cfg.inference.mode == "offline_by_chunked":
# When using Conformer to transcribe long audio samples, we may run into CUDA out-of-memory issues.
# Here we use offline_by_chunked mode to simulate offline mode for Conformer,
# and we specify default total_buffer_in_secs=22 and chunk_len_in_secs=20 to avoid the above problem.
OmegaConf.set_struct(cfg, True)
if 'total_buffer_in_secs' not in cfg.inference or not cfg.inference.total_buffer_in_secs:
with open_dict(cfg):
cfg.inference.total_buffer_in_secs = 22
logging.info(
f"Does not provide total_buffer_in_secs required by {cfg.inference.mode} mode. Using default value {cfg.inference.total_buffer_in_secs}"
)
if 'chunk_len_in_secs' not in cfg.inference or not cfg.inference.chunk_len_in_secs:
with open_dict(cfg):
cfg.inference.chunk_len_in_secs = 20
logging.info(
f"Does not provide total_buffer_in_secs required by {cfg.inference.mode} mode. Using default value {cfg.inference.chunk_len_in_secs}"
)
cfg = run_chunked_inference(cfg)
else:
raise ValueError(f"inference could only be offline or chunked, but got {cfg.inference.mode}")
return cfg
def run_chunked_inference(cfg: DictConfig) -> DictConfig:
if "output_filename" not in cfg or not cfg.output_filename:
if cfg.model_path:
model_name = Path(cfg.model_path).stem
else:
model_name = cfg.pretrained_name
dataset_name = Path(cfg.test_ds.manifest_filepath).stem
mode_name = (
cfg.inference.mode
+ "B"
+ str(cfg.inference.total_buffer_in_secs)
+ "C"
+ str(cfg.inference.chunk_len_in_secs)
)
OmegaConf.set_struct(cfg, True)
with open_dict(cfg):
cfg.output_filename = model_name + "-" + dataset_name + "-" + mode_name + ".json"
script_path = (
Path(__file__).parents[2]
/ "examples"
/ "asr"
/ "asr_chunked_inference"
/ "ctc"
/ "speech_to_text_buffered_infer_ctc.py"
)
use_rnnt_script = False
# hybrid model
if (cfg.pretrained_name and 'hybrid' in cfg.pretrained_name.lower()) or (
cfg.model_path and 'hybrid' in cfg.model_path.lower()
):
if cfg.inference.decoder_type != 'ctc':
use_rnnt_script = True
# rnnt model
elif (
(cfg.pretrained_name and 'rnnt' in cfg.pretrained_name.lower())
or (cfg.pretrained_name and 'transducer' in cfg.pretrained_name.lower())
or (cfg.model_path and 'rnnt' in cfg.model_path.lower())
or (cfg.model_path and 'transducer' in cfg.model_path.lower())
):
if cfg.inference.decoder_type and cfg.inference.decoder_type != 'rnnt':
raise ValueError(
f"rnnt models only support rnnt deocoding! Current decoder_type: {cfg.inference.decoder_type}! Change it to null or rnnt for rnnt models"
)
use_rnnt_script = True
# ctc model
elif (cfg.pretrained_name and 'ctc' in cfg.pretrained_name.lower()) or (
cfg.model_path and 'ctc' in cfg.model_path.lower()
):
if cfg.inference.decoder_type and cfg.inference.decoder_type != 'ctc':
raise ValueError(
f"ctc models only support ctc deocoding! Current decoder_type: {cfg.inference.decoder_type}! Change it to null or ctc for ctc models"
)
else:
raise ValueError(
"Please make sure your pretrained_name or model_path contains \n\
'hybrid' for EncDecHybridRNNTCTCModel model, \n\
'transducer/rnnt' for EncDecRNNTModel model or \n\
'ctc' for EncDecCTCModel."
)
if use_rnnt_script:
script_path = (
Path(__file__).parents[2]
/ "examples"
/ "asr"
/ "asr_chunked_inference"
/ "rnnt"
/ "speech_to_text_buffered_infer_rnnt.py"
)
# If need to change other config such as decoding strategy, could either:
# 1) change TranscriptionConfig on top of the executed scripts such as speech_to_text_buffered_infer_rnnt.py, or
# 2) add command as "decoding.strategy=greedy_batch " to below script
base_cmd = f"python {script_path} \
calculate_wer=False \
model_path={cfg.model_path} \
pretrained_name={cfg.pretrained_name} \
dataset_manifest={cfg.test_ds.manifest_filepath} \
output_filename={cfg.output_filename} \
random_seed={cfg.random_seed} \
batch_size={cfg.test_ds.batch_size} \
num_workers={cfg.test_ds.num_workers} \
chunk_len_in_secs={cfg.inference.chunk_len_in_secs} \
total_buffer_in_secs={cfg.inference.total_buffer_in_secs} \
model_stride={cfg.inference.model_stride} "
subprocess.run(
base_cmd, shell=True, check=True,
)
return cfg
def run_offline_inference(cfg: DictConfig) -> DictConfig:
if "output_filename" not in cfg or not cfg.output_filename:
if cfg.model_path:
model_name = Path(cfg.model_path).stem
else:
model_name = cfg.pretrained_name
dataset_name = Path(cfg.test_ds.manifest_filepath).stem
mode_name = cfg.inference.mode
OmegaConf.set_struct(cfg, True)
with open_dict(cfg):
cfg.output_filename = model_name + "-" + dataset_name + "-" + mode_name + ".json"
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f:
OmegaConf.save(cfg, f)
f.seek(0) # reset file pointer
script_path = Path(__file__).parents[2] / "examples" / "asr" / "transcribe_speech.py"
# If need to change other config such as decoding strategy, could either:
# 1) change TranscriptionConfig on top of the executed scripts such as transcribe_speech.py in examples/asr, or
# 2) add command as "rnnt_decoding.strategy=greedy_batch " to below script
subprocess.run(
f"python {script_path} "
f"calculate_wer=False "
f"model_path={cfg.model_path} "
f"pretrained_name={cfg.pretrained_name} "
f"dataset_manifest={cfg.test_ds.manifest_filepath} "
f"output_filename={cfg.output_filename} "
f"batch_size={cfg.test_ds.batch_size} "
f"num_workers={cfg.test_ds.num_workers} "
f"random_seed={cfg.random_seed} "
f"eval_config_yaml={f.name} "
f"decoder_type={cfg.inference.decoder_type} ",
shell=True,
check=True,
)
return cfg
def cal_target_metadata_wer(manifest: str, target: str, meta_cfg: DictConfig, eval_metric: str = "wer",) -> dict:
"""
Calculates the number of samples (samples), number of words/characters/tokens (tokens),
wer/cer, insertion error rate (ins_rate), deletion error rate (del_rate) and substitution error rate (sub_rate) for each group/slot of the target metadata.
A group could be [female, male], or a slot group like [0-2s, 2-5s, >5s audios].
Args:
manifest (str): Filepath of the generated manifest which contains the prediction and eval result for each sample.
target (str): Target metadata, such as 'duration', 'speaker', 'emotion', etc.
Evaluated only if the field is present in the manifest.
meta_cfg (DictConfig): Config for calculating the group eval_metric for the target metadata.
eval_metric (str): Evaluation metric. Currently 'wer' and 'cer' are supported.
Returns:
ret (dict): Dictionary containing all results regarding the target metadata.
"""
if eval_metric not in ['wer', 'cer']:
raise ValueError(
"Currently support wer and cer as eval_metric. Please implement it in cal_target_metadata_wer if using different eval_metric"
)
wer_per_class = {}
with open(manifest, 'r') as fp:
for line in fp:
sample = json.loads(line)
if target in sample:
target_class = sample[target]
if target_class not in wer_per_class:
wer_per_class[target_class] = {
'samples': 0,
'tokens': 0,
"errors": 0,
"inss": 0,
"dels": 0,
"subs": 0,
}
wer_per_class[target_class]['samples'] += 1
tokens = sample["tokens"]
wer_per_class[target_class]["tokens"] += tokens
wer_per_class[target_class]["errors"] += tokens * sample[eval_metric]
wer_per_class[target_class]["inss"] += tokens * sample["ins_rate"]
wer_per_class[target_class]["dels"] += tokens * sample["del_rate"]
wer_per_class[target_class]["subs"] += tokens * sample["sub_rate"]
if len(wer_per_class) > 0:
res_wer_per_class = {}
for target_class in wer_per_class:
res_wer_per_class[target_class] = {}
res_wer_per_class[target_class]["samples"] = wer_per_class[target_class]["samples"]
res_wer_per_class[target_class][eval_metric] = (
wer_per_class[target_class]["errors"] / wer_per_class[target_class]["tokens"]
)
res_wer_per_class[target_class]["tokens"] = wer_per_class[target_class]["tokens"]
res_wer_per_class[target_class]["ins_rate"] = (
wer_per_class[target_class]["inss"] / wer_per_class[target_class]["tokens"]
)
res_wer_per_class[target_class]["del_rate"] = (
wer_per_class[target_class]["dels"] / wer_per_class[target_class]["tokens"]
)
res_wer_per_class[target_class]["sub_rate"] = (
wer_per_class[target_class]["subs"] / wer_per_class[target_class]["tokens"]
)
else:
logging.info(f"metadata '{target}' does not present in manifest. Skipping! ")
return None
values = ['samples', 'tokens', 'errors', 'inss', 'dels', 'subs']
slot_wer = {}
if 'slot' in meta_cfg and meta_cfg.slot:
for target_class in wer_per_class:
for s in meta_cfg.slot:
if isinstance(s[0], float) or isinstance(s[0], int):
if s[0] <= target_class < s[1]:
slot_key = "slot-" + ",".join(str(i) for i in s)
if slot_key not in slot_wer:
slot_wer[slot_key] = {
'samples': 0,
'tokens': 0,
"errors": 0,
"inss": 0,
"dels": 0,
"subs": 0,
}
for v in values:
slot_wer[slot_key][v] += wer_per_class[target_class][v]
break
elif isinstance(s[0], str):
if target_class in s:
slot_key = "slot-" + ",".join(s)
if slot_key not in slot_wer:
slot_wer[slot_key] = {
'samples': 0,
'tokens': 0,
"errors": 0,
"inss": 0,
"dels": 0,
"subs": 0,
}
for v in values:
slot_wer[slot_key][v] += wer_per_class[target_class][v]
break
else:
raise ValueError("Current only support target metadata belongs to numeric or string ")
for slot_key in slot_wer:
slot_wer[slot_key][eval_metric] = slot_wer[slot_key]['errors'] / slot_wer[slot_key]['tokens']
slot_wer[slot_key]['ins_rate'] = slot_wer[slot_key]['inss'] / slot_wer[slot_key]['tokens']
slot_wer[slot_key]['del_rate'] = slot_wer[slot_key]['dels'] / slot_wer[slot_key]['tokens']
slot_wer[slot_key]['sub_rate'] = slot_wer[slot_key]['subs'] / slot_wer[slot_key]['tokens']
slot_wer[slot_key].pop('errors')
slot_wer[slot_key].pop('inss')
slot_wer[slot_key].pop('dels')
slot_wer[slot_key].pop('subs')
res_wer_per_class.update(slot_wer)
ret = None
if meta_cfg.save_wer_per_class:
ret = res_wer_per_class
if (not meta_cfg.save_wer_per_class) and ('slot' in meta_cfg and meta_cfg.slot):
ret = slot_wer
return ret
|
NeMo-main
|
tools/asr_evaluator/utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from concurrent import futures
import api.nmt_pb2 as nmt
import api.nmt_pb2_grpc as nmtsrv
import grpc
import torch
import nemo.collections.nlp as nemo_nlp
from nemo.utils import logging
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", required=True, type=str, help="Path to a folder containing .nemo translation model files.",
)
parser.add_argument(
"--punctuation_model",
default="",
type=str,
help="Optionally provide a path a .nemo file for punctation and capitalization (recommend if working with Riva speech recognition outputs)",
)
parser.add_argument("--port", default=50052, type=int, required=False)
parser.add_argument("--batch_size", type=int, default=256, help="Maximum number of batches to process")
parser.add_argument("--beam_size", type=int, default=1, help="Beam Size")
parser.add_argument("--len_pen", type=float, default=0.6, help="Length Penalty")
parser.add_argument("--max_delta_length", type=int, default=5, help="Max Delta Generation Length.")
args = parser.parse_args()
return args
def batches(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
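# For example, list(batches([1, 2, 3, 4, 5], 2)) yields [[1, 2], [3, 4], [5]].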
class RivaTranslateServicer(nmtsrv.RivaTranslateServicer):
"""Provides methods that implement functionality of route guide server."""
def __init__(
self, model_dir, punctuation_model_path, beam_size=1, len_pen=0.6, max_delta_length=5, batch_size=256,
):
self._models = {}
self._beam_size = beam_size
self._len_pen = len_pen
self._max_delta_length = max_delta_length
self._batch_size = batch_size
self._punctuation_model_path = punctuation_model_path
self._model_dir = model_dir
model_paths = [os.path.join(model_dir, fname) for fname in os.listdir(model_dir) if fname.endswith('.nemo')]
for idx, model_path in enumerate(model_paths):
assert os.path.exists(model_path)
logging.info(f"Loading model {model_path}")
self._load_model(model_path)
if self._punctuation_model_path != "":
assert os.path.exists(punctuation_model_path)
logging.info(f"Loading punctuation model {model_path}")
self._load_puncutation_model(punctuation_model_path)
logging.info("Models loaded. Ready for inference requests.")
def _load_puncutation_model(self, punctuation_model_path):
if punctuation_model_path.endswith(".nemo"):
self.punctuation_model = nemo_nlp.models.PunctuationCapitalizationModel.restore_from(
restore_path=punctuation_model_path
)
self.punctuation_model.eval()
else:
raise NotImplemented(f"Only support .nemo files, but got: {punctuation_model_path}")
if torch.cuda.is_available():
self.punctuation_model = self.punctuation_model.cuda()
def _load_model(self, model_path):
if model_path.endswith(".nemo"):
logging.info("Attempting to initialize from .nemo file")
model = nemo_nlp.models.machine_translation.MTEncDecModel.restore_from(restore_path=model_path)
model = model.eval()
model.beam_search.beam_size = self._beam_size
model.beam_search.len_pen = self._len_pen
model.beam_search.max_delta_length = self._max_delta_length
if torch.cuda.is_available():
model = model.cuda()
else:
raise NotImplemented(f"Only support .nemo files, but got: {model_path}")
if not hasattr(model, "src_language") or not hasattr(model, "tgt_language"):
raise ValueError(
f"Could not find src_language and tgt_language in model attributes. If using NeMo rc1 checkpoints, please edit the config files to add model.src_language and model.tgt_language"
)
src_language = model.src_language
tgt_language = model.tgt_language
if src_language not in self._models:
self._models[src_language] = {}
if tgt_language not in self._models[src_language]:
self._models[src_language][tgt_language] = model
if torch.cuda.is_available():
self._models[src_language][tgt_language] = self._models[src_language][tgt_language].cuda()
else:
raise ValueError(f"Already found model for language pair {src_language}-{tgt_language}")
def TranslateText(self, request, context):
logging.info(f"Request received w/ {len(request.texts)} utterances")
results = []
if request.source_language not in self._models:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(
f"Could not find source-target language pair {request.source_language}-{request.target_language} in list of models."
)
return nmt.TranslateTextResponse()
if request.target_language not in self._models[request.source_language]:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(
f"Could not find source-target language pair {request.source_language}-{request.target_language} in list of models."
)
return nmt.TranslateTextResponse()
request_strings = [x for x in request.texts]
for batch in batches(request_strings, self._batch_size):
if self._punctuation_model_path != "":
batch = self.punctuation_model.add_punctuation_capitalization(batch)
batch_results = self._models[request.source_language][request.target_language].translate(text=batch)
translations = [nmt.Translation(translation=x) for x in batch_results]
results.extend(translations)
return nmt.TranslateTextResponse(translations=results)
def serve():
args = get_args()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
servicer = RivaTranslateServicer(
model_dir=args.model_dir,
punctuation_model_path=args.punctuation_model,
beam_size=args.beam_size,
len_pen=args.len_pen,
batch_size=args.batch_size,
max_delta_length=args.max_delta_length,
)
nmtsrv.add_RivaTranslateServicer_to_server(servicer, server)
server.add_insecure_port('[::]:' + str(args.port))
server.start()
server.wait_for_termination()
if __name__ == '__main__':
serve()
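# Illustrative launch command (the model directory path is a placeholder):
#   python server.py --model_dir /models/nmt --port 50052 --beam_size 4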
|
NeMo-main
|
tools/nmt_grpc_service/server.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from time import time
import api.nmt_pb2 as nmt
import api.nmt_pb2_grpc as nmtsrv
import grpc
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--target_language", default="es", type=str, required=True)
parser.add_argument("--source_language", default="en", type=str, required=True)
parser.add_argument("--text", default="Hello!", type=str, required=True)
parser.add_argument("--server_url", default='localhost:50052', type=str, required=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
with grpc.insecure_channel(f'{args.server_url}') as channel:
stub = nmtsrv.RivaTranslateStub(channel)
iterations = 1
start_time = time()
for _ in range(iterations):
req = nmt.TranslateTextRequest(
texts=[args.text], source_language=args.source_language, target_language=args.target_language
)
result = stub.TranslateText(req)
end_time = time()
print(f"Time to complete {iterations} synchronous requests: {end_time-start_time}")
print(result)
print(result.translations[0].translation)
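# Illustrative invocation against the server above (assumes it is listening on localhost:50052):
#   python client.py --source_language en --target_language es --text "Hello!"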
|
NeMo-main
|
tools/nmt_grpc_service/client.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import wave
import api.nmt_pb2 as nmt
import api.nmt_pb2_grpc as nmtsrv
import grpc
import pyaudio
import riva_api.audio_pb2 as riva
import riva_api.riva_asr_pb2 as rivaasr
import riva_api.riva_asr_pb2_grpc as rivaasr_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Speech Services")
parser.add_argument("--riva-server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--audio-file", required=True, help="path to local file to stream")
parser.add_argument("--output-device", type=int, default=None, help="output device to use")
parser.add_argument("--list-devices", action="store_true", help="list output devices indices")
parser.add_argument("--nmt-server", default="localhost:50052", help="port on which NMT server runs")
parser.add_argument("--asr_only", action="store_true", help="Whether to skip MT and just display")
parser.add_argument("--target_language", default="es", help="Target language to translate into.")
parser.add_argument(
"--asr_punctuation",
action="store_true",
help="Whether to use Riva's punctuation model for ASR transcript postprocessing.",
)
return parser.parse_args()
def listen_print_loop(responses, nmt_stub, target_language, asr_only=False):
num_chars_printed = 0
prev_utterances = []
for response in responses:
if not response.results:
continue
result = response.results[0]
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
original_transcript = transcript
if not asr_only:
req = nmt.TranslateTextRequest(texts=[transcript], source_language='en', target_language=target_language)
translation = nmt_stub.TranslateText(req).translations[0].translation
transcript = translation
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(">> " + transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript) + 3
else:
print("## " + transcript + overwrite_chars + "\n")
num_chars_printed = 0
prev_utterances.append(original_transcript)
CHUNK = 1024
args = get_args()
wf = wave.open(args.audio_file, 'rb')
channel = grpc.insecure_channel(args.riva_server)
client = rivaasr_srv.RivaSpeechRecognitionStub(channel)
nmt_channel = grpc.insecure_channel(args.nmt_server)
nmt_stub = nmtsrv.RivaTranslateStub(nmt_channel)
config = rivaasr.RecognitionConfig(
encoding=riva.AudioEncoding.LINEAR_PCM,
sample_rate_hertz=wf.getframerate(),
language_code="en-US",
max_alternatives=1,
enable_automatic_punctuation=args.asr_punctuation,
)
streaming_config = rivaasr.StreamingRecognitionConfig(config=config, interim_results=True)
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
if args.list_devices:
for i in range(p.get_device_count()):
info = p.get_device_info_by_index(i)
if info['maxOutputChannels'] < 1:
continue
print(f"{info['index']}: {info['name']}")
sys.exit(0)
# open stream (2)
stream = p.open(
output_device_index=args.output_device,
format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True,
)
# read data
def generator(w, s):
d = w.readframes(CHUNK)
yield rivaasr.StreamingRecognizeRequest(streaming_config=s)
while len(d) > 0:
yield rivaasr.StreamingRecognizeRequest(audio_content=d)
stream.write(d)
d = w.readframes(CHUNK)
return
responses = client.StreamingRecognize(generator(wf, streaming_config))
listen_print_loop(responses, nmt_stub, target_language=args.target_language, asr_only=args.asr_only)
# stop stream (4)
stream.stop_stream()
stream.close()
# close PyAudio (5)
p.terminate()
|
NeMo-main
|
tools/nmt_grpc_service/asr_nmt_client.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nmt.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nmt.proto',
package='nvidia.riva.nmt',
syntax='proto3',
serialized_options=b'Z\026nvidia.com/riva_speech\370\001\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\tnmt.proto\x12\x0fnvidia.riva.nmt\"W\n\x14TranslateTextRequest\x12\r\n\x05texts\x18\x01 \x03(\t\x12\x17\n\x0fsource_language\x18\x03 \x01(\t\x12\x17\n\x0ftarget_language\x18\x04 \x01(\t\"4\n\x0bTranslation\x12\x13\n\x0btranslation\x18\x01 \x01(\t\x12\x10\n\x08language\x18\x02 \x01(\t\"K\n\x15TranslateTextResponse\x12\x32\n\x0ctranslations\x18\x01 \x03(\x0b\x32\x1c.nvidia.riva.nmt.Translation2q\n\rRivaTranslate\x12`\n\rTranslateText\x12%.nvidia.riva.nmt.TranslateTextRequest\x1a&.nvidia.riva.nmt.TranslateTextResponse\"\x00\x42\x1bZ\x16nvidia.com/riva_speech\xf8\x01\x01\x62\x06proto3',
)
_TRANSLATETEXTREQUEST = _descriptor.Descriptor(
name='TranslateTextRequest',
full_name='nvidia.riva.nmt.TranslateTextRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='texts',
full_name='nvidia.riva.nmt.TranslateTextRequest.texts',
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='source_language',
full_name='nvidia.riva.nmt.TranslateTextRequest.source_language',
index=1,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='target_language',
full_name='nvidia.riva.nmt.TranslateTextRequest.target_language',
index=2,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=30,
serialized_end=117,
)
_TRANSLATION = _descriptor.Descriptor(
name='Translation',
full_name='nvidia.riva.nmt.Translation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='translation',
full_name='nvidia.riva.nmt.Translation.translation',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name='language',
full_name='nvidia.riva.nmt.Translation.language',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=119,
serialized_end=171,
)
_TRANSLATETEXTRESPONSE = _descriptor.Descriptor(
name='TranslateTextResponse',
full_name='nvidia.riva.nmt.TranslateTextResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='translations',
full_name='nvidia.riva.nmt.TranslateTextResponse.translations',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=173,
serialized_end=248,
)
_TRANSLATETEXTRESPONSE.fields_by_name['translations'].message_type = _TRANSLATION
DESCRIPTOR.message_types_by_name['TranslateTextRequest'] = _TRANSLATETEXTREQUEST
DESCRIPTOR.message_types_by_name['Translation'] = _TRANSLATION
DESCRIPTOR.message_types_by_name['TranslateTextResponse'] = _TRANSLATETEXTRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TranslateTextRequest = _reflection.GeneratedProtocolMessageType(
'TranslateTextRequest',
(_message.Message,),
{
'DESCRIPTOR': _TRANSLATETEXTREQUEST,
'__module__': 'nmt_pb2'
# @@protoc_insertion_point(class_scope:nvidia.riva.nmt.TranslateTextRequest)
},
)
_sym_db.RegisterMessage(TranslateTextRequest)
Translation = _reflection.GeneratedProtocolMessageType(
'Translation',
(_message.Message,),
{
'DESCRIPTOR': _TRANSLATION,
'__module__': 'nmt_pb2'
# @@protoc_insertion_point(class_scope:nvidia.riva.nmt.Translation)
},
)
_sym_db.RegisterMessage(Translation)
TranslateTextResponse = _reflection.GeneratedProtocolMessageType(
'TranslateTextResponse',
(_message.Message,),
{
'DESCRIPTOR': _TRANSLATETEXTRESPONSE,
'__module__': 'nmt_pb2'
# @@protoc_insertion_point(class_scope:nvidia.riva.nmt.TranslateTextResponse)
},
)
_sym_db.RegisterMessage(TranslateTextResponse)
DESCRIPTOR._options = None
_RIVATRANSLATE = _descriptor.ServiceDescriptor(
name='RivaTranslate',
full_name='nvidia.riva.nmt.RivaTranslate',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=250,
serialized_end=363,
methods=[
_descriptor.MethodDescriptor(
name='TranslateText',
full_name='nvidia.riva.nmt.RivaTranslate.TranslateText',
index=0,
containing_service=None,
input_type=_TRANSLATETEXTREQUEST,
output_type=_TRANSLATETEXTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
],
)
_sym_db.RegisterServiceDescriptor(_RIVATRANSLATE)
DESCRIPTOR.services_by_name['RivaTranslate'] = _RIVATRANSLATE
# @@protoc_insertion_point(module_scope)
|
NeMo-main
|
tools/nmt_grpc_service/api/nmt_pb2.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import nmt_pb2 as nmt__pb2
class RivaTranslateStub(object):
"""Riva NLP Services implement task-specific APIs for popular NLP tasks including
intent recognition (as well as slot filling), and entity extraction.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.TranslateText = channel.unary_unary(
'/nvidia.riva.nmt.RivaTranslate/TranslateText',
request_serializer=nmt__pb2.TranslateTextRequest.SerializeToString,
response_deserializer=nmt__pb2.TranslateTextResponse.FromString,
)
class RivaTranslateServicer(object):
"""Riva NLP Services implement task-specific APIs for popular NLP tasks including
intent recognition (as well as slot filling), and entity extraction.
"""
def TranslateText(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RivaTranslateServicer_to_server(servicer, server):
rpc_method_handlers = {
'TranslateText': grpc.unary_unary_rpc_method_handler(
servicer.TranslateText,
request_deserializer=nmt__pb2.TranslateTextRequest.FromString,
response_serializer=nmt__pb2.TranslateTextResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler('nvidia.riva.nmt.RivaTranslate', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RivaTranslate(object):
"""Riva NLP Services implement task-specific APIs for popular NLP tasks including
intent recognition (as well as slot filling), and entity extraction.
"""
@staticmethod
def TranslateText(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/nvidia.riva.nmt.RivaTranslate/TranslateText',
nmt__pb2.TranslateTextRequest.SerializeToString,
nmt__pb2.TranslateTextResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
|
NeMo-main
|
tools/nmt_grpc_service/api/nmt_pb2_grpc.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAJOR = 1
MINOR = 21
PATCH = 0
PRE_RELEASE = 'rc0'
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
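# With the values above, __shortversion__ == '1.21.0' and __version__ == '1.21.0rc0'.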
__package_name__ = 'nemo_toolkit'
__contact_names__ = 'NVIDIA'
__contact_emails__ = 'nemo-toolkit@nvidia.com'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/nvidia/nemo'
__download_url__ = 'https://github.com/NVIDIA/NeMo/releases'
__description__ = 'NeMo - a toolkit for Conversational AI'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, NLP, NeMo, nvidia, pytorch, torch, tts, speech, language'
|
NeMo-main
|
nemo/package_info.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NEMO_ENV_VARNAME_ENABLE_COLORING = "NEMO_ENABLE_COLORING"
NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR = "NEMO_REDIRECT_LOGS_TO_STDERR"
NEMO_ENV_VARNAME_TESTING = "NEMO_TESTING" # Set to True to enable nemo.util.logging's debug mode
NEMO_ENV_VARNAME_VERSION = "NEMO_EXPM_VERSION" # Used for nemo.utils.exp_manager versioning
NEMO_ENV_CACHE_DIR = "NEMO_CACHE_DIR" # Used to change default nemo cache directory
NEMO_ENV_DATA_STORE_CACHE_DIR = "NEMO_DATA_STORE_CACHE_DIR" # Used to change default nemo data store cache directory
NEMO_ENV_DATA_STORE_CACHE_SHARED = "NEMO_DATA_STORE_CACHE_SHARED" # Shared among nodes (1) or not shared (0)
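# Illustrative shell usage (path is a placeholder): export NEMO_CACHE_DIR=/path/to/cache before running a NeMo script.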
|
NeMo-main
|
nemo/constants.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.package_info import (
__contact_emails__,
__contact_names__,
__description__,
__download_url__,
__homepage__,
__keywords__,
__license__,
__package_name__,
__repository_url__,
__shortversion__,
__version__,
)
|
NeMo-main
|
nemo/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nemo.core.neural_types
from nemo.core.classes import *
|
NeMo-main
|
nemo/core/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations # necessary for lazy types evaluation
import os
import shutil
import tarfile
import tempfile
import uuid
from typing import Optional, Set, Union
import torch
from omegaconf import DictConfig, OmegaConf
from omegaconf.omegaconf import open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.core import classes as nemo_classes # to avoid circular import do not import ModelPT directly
from nemo.utils import logging, model_utils
from nemo.utils.app_state import AppState
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.model_utils import inject_model_parallel_rank
class SaveRestoreConnector:
def __init__(self) -> None:
self._model_config_yaml = "model_config.yaml"
self._model_weights_ckpt = "model_weights.ckpt"
self._model_extracted_dir = None
def save_to(self, model: "nemo_classes.ModelPT", save_path: str):
"""
Saves model instance (weights and configuration) into .nemo file.
You can use "restore_from" method to fully restore instance from .nemo file.
.nemo file is an archive (tar.gz) with the following:
model_config.yaml - model configuration in .yaml format. You can deserialize this into the cfg argument for the model's constructor
model_weights.ckpt - model checkpoint
Args:
model: ModelPT object to be saved.
save_path: Path to .nemo file where model instance should be saved
"""
if is_global_rank_zero():
with tempfile.TemporaryDirectory() as tmpdir:
config_yaml = os.path.join(tmpdir, self.model_config_yaml)
model_weights = os.path.join(tmpdir, self.model_weights_ckpt)
model.to_config_file(path2yaml_file=config_yaml)
# update subconfigs if there are child models, since a child model can change its config
self._update_subconfigs(model, path2yaml_file=config_yaml)
if model.has_native_or_submodules_artifacts():
self._handle_artifacts(model, nemo_file_folder=tmpdir)
# We should not update self._cfg here - the model can still be in use
self._update_artifact_paths(model, path2yaml_file=config_yaml)
self._save_state_dict_to_disk(model.state_dict(), model_weights)
self._make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)
else:
return
def load_config_and_state_dict(
self,
calling_cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = True,
return_config: bool = False,
trainer: Trainer = None,
):
"""
Restores a model instance (weights and configuration) from a .nemo file
Args:
restore_path: path to .nemo file from which model should be instantiated
override_config_path: path to a yaml config that will override the internal
config file or an OmegaConf / DictConfig object representing the model config.
map_location: Optional torch.device() to map the instantiated model to a device.
By default (None), it will select a GPU if available, falling back to CPU otherwise.
strict: Passed to load_state_dict. By default True
return_config: If set to true, will return just the underlying config of the restored
model as an OmegaConf DictConfig object without instantiating the model.
Example:
```
model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo')
assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel)
```
Returns:
An instance of type cls or its underlying config (if return_config is set).
"""
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
cwd = os.getcwd()
if map_location is None:
if torch.cuda.is_available():
map_location = torch.device('cuda')
else:
map_location = torch.device('cpu')
app_state = AppState()
with tempfile.TemporaryDirectory() as tmpdir:
try:
# Check if self.model_extracted_dir is set, and is a valid path
if self.model_extracted_dir is not None and os.path.isdir(self.model_extracted_dir):
# Log that NeMo will use the provided `model_extracted_dir`
logging.info(
f"Restoration will occur within pre-extracted directory : " f"`{self.model_extracted_dir}`."
)
# Override `tmpdir` above with the pre-extracted `model_extracted_dir`
tmpdir = self.model_extracted_dir
else:
# Extract the nemo file into the temporary directory
self._unpack_nemo_file(
path2file=restore_path, out_folder=tmpdir, extract_config_only=return_config is True
)
# Change current working directory to the temporary directory
os.chdir(tmpdir)
if override_config_path is None:
config_yaml = self.model_config_yaml
else:
# can be str path or OmegaConf / DictConfig object
config_yaml = override_config_path
if not isinstance(config_yaml, (OmegaConf, DictConfig)):
conf = OmegaConf.load(config_yaml)
else:
conf = config_yaml
if override_config_path is not None:
# Resolve the override config
conf = OmegaConf.to_container(conf, resolve=True)
conf = OmegaConf.create(conf)
# If override is top level config, extract just `model` from it
if 'model' in conf:
conf = conf.model
if return_config:
instance = conf
return instance
else:
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
model_weights = self._inject_model_parallel_rank_for_ckpt(tmpdir, self.model_weights_ckpt)
else:
model_weights = os.path.join(tmpdir, self.model_weights_ckpt)
OmegaConf.set_struct(conf, True)
os.chdir(cwd)
# get the class
calling_cls._set_model_restore_state(is_being_restored=True, folder=tmpdir)
instance = calling_cls.from_config_dict(config=conf, trainer=trainer)
instance = instance.to(map_location)
# add load_state_dict override
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
model_weights = self._inject_model_parallel_rank_for_ckpt(tmpdir, self.model_weights_ckpt)
state_dict = self._load_state_dict_from_disk(model_weights, map_location=map_location)
finally:
os.chdir(cwd)
return (conf, instance, state_dict)
def modify_state_dict(self, conf, state_dict):
"""
Utility method that allows to modify the state dict before loading parameters into a model.
Args:
conf: A model level OmegaConf object.
state_dict: The state dict restored from the checkpoint.
Returns:
A potentially modified state dict.
"""
# NOTE and TODO (sandeepsub) This is duplicated across save_restore_connector and nlp_save_restore_connector. This shouldn't be here.
if conf.get('megatron_amp_O2', False):
new_state_dict = {}
for key in state_dict.keys():
new_key = key.replace('model.', 'model.module.', 1)
new_state_dict[new_key] = state_dict[key]
state_dict = new_state_dict
return state_dict
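# For example, with megatron_amp_O2 enabled, a key such as 'model.encoder.weight' is
# rewritten to 'model.module.encoder.weight' (key names here are illustrative).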
def load_instance_with_state_dict(self, instance, state_dict, strict):
"""
Utility method that loads a model instance with the (potentially modified) state dict.
Args:
instance: ModelPT subclass instance.
state_dict: The state dict (which may have been modified)
strict: Bool, whether to perform strict checks when loading the state dict.
"""
instance.load_state_dict(state_dict, strict=strict)
instance._set_model_restore_state(is_being_restored=False)
def restore_from(
self,
calling_cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = True,
return_config: bool = False,
trainer: Trainer = None,
):
"""
Restores a model instance (weights and configuration) from a .nemo file
Args:
restore_path: path to .nemo file from which model should be instantiated
override_config_path: path to a yaml config that will override the internal
config file or an OmegaConf / DictConfig object representing the model config.
map_location: Optional torch.device() to map the instantiated model to a device.
By default (None), it will select a GPU if available, falling back to CPU otherwise.
strict: Passed to load_state_dict. By default True
return_config: If set to true, will return just the underlying config of the restored
model as an OmegaConf DictConfig object without instantiating the model.
trainer: An optional Trainer object, passed to the model constructor.
Example:
```
model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo')
assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel)
```
Returns:
An instance of type cls or its underlying config (if return_config is set).
"""
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
loaded_params = self.load_config_and_state_dict(
calling_cls, restore_path, override_config_path, map_location, strict, return_config, trainer,
)
if not isinstance(loaded_params, tuple) or return_config is True:
return loaded_params
conf, instance, state_dict = loaded_params
state_dict = self.modify_state_dict(conf, state_dict)
self.load_instance_with_state_dict(instance, state_dict, strict)
logging.info(f'Model {instance.__class__.__name__} was successfully restored from {restore_path}.')
return instance
def extract_state_dict_from(self, restore_path: str, save_dir: str, split_by_module: bool = False):
"""
Extract the state dict(s) from a provided .nemo tarfile and save it to a directory.
Args:
restore_path: path to .nemo file from which state dict(s) should be extracted
save_dir: directory in which the saved state dict(s) should be stored
split_by_module: bool flag, which determines whether the output checkpoint should
be for the entire Model, or for the individual modules that comprise the Model
Example:
To convert the .nemo tarfile into a single Model level PyTorch checkpoint
::
state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts')
To restore a model from a Model level checkpoint
::
model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration
model.load_state_dict(torch.load("./asr_ckpts/model_weights.ckpt"))
To convert the .nemo tarfile into multiple Module level PyTorch checkpoints
::
state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts', split_by_module=True)
To restore a module from a Module level checkpoint
::
model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration
# load the individual components
model.preprocessor.load_state_dict(torch.load("./asr_ckpts/preprocessor.ckpt"))
model.encoder.load_state_dict(torch.load("./asr_ckpts/encoder.ckpt"))
model.decoder.load_state_dict(torch.load("./asr_ckpts/decoder.ckpt"))
Returns:
The state dict that was loaded from the original .nemo checkpoint
"""
cwd = os.getcwd()
save_dir = os.path.abspath(save_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
with tempfile.TemporaryDirectory() as tmpdir:
try:
self._unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)
os.chdir(tmpdir)
model_weights = os.path.join(tmpdir, self.model_weights_ckpt)
state_dict = self._load_state_dict_from_disk(model_weights)
if not split_by_module:
filepath = os.path.join(save_dir, self.model_weights_ckpt)
self._save_state_dict_to_disk(state_dict, filepath)
else:
key_set = set([key.split(".")[0] for key in state_dict.keys()])
for primary_key in key_set:
inner_keys = [key for key in state_dict.keys() if key.split(".")[0] == primary_key]
state_dict_subset = {
".".join(inner_key.split(".")[1:]): state_dict[inner_key] for inner_key in inner_keys
}
filepath = os.path.join(save_dir, f"{primary_key}.ckpt")
self._save_state_dict_to_disk(state_dict_subset, filepath)
logging.info(f'Checkpoints from {restore_path} were successfully extracted into {save_dir}.')
finally:
os.chdir(cwd)
return state_dict
def register_artifact(self, model, config_path: str, src: str, verify_src_exists: bool = True):
"""
Register model artifacts with this function. These artifacts (files) will be included inside .nemo file
when model.save_to("mymodel.nemo") is called.
How it works:
1. It always returns an existing absolute path which can be used during the Model constructor call
EXCEPTION: src is None or "" in which case nothing will be done and src will be returned
2. It will add (config_path, model_utils.ArtifactItem()) pair to self.artifacts
.. code-block::
If "src" is a local existing path:
then it will be returned in absolute path form
elif "src" starts with "nemo:unique_artifact_name":
.nemo will be untarred to a temporary folder location and an actual existing path will be returned
else:
an error will be raised.
WARNING: use .register_artifact calls in your models' constructors.
The returned path is not guaranteed to exist after you have exited your model's constructor.
Args:
model: ModelPT object to register artifact for.
config_path (str): Artifact key. Usually corresponds to the model config.
src (str): Path to artifact.
verify_src_exists (bool): If set to False, then the artifact is optional and register_artifact will return
None even if src is not found. Defaults to True.
Returns:
str: If src is not None or empty, it always returns an absolute path which is guaranteed to exist for the
duration of the model instance's life
"""
app_state = AppState()
artifact_item = model_utils.ArtifactItem()
# This is for backward compatibility: if the src object exists directly inside the tarfile
# without its key having been overridden, this pathway will be used.
src_obj_name = os.path.basename(src)
if app_state.nemo_file_folder is not None:
src_obj_path = os.path.abspath(os.path.join(app_state.nemo_file_folder, src_obj_name))
else:
src_obj_path = src_obj_name
# src is a local existing path - register artifact and return exact same path for usage by the model
if os.path.exists(os.path.abspath(src)):
return_path = os.path.abspath(src)
artifact_item.path_type = model_utils.ArtifactPathType.LOCAL_PATH
# this is the case when the artifact must be retrieved from the nemo file
# we are assuming that the location of the right nemo file is available from _MODEL_RESTORE_PATH
elif src.startswith("nemo:"):
return_path = os.path.abspath(os.path.join(app_state.nemo_file_folder, src[5:]))
artifact_item.path_type = model_utils.ArtifactPathType.TAR_PATH
# backward compatibility implementation
elif os.path.exists(src_obj_path):
return_path = src_obj_path
artifact_item.path_type = model_utils.ArtifactPathType.TAR_PATH
else:
if verify_src_exists:
raise FileNotFoundError(
f"src path does not exist or it is not a path in nemo file. src value I got was: {src}. Absolute: {os.path.abspath(src)}"
)
else:
# artifact is optional and we simply return None
return None
assert os.path.exists(return_path)
artifact_item.path = os.path.abspath(src)
model.artifacts[config_path] = artifact_item
# we were called by ModelPT
if hasattr(model, "cfg"):
with open_dict(model._cfg):
OmegaConf.update(model.cfg, config_path, return_path)
return return_path
def _handle_artifacts(self, model, nemo_file_folder):
tarfile_artifacts = []
app_state = AppState()
# aggregate artifacts from self and all children recursively
artifacts_containers = []
for _, config_path, module in model.named_nemo_modules():
if module.has_artifacts(): # NeMo model with artifacts
artifacts_containers.append((config_path, module.artifacts))
if len(artifacts_containers) > 0 and (not hasattr(model, "artifacts") or model.artifacts is None):
# model has no artifacts, but submodules have some
model.artifacts = dict()
for config_path, artifacts in artifacts_containers:
for subconf_path, artiitem in artifacts.items():
conf_path = f"{config_path}.{subconf_path}" if config_path else f"{subconf_path}"
if artiitem.path_type == model_utils.ArtifactPathType.LOCAL_PATH:
if not os.path.exists(artiitem.path):
raise FileNotFoundError(f"Artifact {conf_path} not found at location: {artiitem.path}")
# Generate a new unique artifact name and copy it to nemo_file_folder
# Note uuid.uuid4().hex is guaranteed to be 32 characters long
artifact_base_name = os.path.basename(artiitem.path)
artifact_uniq_name = f"{uuid.uuid4().hex}_{artifact_base_name}"
shutil.copy2(artiitem.path, os.path.join(nemo_file_folder, artifact_uniq_name))
# Update artifacts registry
artiitem.hashed_path = "nemo:" + artifact_uniq_name
model.artifacts[conf_path] = artiitem
elif artiitem.path_type == model_utils.ArtifactPathType.TAR_PATH:
# process all tarfile artifacts in one go, so preserve key-value pair
tarfile_artifacts.append((conf_path, artiitem))
if subconf_path: # artifact from submodule
model.artifacts[conf_path] = artiitem
else:
raise ValueError("Directly referencing artifacts from other nemo files isn't supported yet")
# Process current tarfile artifacts by unpacking the previous tarfile and extract the artifacts
# that are currently required.
# artifacts can be native (from the model itself) and from submodules
restoration_paths: Set[str] = set() # model + submodules restoration paths, handle only unique paths
model_metadata = app_state.get_model_metadata_from_guid(model.model_guid)
if model_metadata.restoration_path is not None:
restoration_paths.add(model_metadata.restoration_path)
# aggregate restoration paths for all submodules recursively
for module in model.modules():
if isinstance(module, nemo_classes.ModelPT): # if NeMo model
submodule_restoration_path = app_state.get_model_metadata_from_guid(module.model_guid).restoration_path
if submodule_restoration_path is not None:
restoration_paths.add(submodule_restoration_path)
if len(tarfile_artifacts) > 0 and len(restoration_paths) == 0:
# TODO: see cases when this can occur, and if we can fix them
logging.warning("Model contains registered artifacts, but no restoration paths found")
if len(tarfile_artifacts) > 0 and len(restoration_paths) > 0:
# Need to step into nemo archive to extract file
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
cwd = os.getcwd()
# Step into the nemo archive to try and find the file
# TemporaryDirectory context must always be outer to try-catch chdir otherwise it crashes on Windows
with tempfile.TemporaryDirectory() as archive_dir:
try:
# unpack all restoration paths (nemo checkpoints)
# in nemo checkpoints all resources contain hash in name, so there should be no collisions
for path in restoration_paths:
if self.model_extracted_dir:
shutil.copytree(src=path, dst=archive_dir, dirs_exist_ok=True)
else:
self._unpack_nemo_file(path2file=path, out_folder=archive_dir)
os.chdir(archive_dir)
for conf_path, artiitem in tarfile_artifacts:
# Get basename and copy it to nemo_file_folder
if 'nemo:' in artiitem.path:
artifact_base_name = artiitem.path.split('nemo:')[1]
else:
artifact_base_name = os.path.basename(artiitem.path)
# no need to hash here as we are in tarfile_artifacts which are already hashed
artifact_uniq_name = artifact_base_name
shutil.copy2(artifact_base_name, os.path.join(nemo_file_folder, artifact_uniq_name))
# Update artifacts registry
new_artiitem = model_utils.ArtifactItem()
new_artiitem.path = "nemo:" + artifact_uniq_name
new_artiitem.path_type = model_utils.ArtifactPathType.TAR_PATH
model.artifacts[conf_path] = new_artiitem
finally:
# change back working directory
os.chdir(cwd)
@staticmethod
def _update_subconfigs(model: "nemo_classes.ModelPT", path2yaml_file):
"""
Update subconfigs of the model if ModelPT has submodules.
Should be called before updating artifact paths.
"""
if not model.has_nemo_submodules():
# no submodules => nothing to update
return
conf = OmegaConf.load(path2yaml_file)
# update subconfigs for all children recursively
# parent configs updated before children
for _, conf_path, submodule in model.named_nemo_modules():
if not conf_path: # self
continue
OmegaConf.update(conf, conf_path, submodule.cfg)
with open(path2yaml_file, 'w', encoding='utf-8') as fout:
OmegaConf.save(config=conf, f=fout, resolve=True)
def _update_artifact_paths(self, model, path2yaml_file):
if hasattr(model, "artifacts") and model.artifacts is not None and len(model.artifacts) > 0:
conf = OmegaConf.load(path2yaml_file)
for conf_path, item in model.artifacts.items():
if item.hashed_path is None:
OmegaConf.update(conf, conf_path, item.path)
else:
OmegaConf.update(conf, conf_path, item.hashed_path)
with open(path2yaml_file, 'w', encoding='utf-8') as fout:
OmegaConf.save(config=conf, f=fout, resolve=True)
def _inject_model_parallel_rank_for_ckpt(self, dirname, basename):
model_weights = os.path.join(dirname, basename)
model_weights = inject_model_parallel_rank(model_weights)
return model_weights
@staticmethod
def _make_nemo_file_from_folder(filename, source_dir):
dirname = os.path.dirname(filename)
os.makedirs(dirname, exist_ok=True)
with tarfile.open(filename, "w:") as tar:
tar.add(source_dir, arcname=".")
@staticmethod
def _unpack_nemo_file(path2file: str, out_folder: str, extract_config_only: bool = False) -> str:
if not os.path.exists(path2file):
raise FileNotFoundError(f"{path2file} does not exist")
# we start with an assumption of uncompressed tar,
# which should be true for versions 1.7.0 and above
tar_header = "r:"
try:
tar_test = tarfile.open(path2file, tar_header)
tar_test.close()
except tarfile.ReadError:
# can be older checkpoint => try compressed tar
tar_header = "r:gz"
tar = tarfile.open(path2file, tar_header)
if not extract_config_only:
tar.extractall(path=out_folder)
else:
members = [x for x in tar.getmembers() if ".yaml" in x.name]
tar.extractall(path=out_folder, members=members)
tar.close()
return out_folder
@staticmethod
def _save_state_dict_to_disk(state_dict, filepath):
torch.save(state_dict, filepath)
@staticmethod
def _load_state_dict_from_disk(model_weights, map_location=None):
# honor an explicit map_location; otherwise keep the original CPU default
return torch.load(model_weights, map_location=map_location if map_location is not None else 'cpu')
@property
def model_config_yaml(self) -> str:
return self._model_config_yaml
@model_config_yaml.setter
def model_config_yaml(self, path: str):
self._model_config_yaml = path
@property
def model_weights_ckpt(self) -> str:
return self._model_weights_ckpt
@model_weights_ckpt.setter
def model_weights_ckpt(self, path: str):
self._model_weights_ckpt = path
@property
def model_extracted_dir(self) -> Optional[str]:
return self._model_extracted_dir
@model_extracted_dir.setter
def model_extracted_dir(self, path: Optional[str]):
self._model_extracted_dir = path
|
NeMo-main
|
nemo/core/connectors/save_restore_connector.py
|
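The connector above is easiest to see in action through its checkpoint-extraction path. Below is a minimal usage sketch (not part of the NeMo sources): it assumes a local 'asr.nemo' checkpoint and a writable './asr_ckpts' directory exist, and it relies only on the methods defined in the file above.

```
import torch

from nemo.core.connectors.save_restore_connector import SaveRestoreConnector

connector = SaveRestoreConnector()

# Unpack the .nemo tarfile and dump the full, model-level state dict to ./asr_ckpts.
state_dict = connector.extract_state_dict_from('asr.nemo', './asr_ckpts')

# The same weights can then be reloaded directly with torch.load, mirroring the
# docstring example above (model_weights.ckpt is the connector's default filename).
reloaded = torch.load('./asr_ckpts/model_weights.ckpt', map_location='cpu')
assert set(reloaded.keys()) == set(state_dict.keys())
```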
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/core/connectors/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from abc import ABC
from typing import Dict, Optional, Tuple
from nemo.core.neural_types.comparison import NeuralTypeComparisonResult
__all__ = [
'ElementType',
'VoidType',
'BoolType',
'ChannelType',
'AcousticEncodedRepresentation',
'AudioSignal',
'SpectrogramType',
'MelSpectrogramType',
'MFCCSpectrogramType',
'LogitsType',
'LabelsType',
'HypothesisType',
'LossType',
'RegressionValuesType',
'CategoricalValuesType',
'PredictionsType',
'LogprobsType',
'ProbsType',
'LengthsType',
'EmbeddedTextType',
'EncodedRepresentation',
'MaskType',
'Target',
'ClassificationTarget',
'ImageFeatureValue',
'Index',
'ImageValue',
'NormalizedImageValue',
'StringLabel',
'StringType',
'TokenIndex',
'Length',
'IntType',
'FloatType',
'NormalDistributionSamplesType',
'NormalDistributionMeanType',
'NormalDistributionLogVarianceType',
'TokenDurationType',
'TokenLogDurationType',
'LogDeterminantType',
'SequenceToSequenceAlignmentType',
]
class ElementType(ABC):
"""Abstract class defining semantics of the tensor elements.
We are relying on Python for inheritance checking"""
def __str__(self):
return self.__doc__
def __repr__(self):
return self.__class__.__name__
@property
def type_parameters(self) -> Dict:
"""Override this property to parametrize your type. For example, you can specify a 'storage' type such as
float, int, or bool with a 'dtype' keyword. Another example is if you want to represent a signal with a
particular property (say, sample frequency), then you can put sample_freq->value in there.
When two types are compared their type_parameters must match."""
return {}
@property
def fields(self) -> Optional[Tuple]:
"""This should be used to logically represent tuples/structures. For example, if you want to represent a
bounding box (x, y, width, height) you can put a tuple with names ('x', 'y', 'w', 'h') in here.
Under the hood this should be converted to the last tensor dimension of fixed size = len(fields).
When two types are compared their fields must match."""
return None
def compare(self, second) -> NeuralTypeComparisonResult:
# First, check general compatibility
first_t = type(self)
second_t = type(second)
if first_t == second_t:
result = NeuralTypeComparisonResult.SAME
elif issubclass(first_t, second_t):
result = NeuralTypeComparisonResult.LESS
elif issubclass(second_t, first_t):
result = NeuralTypeComparisonResult.GREATER
else:
result = NeuralTypeComparisonResult.INCOMPATIBLE
if result != NeuralTypeComparisonResult.SAME:
return result
else:
# now check that all parameters match
check_params = set(self.type_parameters.keys()) == set(second.type_parameters.keys())
if check_params is False:
return NeuralTypeComparisonResult.SAME_TYPE_INCOMPATIBLE_PARAMS
else:
for k1, v1 in self.type_parameters.items():
if v1 is None or second.type_parameters[k1] is None:
# Treat None as Void
continue
if v1 != second.type_parameters[k1]:
return NeuralTypeComparisonResult.SAME_TYPE_INCOMPATIBLE_PARAMS
# check that all fields match
if self.fields == second.fields:
return NeuralTypeComparisonResult.SAME
else:
return NeuralTypeComparisonResult.INCOMPATIBLE
class VoidType(ElementType):
"""Void-like type which is compatible with everything.
It is a good practice to use this type only as necessary.
For example, when you need template-like functionality.
"""
def compare(self, second) -> NeuralTypeComparisonResult:
return NeuralTypeComparisonResult.SAME
# TODO: Consider moving these files elsewhere
class ChannelType(ElementType):
"""Element to represent convolutional input/output channel.
"""
class EmbeddedTextType(ChannelType):
"""Element to represent output on word/text embedding layers
"""
class LogitsType(ElementType):
"""Element type to represent logits"""
class ProbsType(ElementType):
"""Element type to represent probabilities. For example, outputs of softmax layers."""
class LogprobsType(ElementType):
"""Element type to represent log-probabilities. For example, outputs of log softmax layers."""
class LabelsType(ElementType):
"""Element type to represent some sort of labels. This is often used as a base class to create
more concrete types such as RegressionValuesType, etc."""
class HypothesisType(LabelsType):
"""Element type to represent some decoded hypothesis, which may further be processed to obtain
a concrete label."""
class LengthsType(ElementType):
"""Element type representing lengths of something"""
class LossType(ElementType):
"""Element type to represent outputs of Loss modules"""
class EncodedRepresentation(ChannelType):
"""Element type to represent encoded representation, for example, encoder's output"""
class AcousticEncodedRepresentation(EncodedRepresentation):
"""Element type to represent encoded representation returned by the acoustic encoder model"""
class AudioSignal(ElementType):
"""Element type to represent an audio signal
Args:
freq (int): sampling frequency of a signal. Note that two signals will only be the same if their
freq is the same.
"""
def __init__(self, freq: Optional[int] = None):
self._params = {}
self._params['freq'] = freq
@property
def type_parameters(self):
return self._params
class SpectrogramType(ChannelType):
"""Element type to represent generic spectrogram signal"""
class MelSpectrogramType(SpectrogramType):
"""Element type to represent mel spectrogram signal"""
class MFCCSpectrogramType(SpectrogramType):
"""Element type to represent MFCC spectrogram signal"""
class PredictionsType(LabelsType):
"""Element type to represent some sort of predictions returned by model"""
class RegressionValuesType(PredictionsType):
"""Element type to represent labels for regression task"""
class CategoricalValuesType(PredictionsType):
"""Element type to represent labels for categorical classification task"""
class MaskType(PredictionsType):
"""Element type to represent a boolean mask"""
class Index(ElementType):
"""Type representing an element being an index of the sample."""
class Target(ElementType):
"""
Type representing an element being a target value.
"""
class ClassificationTarget(Target):
"""
Type representing an element being target value in the classification task, i.e. identifier of a desired class.
"""
class ImageValue(ElementType):
"""
Type representing an element/value of a single image channel,
e.g. a single element (R) of RGB image.
"""
class NormalizedImageValue(ImageValue):
"""
Type representing an element/value of a single image channel normalized to <0-1> range,
e.g. a single element (R) of normalized RGB image.
"""
class ImageFeatureValue(ImageValue):
"""Type representing an element (single value) of a (image) feature maps."""
class StringType(ElementType):
"""Element type representing a single string"""
class StringLabel(StringType):
"""
Type representing a label being a string with a class name (e.g. the "hamster" class in CIFAR100).
"""
class BoolType(ElementType):
"""Element type representing a single boolean value"""
class IntType(ElementType):
"""Element type representing a single integer"""
class FloatType(ElementType):
"""Element type representing a single float"""
class TokenIndex(IntType):
"""Type representing an element being index of a token in some kind of a vocabulary."""
class Length(IntType):
"""Type representing an element storing a "length" (e.g. length of a list)."""
class ProbabilityDistributionSamplesType(ElementType):
"""Element to represent tensors that are meant to be sampled from a valid probability distribution
"""
class NormalDistributionSamplesType(ProbabilityDistributionSamplesType):
"""Element to represent tensors that are meant to be sampled from a valid normal distribution
"""
class SequenceToSequenceAlignmentType(ElementType):
"""Class to represent the alignment from seq-to-seq attention outputs. Generally a mapping from encoder time steps
to decoder time steps."""
class NormalDistributionMeanType(ElementType):
"""Element to represent the mean of a normal distribution"""
class NormalDistributionLogVarianceType(ElementType):
"""Element to represent the log variance of a normal distribution"""
class TokenDurationType(ElementType):
"""Element for representing the duration of a token"""
class TokenLogDurationType(ElementType):
"""Element for representing the log-duration of a token"""
class LogDeterminantType(ElementType):
"""Element for representing log determinants usually used in flow models"""
|
NeMo-main
|
nemo/core/neural_types/elements.py
|
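A short sketch of how the comparison semantics defined above behave in practice; it uses only classes from this file plus the comparison enum, and the specific frequencies are arbitrary illustration values.

```
from nemo.core.neural_types.comparison import NeuralTypeComparisonResult
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType, SpectrogramType

# Same class with matching type_parameters compares as SAME.
assert AudioSignal(freq=16000).compare(AudioSignal(freq=16000)) == NeuralTypeComparisonResult.SAME

# Same class, different parametrization -> SAME_TYPE_INCOMPATIBLE_PARAMS.
assert AudioSignal(freq=16000).compare(AudioSignal(freq=8000)) == NeuralTypeComparisonResult.SAME_TYPE_INCOMPATIBLE_PARAMS

# A subclass compared against its parent class -> LESS (a mel spectrogram *is a* spectrogram).
assert MelSpectrogramType().compare(SpectrogramType()) == NeuralTypeComparisonResult.LESS
```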
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
from nemo.core.neural_types.axes import AxisKind, AxisType
from nemo.core.neural_types.comparison import NeuralTypeComparisonResult
from nemo.core.neural_types.elements import ElementType, VoidType
__all__ = [
'NeuralType',
'NeuralTypeError',
'NeuralPortNameMismatchError',
'NeuralPortNmTensorMismatchError',
]
class NeuralType(object):
"""This is the main class which would represent neural type concept.
It is used to represent *the types* of inputs and outputs.
Args:
axes (Optional[Tuple]): a tuple of AxisTypes objects representing the semantics of what varying each axis means
You can use a short, string-based form here. For example: ('B', 'C', 'H', 'W') would correspond to an NCHW
format frequently used in computer vision. ('B', 'T', 'D') is frequently used for signal processing and
means [batch, time, dimension/channel].
elements_type (ElementType): an instance of ElementType class representing the semantics of what is stored
inside the tensor. For example: logits (LogitsType), log probabilities (LogprobType), etc.
optional (bool): By default, this is false. If set to True, it means that the input to the port of this
type can be optional.
"""
def __str__(self):
if self.axes is not None:
return f"axes: {self.axes}; elements_type: {self.elements_type.__class__.__name__}"
else:
return f"axes: None; elements_type: {self.elements_type.__class__.__name__}"
def __init__(self, axes: Optional[Tuple] = None, elements_type: ElementType = VoidType(), optional=False):
if not isinstance(elements_type, ElementType):
raise ValueError(
"elements_type of NeuralType must be an instance of a class derived from ElementType. "
"Did you pass a class instead?"
)
self.elements_type = elements_type
if axes is not None:
NeuralType.__check_sanity(axes)
axes_list = []
for axis in axes:
if isinstance(axis, str):
axes_list.append(AxisType(AxisKind.from_str(axis), None))
elif isinstance(axis, AxisType):
axes_list.append(axis)
else:
raise ValueError("axis type must be either str or AxisType instance")
self.axes = tuple(axes_list)
else:
self.axes = None
self.optional = optional
def compare(self, second) -> NeuralTypeComparisonResult:
"""Performs neural type comparison of self with second. When you chain two modules' inputs/outputs via
__call__ method, this comparison will be called to ensure neural type compatibility."""
# First, handle dimensionality
axes_a = self.axes
axes_b = second.axes
# "Big void" type
if isinstance(self.elements_type, VoidType) and self.axes is None:
return NeuralTypeComparisonResult.SAME
if self.axes is None:
if second.axes is None:
return self.elements_type.compare(second.elements_type)
else:
return NeuralTypeComparisonResult.INCOMPATIBLE
dimensions_pass = NeuralType.__compare_axes(axes_a, axes_b)
element_comparison_result = self.elements_type.compare(second.elements_type)
# SAME DIMS
if dimensions_pass == 0:
return element_comparison_result
# TRANSPOSE_SAME DIMS
elif dimensions_pass == 1:
if element_comparison_result == NeuralTypeComparisonResult.SAME:
return NeuralTypeComparisonResult.TRANSPOSE_SAME
else:
return NeuralTypeComparisonResult.INCOMPATIBLE
# DIM_INCOMPATIBLE DIMS
elif dimensions_pass == 2:
if element_comparison_result == NeuralTypeComparisonResult.SAME:
return NeuralTypeComparisonResult.DIM_INCOMPATIBLE
else:
return NeuralTypeComparisonResult.INCOMPATIBLE
else:
return NeuralTypeComparisonResult.INCOMPATIBLE
def compare_and_raise_error(self, parent_type_name, port_name, second_object):
""" Method compares definition of one type with another and raises an error if not compatible. """
type_compatibility = self.compare(second_object)
if (
type_compatibility != NeuralTypeComparisonResult.SAME
and type_compatibility != NeuralTypeComparisonResult.GREATER
):
raise NeuralPortNmTensorMismatchError(
parent_type_name, port_name, str(self), str(second_object.ntype), type_compatibility
)
def __eq__(self, other):
if isinstance(other, NeuralType):
return self.compare(other)
return False
@staticmethod
def __check_sanity(axes):
# check that list dimensions come before any tensor dimensions
are_strings = True
for axis in axes:
if not isinstance(axis, str):
are_strings = False
if isinstance(axis, str) and not are_strings:
raise ValueError("Either use full class names or all strings")
if are_strings:
return
checks_passed = True
saw_tensor_dim = False
for axis in axes:
if not axis.is_list:
saw_tensor_dim = True
else: # current axis is a list
if saw_tensor_dim: # which is preceded by tensor dim
checks_passed = False
if not checks_passed:
raise ValueError(
"You have a list dimension after a Tensor dimension. All list dimensions must precede Tensor dimensions"
)
@staticmethod
def __compare_axes(axes_a, axes_b) -> int:
"""
Compares axes_a and axes_b
Args:
axes_a: first axes tuple
axes_b: second axes tuple
Returns:
0 - if they are exactly the same
1 - if they are "TRANSPOSE_SAME"
2 - if they are "DIM_INCOMPATIBLE"
3 - if they are different
"""
if axes_a is None and axes_b is None:
return 0
elif axes_a is None and axes_b is not None:
return 3
elif axes_a is not None and axes_b is None:
return 3
elif len(axes_a) != len(axes_b):
return 3
# After these ifs we know that len(axes_a) == len(axes_b)
same = True
kinds_a = dict()
kinds_b = dict()
for axis_a, axis_b in zip(axes_a, axes_b):
kinds_a[axis_a.kind] = axis_a.size
kinds_b[axis_b.kind] = axis_b.size
if axis_a.kind == AxisKind.Any:
same = True
elif (
axis_a.kind != axis_b.kind
or axis_a.is_list != axis_b.is_list
or (axis_a.size != axis_b.size and axis_a.size is not None)
):
same = False
if same:
return 0
else:
# can be TRANSPOSE_SAME, DIM_INCOMPATIBLE
if kinds_a.keys() == kinds_b.keys():
for key, value in kinds_a.items():
if kinds_b[key] != value:
return 2
return 1
else:
return 3
def __repr__(self):
if self.axes is not None:
axes = str(self.axes)
else:
axes = "None"
if self.elements_type is not None:
element_type = repr(self.elements_type)
else:
element_type = "None"
data = f"axis={axes}, element_type={element_type}"
if self.optional:
data = f"{data}, optional={self.optional}"
final = f"{self.__class__.__name__}({data})"
return final
class NeuralTypeError(Exception):
"""Base class for neural type related exceptions."""
class NeuralPortNameMismatchError(NeuralTypeError):
"""Exception raised when neural module is called with incorrect port
names."""
def __init__(self, input_port_name):
super().__init__()
self.message = "Wrong input port name: {0}".format(input_port_name)
class NeuralPortNmTensorMismatchError(NeuralTypeError):
"""Exception raised when a port is fed with a NmTensor of incompatible
type."""
def __init__(self, class_name, port_name, first_type, second_type, type_compatibility):
super().__init__()
self.message = "\nIn {}. \nPort: {} and a NmTensor it was fed are \n".format(class_name, port_name)
self.message += "of incompatible neural types:\n\n{} \n\n and \n\n{}".format(first_type, second_type)
self.message += "\n\nType comparison result: {}".format(type_compatibility)
|
NeMo-main
|
nemo/core/neural_types/neural_type.py
|
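The axis and element comparison rules above compose into NeuralType.compare as sketched below; this illustrative snippet is not part of the file, and the axis tuples use the short string form described in the docstring.

```
from nemo.core.neural_types import LogitsType, NeuralType
from nemo.core.neural_types.comparison import NeuralTypeComparisonResult

btd = NeuralType(axes=('B', 'T', 'D'), elements_type=LogitsType())
same = NeuralType(axes=('B', 'T', 'D'), elements_type=LogitsType())
transposed = NeuralType(axes=('T', 'B', 'D'), elements_type=LogitsType())

assert btd.compare(same) == NeuralTypeComparisonResult.SAME
assert btd.compare(transposed) == NeuralTypeComparisonResult.TRANSPOSE_SAME

# A NeuralType with no axes and the default VoidType element is the "big void":
# it accepts anything.
assert NeuralType().compare(btd) == NeuralTypeComparisonResult.SAME
```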
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.neural_types.axes import *
from nemo.core.neural_types.comparison import *
from nemo.core.neural_types.elements import *
from nemo.core.neural_types.neural_type import *
|
NeMo-main
|
nemo/core/neural_types/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
__all__ = ['NeuralTypeComparisonResult']
class NeuralTypeComparisonResult(Enum):
"""The result of comparing two neural type objects for compatibility.
When comparing A.compare(B):"""
SAME = 0
LESS = 1 # A is B
GREATER = 2 # B is A
DIM_INCOMPATIBLE = 3 # Resize connector might fix incompatibility
TRANSPOSE_SAME = 4 # A transpose and/or converting between lists and tensors will make them same
CONTAINER_SIZE_MISMATCH = 5 # A and B contain different number of elements
INCOMPATIBLE = 6 # A and B are incompatible
SAME_TYPE_INCOMPATIBLE_PARAMS = 7 # A and B are of the same type but parametrized differently
UNCHECKED = 8 # type comparison wasn't done
|
NeMo-main
|
nemo/core/neural_types/comparison.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Optional
__all__ = ['AxisKindAbstract', 'AxisKind', 'AxisType']
class AxisKindAbstract(Enum):
"""This is an abstract Enum that represents what varying a given axis dimension means.
In practice, you will almost always use the AxisKind Enum. This Enum should be inherited by
your OWN Enum if you aren't satisfied with AxisKind. Then your own Enum can be used
instead of AxisKind."""
pass
class AxisKind(AxisKindAbstract):
"""This Enum represents what varying a given axis dimension means.
For example, does this dimension correspond to width, batch, time, etc.
The "Dimension" and "Channel" kinds are the same and used to represent
a general axis. "Any" axis will accept any axis kind fed to it.
"""
Batch = 0
Time = 1
Dimension = 2
Channel = 2
Width = 3
Height = 4
Any = 5
Sequence = 6
FlowGroup = 7
Singleton = 8 # Used to represent an axis that has size 1
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.name).lower()
def t_with_string(self, text):
# it checks if text is "t_<any string>"
return text.startswith("t_") and text.endswith("_") and text[2:-1] == self.__str__()
@staticmethod
def from_str(label):
"""Returns AxisKind instance based on short string representation"""
_label = label.lower().strip()
if _label == "b" or _label == "n" or _label == "batch":
return AxisKind.Batch
elif _label == "t" or _label == "time" or (len(_label) > 2 and _label.startswith("t_")):
return AxisKind.Time
elif _label == "d" or _label == "c" or _label == "channel":
return AxisKind.Dimension
elif _label == "w" or _label == "width":
return AxisKind.Width
elif _label == "h" or _label == "height":
return AxisKind.Height
elif _label == "s" or _label == "singleton":
return AxisKind.Singleton
elif _label == "seq" or _label == "sequence":
return AxisKind.Sequence
elif _label == "flowgroup":
return AxisKind.FlowGroup
elif _label == "any":
return AxisKind.Any
else:
raise ValueError(f"Can't create AxisKind from {label}")
class AxisType(object):
"""This class represents axis semantics and (optionally) its dimensionality
Args:
kind (AxisKindAbstract): what kind of axis it is? For example Batch, Height, etc.
size (int, optional): specify if the axis should have a fixed size. By default it is set to None and you
typically do not want to set it for Batch and Time
is_list (bool, default=False): whether this is a list or a tensor axis
"""
def __init__(self, kind: AxisKindAbstract, size: Optional[int] = None, is_list=False):
if size is not None and is_list:
raise ValueError("The axis can't be list and have a fixed size")
self.kind = kind
self.size = size
self.is_list = is_list
def __repr__(self):
if self.size is None:
representation = str(self.kind)
else:
representation = f"{str(self.kind)}:{self.size}"
if self.is_list:
representation += "_listdim"
return representation
|
NeMo-main
|
nemo/core/neural_types/axes.py
|
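A few concrete mappings for the helpers above; the labels are the same short strings used by NeuralType axis tuples, and the snippet is purely illustrative.

```
from nemo.core.neural_types.axes import AxisKind, AxisType

assert AxisKind.from_str('B') == AxisKind.Batch
assert AxisKind.from_str('t_audio') == AxisKind.Time   # "t_<suffix>" labels resolve to Time
assert AxisKind.Dimension is AxisKind.Channel          # enum aliases for the same value

# An axis can optionally carry a fixed size, which shows up in its repr.
height = AxisType(kind=AxisKind.Height, size=224)
assert repr(height) == 'height:224'
```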
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
__all__ = ['Config']
@dataclass
class Config:
"""
Abstract NeMo Configuration class.
Args:
name: name of the module/dataset/loss/model object (used in serialization, DEFAULT: None)
"""
name: Optional[str] = None
|
NeMo-main
|
nemo/core/config/base_config.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.config.base_config import Config
from nemo.core.config.hydra_runner import hydra_runner
from nemo.core.config.optimizers import (
AdadeltaParams,
AdagradParams,
AdamaxParams,
AdamParams,
AdamWParams,
NovogradParams,
OptimizerParams,
RMSpropParams,
RpropParams,
SGDParams,
get_optimizer_config,
register_optimizer_params,
)
from nemo.core.config.pytorch import DataLoaderConfig
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.core.config.schedulers import (
CosineAnnealingParams,
InverseSquareRootAnnealingParams,
NoamAnnealingParams,
PolynomialDecayAnnealingParams,
PolynomialHoldDecayAnnealingParams,
SchedulerParams,
SquareAnnealingParams,
SquareRootAnnealingParams,
SquareRootConstantSchedulerParams,
WarmupAnnealingParams,
WarmupHoldSchedulerParams,
WarmupSchedulerParams,
get_scheduler_config,
register_scheduler_params,
)
|
NeMo-main
|
nemo/core/config/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from functools import partial
from typing import Any, Dict, Optional, Tuple
from omegaconf import MISSING, OmegaConf
__all__ = [
'OptimizerParams',
'AdamParams',
'NovogradParams',
'SGDParams',
'AdadeltaParams',
'AdamaxParams',
'AdagradParams',
'AdamWParams',
'RMSpropParams',
'RpropParams',
]
@dataclass
class OptimizerParams:
"""
Base optimizer params with no default values. Users can choose to explicitly override them via
command-line arguments.
"""
lr: Optional[float] = MISSING
@dataclass
class SGDParams(OptimizerParams):
"""
Default configuration for SGD optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html?highlight=sgd#torch.optim.SGD
"""
momentum: float = 0
dampening: float = 0
weight_decay: float = 0
nesterov: bool = False
@dataclass
class AdamParams(OptimizerParams):
"""
Default configuration for Adam optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html?highlight=adam#torch.optim.Adam
"""
# betas: Tuple[float, float] = (0.9, 0.999)
eps: float = 1e-08
weight_decay: float = 0
amsgrad: bool = False
@dataclass
class AdamWParams(OptimizerParams):
"""
Default configuration for AdamW optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html#torch.optim.AdamW
"""
betas: Tuple[float, float] = (0.9, 0.999)
eps: float = 1e-08
weight_decay: float = 0
amsgrad: bool = False
@dataclass
class AdadeltaParams(OptimizerParams):
"""
Default configuration for Adadelta optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html#torch.optim.Adadelta
"""
rho: float = 0.9
eps: float = 1e-6
weight_decay: float = 0
@dataclass
class AdamaxParams(OptimizerParams):
"""
Default configuration for Adamax optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html#torch.optim.Adamax
"""
betas: Tuple[float, float] = (0.9, 0.999)
eps: float = 1e-8
weight_decay: float = 0
@dataclass
class AdagradParams(OptimizerParams):
"""
Default configuration for Adagrad optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html#torch.optim.Adagrad
"""
lr_decay: float = 0
weight_decay: float = 0
initial_accumulator_value: float = 0
eps: float = 1e-10
@dataclass
class RMSpropParams(OptimizerParams):
"""
Default configuration for RMSprop optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html#torch.optim.RMSprop
"""
alpha: float = 0.99
eps: float = 1e-8
weight_decay: float = 0
momentum: float = 0
centered: bool = False
@dataclass
class RpropParams(OptimizerParams):
"""
Default configuration for RpropParams optimizer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/optim.html#torch.optim.Rprop
"""
etas: Tuple[float, float] = (0.5, 1.2)
step_sizes: Tuple[float, float] = (1e-6, 50)
@dataclass
class NovogradParams(OptimizerParams):
"""
Configuration of the Novograd optimizer.
It has been proposed in "Stochastic Gradient Methods with Layer-wise
Adaptive Moments for Training of Deep Networks"
(https://arxiv.org/abs/1905.11286)
Args:
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and Beyond"
"""
betas: Tuple[float, float] = (0.95, 0.98)
eps: float = 1e-8
weight_decay: float = 0
grad_averaging: bool = False
amsgrad: bool = False
luc: bool = False
luc_trust: float = 1e-3
luc_eps: float = 1e-8
@dataclass
class AdafactorParams(OptimizerParams):
"""
Configuration of the Adafactor optimizer.
It has been proposed in "Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"
(https://arxiv.org/abs/1804.04235)
Args:
lr (float, optional): learning rate (default: 1e-3)
beta1 (float, optional): coefficient used for computing the running average of the gradient (default: None)
eps (Tuple[float, float], optional): regularization constants (default: (1e-30, 1e-3))
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool, optional): scale parameter (default: True)
relative_step (bool, optional): whether to use relative step sizes (default: False)
warmup_init (bool, optional): whether to warmup the learning rate linearly (default: False)
"""
beta1: Optional[float] = None
eps: Tuple[float, float] = (1e-30, 1e-3)
clip_threshold: float = 1.0
decay_rate: float = 0.8
weight_decay: float = 0
scale_parameter: bool = True
relative_step: bool = False
warmup_init: bool = False
def register_optimizer_params(name: str, optimizer_params: OptimizerParams):
"""
Checks if the optimizer param name exists in the registry, and if it doesn't, adds it.
This allows custom optimizer params to be added and called by name during instantiation.
Args:
name: Name of the optimizer. Will be used as key to retrieve the optimizer.
optimizer_params: Optimizer class
"""
if name in AVAILABLE_OPTIMIZER_PARAMS:
raise ValueError(f"Cannot override pre-existing optimizers. Conflicting optimizer name = {name}")
AVAILABLE_OPTIMIZER_PARAMS[name] = optimizer_params
def get_optimizer_config(name: str, **kwargs: Optional[Dict[str, Any]]) -> OptimizerParams:
"""
Convenience method to obtain an OptimizerParams class and partially instantiate it with optimizer kwargs.
Args:
name: Name of the OptimizerParams in the registry.
kwargs: Optional kwargs of the optimizer used during instantiation.
Returns:
a partially instantiated OptimizerParams
"""
if name is None:
return kwargs
if name not in AVAILABLE_OPTIMIZER_PARAMS:
raise ValueError(
f"Cannot resolve optimizer parameters '{name}'. Available optimizer parameters are : "
f"{AVAILABLE_OPTIMIZER_PARAMS.keys()}"
)
optimizer_params = AVAILABLE_OPTIMIZER_PARAMS[name]
if kwargs is not None and len(kwargs) != 0:
kwargs = OmegaConf.create(kwargs)
OmegaConf.merge(optimizer_params(), kwargs)  # validates the overrides against the dataclass schema
optimizer_params = partial(optimizer_params, **kwargs)
return optimizer_params
AVAILABLE_OPTIMIZER_PARAMS = {
'optim_params': OptimizerParams,
'adam_params': AdamParams,
'novograd_params': NovogradParams,
'sgd_params': SGDParams,
'adadelta_params': AdadeltaParams,
'adamax_params': AdamaxParams,
'adagrad_params': AdagradParams,
'adamw_params': AdamWParams,
'rmsprop_params': RMSpropParams,
'rprop_params': RpropParams,
'adafactor_params': AdafactorParams,
}
|
NeMo-main
|
nemo/core/config/optimizers.py
|
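A hedged usage sketch for the registry helpers above; the name 'my_adam_params' and the dataclass MyAdamParams are hypothetical and exist only for illustration.

```
from dataclasses import dataclass

from nemo.core.config.optimizers import AdamParams, get_optimizer_config, register_optimizer_params

@dataclass
class MyAdamParams(AdamParams):
    # hypothetical custom params: override a single default on top of AdamParams
    weight_decay: float = 0.01

register_optimizer_params(name='my_adam_params', optimizer_params=MyAdamParams)

# get_optimizer_config returns a partially instantiated factory with the overrides applied.
params_factory = get_optimizer_config('my_adam_params', lr=1e-4)
params = params_factory()
assert params.lr == 1e-4 and params.weight_decay == 0.01
```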
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Optional
from hydra.core.config_store import ConfigStore
__all__ = ['TrainerConfig']
cs = ConfigStore.instance()
@dataclass
class TrainerConfig:
"""
Configuration of PyTorch Lightning Trainer.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..warning:
Picked just a few params of the PTL trainer for now. This needs to be discussed.
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html
"""
logger: Any = True
callbacks: Optional[Any] = None
default_root_dir: Optional[str] = None
gradient_clip_val: float = 0
num_nodes: int = 1
enable_progress_bar: bool = True
overfit_batches: Any = 0.0
check_val_every_n_epoch: int = 1
fast_dev_run: bool = False
accumulate_grad_batches: Any = 1
max_epochs: int = 1000
min_epochs: int = 1
max_steps: Optional[int] = -1
min_steps: Optional[int] = None
limit_train_batches: Any = 1.0
limit_val_batches: Any = 1.0
limit_test_batches: Any = 1.0
val_check_interval: Any = 1.0
log_every_n_steps: int = 50
accelerator: Optional[str] = None
sync_batchnorm: bool = False
precision: Any = 32
num_sanity_val_steps: int = 2
profiler: Optional[Any] = None
benchmark: bool = False
deterministic: bool = False
use_distributed_sampler: bool = True
detect_anomaly: bool = False
plugins: Optional[Any] = None # Optional[Union[str, list]]
limit_predict_batches: float = 1.0
gradient_clip_algorithm: str = 'norm'
max_time: Optional[Any] = None # can be one of Union[str, timedelta, Dict[str, int], None]
reload_dataloaders_every_n_epochs: int = 0
devices: Any = None
strategy: Any = None
enable_checkpointing: bool = False
enable_model_summary: bool = True
inference_mode: bool = True
barebones: bool = False
# Register the trainer config.
cs.store(
group="trainer", name="trainer", node=TrainerConfig,
)
|
NeMo-main
|
nemo/core/config/pytorch_lightning.py
|
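TrainerConfig above is a plain structured-config dataclass; the sketch below shows the usual OmegaConf pattern of building it and layering overrides on top, mirroring how Hydra/NeMo merge YAML or CLI overrides (the specific override values are arbitrary).

```
from omegaconf import OmegaConf

from nemo.core.config.pytorch_lightning import TrainerConfig

base = OmegaConf.structured(TrainerConfig())
overrides = OmegaConf.create({'max_epochs': 5, 'devices': 1, 'accelerator': 'gpu'})
cfg = OmegaConf.merge(base, overrides)

assert cfg.max_epochs == 5 and cfg.devices == 1
# The resolved container can then be splatted into pytorch_lightning.Trainer(**...).
```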
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Optional
from omegaconf import MISSING
__all__ = ['DataLoaderConfig']
@dataclass
class DataLoaderConfig:
"""
Configuration of PyTorch DataLoader.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
..note:
For the details on the function/meanings of the arguments, please refer to:
https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
"""
batch_size: int = MISSING
shuffle: bool = False
sampler: Optional[Any] = None
batch_sampler: Optional[Any] = None
num_workers: int = 0
collate_fn: Optional[Any] = None
pin_memory: bool = False
drop_last: bool = False
timeout: int = 0
worker_init_fn: Optional[Any] = None
multiprocessing_context: Optional[Any] = None
|
NeMo-main
|
nemo/core/config/pytorch.py
|
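DataLoaderConfig mirrors the arguments of torch.utils.data.DataLoader, so a resolved config can be splatted straight into the DataLoader constructor; the toy TensorDataset below is purely illustrative.

```
import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, TensorDataset

from nemo.core.config.pytorch import DataLoaderConfig

cfg = OmegaConf.structured(DataLoaderConfig(batch_size=4, shuffle=True))
dataset = TensorDataset(torch.arange(16, dtype=torch.float32).unsqueeze(1))

loader = DataLoader(dataset, **OmegaConf.to_container(cfg, resolve=True))
assert next(iter(loader))[0].shape[0] == 4
```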