from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import random
import time
from .ops import average_gradients, _variable_with_weight_decay, _variable_on_cpu
import phenograph
from sklearn.cluster import KMeans
def train_large(train_data_path,
file_num,
cell_size,
gene_size,
latent_code_dim,
exp_batch_idx=0,
use_mask=True,
batch_size=64,
max_epoch=100,
epoch_per_check=100,
T=2,
encoder_layers=[],
decoder_layers=[],
learning_rate=0.0001,
beta1=0.05,
num_gpus=1
):
'''
scScope training:
This function is used to train the scScope model on gene expression data
Parameters:
train_data_path: File path of multiple small gene expression files. Each file is a cell_size * gene_size matrix stored in *.npy format.
Files are named in "batch_0.npy", "batch_1.npy", ...
file_num: Number of gene expression files in "train_data_path".
cell_size: Cell numbers in each expression file. All files should include the same number of cells.
gene_size: Gene numbers in each expression file. All files should include the same number of genes.
        exp_batch_idx: Number of experimental batches in the sequencing. If exp_batch_idx = 0, no batch information needs to be provided.
Otherwise, experimental batch labels are stored in "exp_batch_label_0.npy", "exp_batch_label_1.npy", ..., corresponding to each data batch file.
In each file, experimental batch labels are stored in an n * batch_num matrix in one-hot format. Experimental batch labels and data batch files
are in the same directory.
latent_code_dim: The feature dimension outputted by scScope.
batch_size: Number of cells used in each training iteration.
max_epoch: Maximal epoch used in training.
        epoch_per_check: Number of epochs between checks of the current reconstruction error.
        T: Depth of recurrence used in deep learning framework.
        use_mask: Flag indicating whether to use only non-zero entries in calculating losses.
        learning_rate: Step size of the gradient descent algorithm.
        beta1: The beta1 parameter in AdamOptimizer.
        num_gpus: Number of GPUs used for training in parallel.
Output:
        model: a dictionary of scScope outputs with keys:
            'latent_code_session': tensorflow session used in training.
            'test_input': tensorflow placeholder for test data.
            'test_exp_batch_idx': tensorflow placeholder for experimental batch labels.
            'imputated_output': imputed gene expressions.
            'latent_code': latent features learned by scScope.
            'removed_batch_effect': batch-effect correction layer learned by scScope.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
batch_size = int(batch_size * num_gpus)
learning_rate = learning_rate * num_gpus
if exp_batch_idx == 0:
exp_batch_idx_input = np.zeros((cell_size, 1))
consider_exp_batch = False
else:
consider_exp_batch = True
with tf.Graph().as_default(), tf.device('/cpu:0'):
        train_data = tf.placeholder(
            tf.float32, [batch_size, gene_size])
        # Keep the integer number of experimental batches before the name is
        # reused for the placeholder below; the placeholder needs at least one
        # column even when no batch labels are provided.
        exp_batch_num = exp_batch_idx
        exp_batch_idx = tf.placeholder(tf.float32,
                                       [batch_size, max(exp_batch_num, 1)])
# Create an optimizer that performs gradient descent.
opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1)
        # Calculate the gradients on the model replica deployed on each GPU, then average the gradients.
tower_grads = []
tower_grads2 = []
with tf.variable_scope(tf.get_variable_scope()):
for i in range(num_gpus):
print('Building Computational Graph on GPU-' + str(i))
with tf.device('/gpu:%d' % (i + 1)):
with tf.name_scope('%s_%d' % ('tower', i)) as scope:
itv = int(batch_size / num_gpus)
if i == 0:
re_use_flag = False
else:
re_use_flag = True
loss = tower_loss(scope,
train_data[(i) *
itv:(i + 1) * itv, :],
use_mask,
latent_code_dim,
T,
encoder_layers,
decoder_layers,
exp_batch_idx[(
i) * itv:(i + 1) * itv, :],
re_use_flag)
tf.get_variable_scope().reuse_variables()
t_vars = tf.trainable_variables()
inference_para = [
var for var in t_vars if 'inference' in var.name]
grads = opt.compute_gradients(loss, inference_para)
tower_grads.append(grads)
if consider_exp_batch:
exp_batch_effect_para = [
var for var in t_vars if 'batch_effect_removal' in var.name]
grads2 = opt.compute_gradients(
loss, exp_batch_effect_para)
tower_grads2.append(grads2)
# Summarize gradients from multiple GPUs.
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads)
train_op = apply_gradient_op
if consider_exp_batch:
grads2 = average_gradients(tower_grads2)
apply_gradient_op2 = opt.apply_gradients(grads2)
train_op2 = apply_gradient_op2
init = tf.global_variables_initializer()
# Configuration of GPU.
config_ = tf.ConfigProto()
config_.gpu_options.allow_growth = True
config_.allow_soft_placement = True
sess = tf.Session(config=config_)
sess.run(init)
reconstruction_error = []
start = time.time()
for step in range(1, max_epoch + 1):
for file_count in range(file_num):
train_data_real_val = np.load(
train_data_path + '/batch_' + str(file_count) + '.npy')
                if exp_batch_num > 0:
exp_batch_idx_input = np.load(
train_data_path + '/exp_batch_label_' + str(file_count) + '.npy')
total_data_size = np.shape(train_data_real_val)[0]
total_sample_list = list(range(total_data_size))
total_cnt = total_data_size / (batch_size)
for itr_cnt in range(int(total_cnt)):
sel_pos = random.sample(total_sample_list, batch_size)
cur_data = train_data_real_val[sel_pos, :]
cur_exp_batch_idx = exp_batch_idx_input[sel_pos, :]
sess.run(train_op,
feed_dict={train_data: cur_data,
exp_batch_idx: cur_exp_batch_idx})
if consider_exp_batch:
sess.run(train_op2,
feed_dict={train_data: cur_data,
exp_batch_idx: cur_exp_batch_idx})
if step % epoch_per_check == 0 and step > 0:
all_input = tf.placeholder(
tf.float32, [np.shape(train_data_real_val)[0], np.shape(train_data_real_val)[1]])
exp_batch_idx_all = tf.placeholder(
tf.float32, [np.shape(exp_batch_idx_input)[0], np.shape(exp_batch_idx_input)[1]])
layer_output, train_latent_code, _ = Inference(
all_input, latent_code_dim, T, encoder_layers, decoder_layers, exp_batch_idx_all, re_use=True)
train_code_val, layer_output_val = sess.run(
[train_latent_code[-1], layer_output[-1]],
feed_dict={all_input: train_data_real_val, exp_batch_idx_all: exp_batch_idx_input})
mask = np.sign(train_data_real_val)
recon_error = np.linalg.norm(np.multiply(mask, layer_output_val) - np.multiply(
mask, train_data_real_val)) / np.linalg.norm(np.multiply(mask, train_data_real_val))
reconstruction_error.append(recon_error)
print("Finisheded epoch:" + str(step))
print('Current reconstruction error is: ' + str(recon_error))
if len(reconstruction_error) >= 2:
if (abs(reconstruction_error[-1] - reconstruction_error[-2]) / reconstruction_error[-2] < 1e-3):
break
model = {}
test_data_holder = tf.placeholder(
tf.float32, [None, gene_size])
        test_exp_batch_idx = tf.placeholder(
            tf.float32, [None, max(exp_batch_num, 1)])
test_layer_out, test_latent_code, removed_batch_effect = Inference(
test_data_holder, latent_code_dim, T, encoder_layers, decoder_layers, test_exp_batch_idx, re_use=True)
model['latent_code_session'] = sess
model['test_input'] = test_data_holder
model['test_exp_batch_idx'] = test_exp_batch_idx
model['imputated_output'] = test_layer_out
model['latent_code'] = test_latent_code
model['removed_batch_effect'] = removed_batch_effect
duration = time.time() - start
        print('Finished training ' + str(file_num * cell_size) + ' samples after ' + str(
            step) + ' epochs. The total training time is ' +
            str(duration) + ' seconds.')
return model
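# Usage sketch for train_large (hypothetical directory layout and sizes;
# assumes 10 expression files of 5,000 cells x 20,000 genes saved as
# train_dir/batch_0.npy ... train_dir/batch_9.npy):
#
#   model = train_large('train_dir', file_num=10, cell_size=5000,
#                       gene_size=20000, latent_code_dim=50, T=2,
#                       batch_size=64, max_epoch=100, num_gpus=1)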
def predict_large(train_data_path,
file_num,
model,
exp_batch_idx=0):
'''
    Output the latent features and imputed expression for a large-scale dataset after training the model.
Parameters:
train_data_path: The same data path as in "train_large()".
file_num: Number of data files in train_data_path.
exp_batch_idx: Number of experimental batches in sequencing. If exp_batch_idx=0, the function is run without batch correction.
model: The pre-trained model by "train_large()".
Output:
Latent features and imputed genes for each data file.
For data file "batch_i.npy", corresponding latent features and imputed gene expressions are stored in
"feature_i.npy" and "imputation_i.npy" files respectively in the same directory.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
for file_count in range(file_num):
train_data = np.load(
train_data_path + '/batch_' + str(file_count) + '.npy')
if exp_batch_idx > 0:
batch_effect = np.load(
train_data_path + '/exp_batch_label_' + str(file_count) + '.npy')
else:
batch_effect = []
latent_fea, output_val, predicted_batch_effect = predict(
train_data, model, batch_effect=batch_effect)
np.save(train_data_path + '/feature_' +
str(file_count) + '.npy', latent_fea)
np.save(train_data_path + '/imputation_' +
str(file_count) + '.npy', output_val)
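# Usage sketch for predict_large (hypothetical; `model` is the dictionary
# returned by train_large on the same directory layout). Latent features and
# imputed expressions are written back into the data directory as
# feature_i.npy and imputation_i.npy:
#
#   predict_large('train_dir', file_num=10, model=model)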
def predict(test_data, model, batch_effect=[]):
'''
    Make predictions using the learned scScope model.
    Parameter:
        test_data: gene expression matrix on which to make predictions.
        model: pre-trained scScope model.
    Output:
        latent_fea: scScope features for the input gene expressions.
        output_val: gene expressions with imputation.
        predicted_batch_effect: batch effects inferred by scScope, if experimental batches exist.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
sess = model['latent_code_session']
test_data_holder = model['test_input']
test_exp_batch_idx_holder = model['test_exp_batch_idx']
output = model['imputated_output']
latent_code = model['latent_code']
removed_batch_effect = model['removed_batch_effect']
if len(batch_effect) == 0:
batch_effect_idx = np.zeros((np.shape(test_data)[0], 1))
else:
batch_effect_idx = batch_effect
for i in range(len(latent_code)):
latent_code_val, output_val, predicted_batch_effect = sess.run(
[latent_code[i], output[i], removed_batch_effect], feed_dict={
test_data_holder: test_data, test_exp_batch_idx_holder: batch_effect_idx})
if i == 0:
latent_fea = latent_code_val
output_total = output_val
else:
latent_fea = np.concatenate([latent_fea, latent_code_val], 1)
output_total = output_total + output_val
output_val = output_total / len(latent_code)
return latent_fea, output_val, predicted_batch_effect
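# Usage sketch for predict on an in-memory matrix (hypothetical variables:
# `test_matrix` is a cells x genes numpy array, `model` comes from
# train_large). The latent codes of the T recurrences are concatenated and
# the imputed outputs are averaged:
#
#   latent, imputed, batch_eff = predict(test_matrix, model)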
def Inference(input_d, latent_code_dim, T, encoder_layers, decoder_layer, exp_batch_idx=[], re_use=False):
'''
The deep neural network structure of scScope
Parameters:
input_d: gene expression matrix of dim n * m; n = number of cells, m = number of genes.
latent_code_dim: the dimension of features outputted by scScope.
T: number of recurrent structures used in deep learning framework.
        encoder_layers: list of sizes of the encoder hidden layers (an empty list adds no extra encoder layers).
        decoder_layer: list of sizes of the decoder hidden layers (an empty list adds no extra decoder layers).
exp_batch_idx: if provided, experimental batch labels are stored in an n * batch_num matrix in one-hot format.
re_use: if re-use variables in training.
Output:
output_list: outputs of decoder (y_c in the paper) in T recurrent structures.
latent_code_list: latent representations (h_c in the paper) in T recurrent structures.
batch_effect_removal_layer: experimental batch effects inferred by scScope.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
input_shape = input_d.get_shape().as_list()
input_dim = input_shape[1]
with tf.variable_scope('scScope') as scope_all:
if re_use == True:
scope_all.reuse_variables()
latent_code_list = []
output_list = []
exp_batch_id_shape = exp_batch_idx.get_shape().as_list()
exp_batch_dim = exp_batch_id_shape[1]
with tf.variable_scope('batch_effect_removal'):
batch_effect_para_weight = _variable_with_weight_decay('batch_effect_weight',
[exp_batch_dim,
input_dim],
stddev=0, wd=0)
batch_effect_removal_layer = tf.matmul(
exp_batch_idx, batch_effect_para_weight)
with tf.variable_scope('inference'):
for i in range(T):
if i == 0:
encoder_layer_list_W = []
encoder_layer_list_b = []
if len(encoder_layers) > 0:
for l in range(len(encoder_layers)):
if l == 0:
encoder_layer_list_W.append(_variable_with_weight_decay('encoder_layer' + str(l),
[input_dim,
encoder_layers[l]],
stddev=0.1, wd=0))
encoder_layer_list_b.append(
_variable_on_cpu('encoder_layer_bias' + str(l), [encoder_layers[l]],
tf.constant_initializer(0)))
else:
encoder_layer_list_W.append(_variable_with_weight_decay('encoder_layer' + str(l),
[encoder_layers[l - 1],
encoder_layers[l]],
stddev=0.1, wd=0))
encoder_layer_list_b.append(
_variable_on_cpu('encoder_layer_bias' + str(l), [encoder_layers[l]],
tf.constant_initializer(0)))
latent_code_layer_input_dim = encoder_layers[-1]
else:
latent_code_layer_input_dim = input_dim
W_fea = _variable_with_weight_decay('latent_layer_weights',
[latent_code_layer_input_dim,
latent_code_dim],
stddev=0.1, wd=0)
b_fea = _variable_on_cpu('latent_layer_bias', [latent_code_dim],
tf.constant_initializer(0))
decoder_layer_list_W = []
decoder_layer_list_b = []
if len(decoder_layer) > 0:
for l in range(len(decoder_layer)):
if l == 0:
decoder_layer_list_W.append(_variable_with_weight_decay('dencoder_layer' + str(l),
[latent_code_dim,
decoder_layer[l]],
stddev=0.1, wd=0))
decoder_layer_list_b.append(
_variable_on_cpu('decoder_layer_bias' + str(l), [decoder_layer[l]],
tf.constant_initializer(0)))
else:
decoder_layer_list_W.append(_variable_with_weight_decay('dencoder_layer' + str(l),
[decoder_layer[l - 1],
decoder_layer[l]],
stddev=0.1, wd=0))
decoder_layer_list_b.append(
_variable_on_cpu('decoder_layer_bias' + str(l), [decoder_layer[l]],
tf.constant_initializer(0)))
decoder_last_layer_dim = decoder_layer[-1]
else:
decoder_last_layer_dim = latent_code_dim
W_recon = _variable_with_weight_decay('reconstruction_layer_weights',
[decoder_last_layer_dim,
input_dim],
stddev=0.1, wd=0)
b_recon = _variable_on_cpu('reconstruction_layer_bias', [input_dim],
tf.constant_initializer(0))
input_vec = tf.nn.relu(
input_d - batch_effect_removal_layer)
else:
if i == 1:
W_feedback_1 = _variable_with_weight_decay('impute_layer_weights',
[input_dim, 64],
stddev=0.1, wd=0)
b_feedback_1 = _variable_on_cpu(
'impute_layer_bias', [64], tf.constant_initializer(0))
W_feedback_2 = _variable_with_weight_decay('impute_layer_weights2',
[64, input_dim],
stddev=0.1, wd=0)
b_feedback_2 = _variable_on_cpu(
'impute_layer_bias2', [input_dim], tf.constant_initializer(0))
intermediate_layer = tf.nn.relu(
tf.matmul(output, W_feedback_1) + b_feedback_1)
imputation_layer = tf.multiply(
1 - tf.sign(input_d), (tf.matmul(intermediate_layer, W_feedback_2) + b_feedback_2))
input_vec = tf.nn.relu(
imputation_layer + input_d - batch_effect_removal_layer)
intermedate_encoder_layer_list = []
if len(encoder_layer_list_W) > 0:
                    for j in range(len(encoder_layer_list_W)):
                        if j == 0:
                            intermedate_encoder_layer_list.append(tf.nn.relu(
                                tf.matmul(input_vec, encoder_layer_list_W[j]) + encoder_layer_list_b[j]))
                        else:
                            intermedate_encoder_layer_list.append(tf.nn.relu(tf.matmul(
                                intermedate_encoder_layer_list[-1], encoder_layer_list_W[j]) + encoder_layer_list_b[j]))
intermedate_encoder_layer = intermedate_encoder_layer_list[-1]
else:
intermedate_encoder_layer = input_vec
latent_code = tf.nn.relu(
tf.matmul(intermedate_encoder_layer, W_fea) + b_fea)
inter_decoder_layer_list = []
if len(decoder_layer_list_W) > 0:
                    for j in range(len(decoder_layer_list_W)):
                        if j == 0:
                            inter_decoder_layer_list.append(tf.nn.relu(
                                tf.matmul(latent_code, decoder_layer_list_W[j]) + decoder_layer_list_b[j]))
                        else:
                            inter_decoder_layer_list.append(tf.nn.relu(tf.matmul(
                                inter_decoder_layer_list[-1], decoder_layer_list_W[j]) + decoder_layer_list_b[j]))
inter_decoder_layer = inter_decoder_layer_list[-1]
else:
inter_decoder_layer = latent_code
output = tf.nn.relu(
tf.matmul(inter_decoder_layer, W_recon) + b_recon)
latent_code_list.append(latent_code)
output_list.append(output)
return output_list, latent_code_list, batch_effect_removal_layer
def tower_loss(scope, batch_data, use_mask, latent_code_dim, T, encoder_layers, decoder_layers, exp_batch_id,
re_use_flag):
'''
Overall losses of scScope on multiple GPUs.
Parameter:
scope: tensorflow name scope
batch_data: cell batch for calculating the loss
        use_mask: flag indicating whether to use only non-zero genes to calculate losses.
        latent_code_dim: the dimension of features outputted by scScope.
        T: number of recurrent structures used in deep learning framework.
        encoder_layers: list of sizes of the encoder hidden layers.
        decoder_layers: list of sizes of the decoder hidden layers.
        exp_batch_id: one-hot experimental batch labels for the cells in batch_data.
        re_use_flag: whether to re-use variables in training.
    Output:
        total_loss: total loss of the tower on this GPU.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
layer_out, latent_code, batch_effect_removal_layer = Inference(
batch_data, latent_code_dim, T, encoder_layers, decoder_layers, exp_batch_id, re_use=re_use_flag)
_ = Cal_Loss(layer_out, batch_data, use_mask, batch_effect_removal_layer)
losses = tf.get_collection('losses', scope)
total_loss = tf.add_n(losses, name='total_loss')
return total_loss
def Cal_Loss(outpout_layer_list, input_data, use_mask, removed_exp_batch_effect):
'''
Loss function of scScope.
Parameter:
        outpout_layer_list: decoder outputs of the T recurrent structures in scScope.
        input_data: original gene expression matrix inputted into scScope.
        use_mask: flag indicating whether to use only non-zero genes to calculate losses.
        removed_exp_batch_effect: inferred experimental batch effect that is subtracted from the input before computing the loss.
Output:
acc_loss: loss function value.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
input_data_corrected = input_data - removed_exp_batch_effect
if use_mask:
val_mask = tf.sign(input_data_corrected)
else:
val_mask = tf.sign(input_data_corrected + 1)
for i in range(len(outpout_layer_list)):
layer_out = outpout_layer_list[i]
if i == 0:
reconstruct_loss = tf.reduce_mean(
tf.norm(tf.multiply(val_mask, (layer_out - input_data_corrected))))
else:
reconstruct_loss = reconstruct_loss + \
tf.reduce_mean(
tf.norm(tf.multiply(val_mask, (layer_out - input_data_corrected))))
acc_loss = reconstruct_loss
tf.add_to_collection('losses', acc_loss)
return acc_loss
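# Note on the mask above: tf.sign() of the (batch-corrected) expression matrix
# is non-zero only where the corrected input itself is non-zero, so with
# use_mask=True the reconstruction loss is evaluated only at observed entries
# and zero (dropout) entries are not penalized.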
def scalable_cluster(latent_code,
kmeans_num=500,
cluster_num=400,
display_step=50,
phenograh_neighbor=30
):
'''
Scalable cluster:
To perform graph clustering on large-scale data, we designed a scalable clustering strategy by combining k-means and PhenoGraph.
Briefly, we divide cells into M (kmeans_num) groups of equal size and perform K-means (cluster_num) clustering on each group independently.
    The whole dataset is thereby split into M×K clusters, and only the cluster centroids are input into PhenoGraph for graph clustering.
    Finally, each cell is assigned to a graph cluster according to the cluster label of its nearest centroid.
Parameters:
latent_code: n*m matrix; n = number of cells, m = dimension of feature representation.
kmeans_num: number of independent K-means clusterings used. This is also the subset number.
cluster_num: cluster number for each K-means clustering. This is also the "n_clusters" in KMeans function in sklearn package.
        display_step: interval (in number of subsets) at which K-means progress is printed.
phenograh_neighbor: "k" parameter in PhenoGraph package.
Output:
label: Cluster labels for input cells.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
'''
print('Scalable clustering:')
    print('Use %d subsets of cells for initial clustering...' % kmeans_num)
stamp = np.floor(np.linspace(0, latent_code.shape[0], kmeans_num + 1))
stamp = stamp.astype(int)
cluster_ceter = np.zeros([kmeans_num * cluster_num, latent_code.shape[1]])
mapping_sample_kmeans = np.zeros(latent_code.shape[0])
for i in range(kmeans_num):
low_bound = stamp[i]
upp_bound = stamp[i + 1]
sample_range = np.arange(low_bound, upp_bound)
select_sample = latent_code[sample_range, :]
kmeans = KMeans(n_clusters=cluster_num,
random_state=0).fit(select_sample)
label = kmeans.labels_
for j in range(cluster_num):
cluster_sample_idx = np.nonzero(label == j)[0]
cluster_sample = select_sample[cluster_sample_idx, :]
cluster_ceter[i * cluster_num + j,
:] = np.mean(cluster_sample, axis=0)
mapping_sample_kmeans[sample_range[cluster_sample_idx]
] = i * cluster_num + j
if i % display_step == 0:
print('\tK-means clustering for %d subset.' % i)
    print('Finished initial clustering by K-means.')
print('Start PhenoGraph clustering...\n')
label_pheno, graph, Q = phenograph.cluster(
cluster_ceter, k=phenograh_neighbor, n_jobs=1)
label = np.zeros(latent_code.shape[0])
for i in range(label_pheno.max() + 1):
center_index = np.nonzero(label_pheno == i)[0]
for j in center_index:
sample_index = np.nonzero(mapping_sample_kmeans == j)[
0] # samples belong to this center
label[sample_index] = i
    print('Finished scalable clustering.')
return label
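if __name__ == '__main__':
    # Minimal sketch of scalable_cluster on synthetic features (random data;
    # kmeans_num and cluster_num are reduced far below the defaults so the
    # demo runs quickly and are not recommended settings). Because of the
    # relative import above, run this as a module, e.g.
    # "python -m scscope.large_scale_processing".
    demo_latent = np.random.rand(2000, 50)
    demo_label = scalable_cluster(demo_latent, kmeans_num=10, cluster_num=5)
    print('Number of graph clusters found:', int(demo_label.max()) + 1)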
| path: /scScope_cpu-0.1.5.tar.gz/scScope_cpu-0.1.5/scscope/large_scale_processing.py | quality_prob: 0.773131 | learning_prob: 0.416797 | filename: large_scale_processing.py | kind: pypi |
import tensorflow as tf
def _variable_with_weight_decay(name, shape, stddev, wd):
"""
Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If 0, weight
            decay is not added for this Variable.
Returns:
Variable Tensor
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
"""
dtype = tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd != 0:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(
name, shape, initializer=initializer, dtype=dtype)
return var
def average_gradients(tower_grads):
""" Summarize the gradient calculated by each GPU.
Altschuler & Wu Lab 2018.
Software provided as is under Apache License 2.0.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, var1 in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
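if __name__ == '__main__':
    # Minimal sketch (synthetic shape): create a weight-decayed variable and
    # confirm that its L2 penalty was added to the 'losses' collection.
    with tf.Graph().as_default():
        demo_w = _variable_with_weight_decay('demo_w', [4, 3], stddev=0.1, wd=1e-4)
        print(tf.get_collection('losses'))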
| path: /scScope-0.1.5.tar.gz/scScope-0.1.5/scscope/ops.py | quality_prob: 0.844409 | learning_prob: 0.578865 | filename: ops.py | kind: pypi |
# scSplit [](https://doi.org/10.5281/zenodo.3464622)
### Genotype-free demultiplexing of pooled single-cell RNA-seq, using a hidden state model for identifying genetically distinct samples within a mixed population.
#### It has been tested on up to 8 real mixed samples (10X pipeline), and up to 32 mixed simulated samples
### How to install:
1) install python 3.6+
2) make sure below python packages can be imported:
math, numpy, pandas, pickle, pysam, PyVCF, scikit-learn, scipy, statistics
3) "git clone https://<span></span>github.com/jon-xu/scSplit" or "pip install scSplit"
4) run with "\<PATH\>/scSplit \<command\> \<args\>" or "python \<PATH\>/scSplit \<command\> \<args\>"
### Overall Pipeline:

### 1. Data quality control and filtering
a) Make sure the pooled scRNA-seq BAM file doesn't contain reads from unknown barcodes. You can do this by "grep -vFwf <whitelist> <xxx>.sam > qcresult" - searching for invalid reads in the SAM format of the source BAM using a file of whitelisted barcodes.
b) Filter the processed BAM so that reads with any of the following properties are removed: read quality lower than 10, unmapped segment, secondary alignment, not passing filters, PCR or optical duplicate, or supplementary alignment.
e.g. samtools view -S -b -q 10 -F 3844 processed.bam > filtered.bam
c) Remove duplicate reads from the BAM file, then get it sorted and indexed, using the rmdup, sort and index commands in samtools
### 2. Calling for single-nucleotide variants
a) Use freebayes v1.2 to call SNVs from the mixed sample BAM file after being processed in the first step. Set the parameters for freebayes so that no insertions or deletions (indels), multi-nucleotide polymorphisms (MNPs) or complex events are captured, set the minimum allele count to 2 and set the minimum base quality to 1.
e.g. freebayes -f <reference.fa> -iXu -C 2 -q 1 filtered.bam > snv.vcf
This step could take very long (up to 30 hours if not using parallel processing). To speed up the calling process, users can split the BAM by chromosome, call SNVs separately and merge the VCF files afterwards.
Users can opt to use GATK or other SNV calling tools as well.
b) The output VCF file should be further filtered so that only the SNVs with quality score larger than 30 would be kept.
### 3. Building allele count matrices
a) Run "scSplit count" and get two .csv files ("ref_filtered.csv" and "alt_filtered.csv") as output.
input parameters:
-v, --vcf, VCF from mixed BAM
-i, --bam, mixed sample BAM
-b, --bar, barcodes whitelist
-t, --tag, tag for barcode (default: "CB")
-c, --com, common SNVs
-r, --ref, output Ref count matrix
-a, --alt, output Alt count matrix
e.g. scSplit count -v mixed_genotype.vcf -i filtered.bam -b barcodes.tsv -r ref_filtered.csv -a alt_filtered.csv
b) It is **strongly recommended** to use below SNV list to filter the matrices to improve prediction accuracy:
Common SNPs (e.g. Human common SNPs from 1000 Genome project)
hg19: ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502/
hg38: http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/release/20181203_biallelic_SNV/
To process the genotype files of common SNPs, either download the per-chromosome files and concatenate them using bcftools, or download the whole-genome file; then take the first two columns of the VCF and join them with a colon so that each line is one SNV, e.g. "1:10177" (see the Python sketch at the end of this section).
Processed common SNVs for hg19 and hg38 can be found here: http://data.genomicsresearch.org/Projects/scSplit/CommonSNVs
Please specify the common SNVs in scSplit count using -c/--com parameter, please make sure your common SNVs list does not have header row.
c) When building count matrices, the genotypes of each allele will be checked and only those heterozygous, thus informative, SNVs will be kept. This is achieved by checking the GL/GP/PL/GT fields in the VCF file; only the first of these present, in that order, is used.
d) This step could be memory consuming, if the number of SNVs and/or cells are high. As a guideline, building matrices for 60,000 SNVs and 10,000 cells might need more than 30GB RAM to run, please allow enough RAM resource for running the script.
e) Typical runtime for this step is about one hour, depending on the nature of the data and the resources being allocated.
f) Typical number of filtered SNVs after this step is usually between 10,000 and 30,000.
g) If this step fails, please check: 1) is your barcode tag in the BAM files "CB" - if not, you need to specify it using -t/--tag; 2) are you working on a mixed sample VCF rather than a simple merge of individual genotypes? 3) is the correct whitelist barcode file being used?
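Below is a minimal Python sketch for preparing the common-SNV list mentioned in 3b). The file names are placeholders; adjust them (and use gzip.open for compressed VCFs) to match your data:

```python
# Convert a common-SNP VCF into a "chrom:pos" list, one SNV per line and with
# no header row, suitable for the -c/--com parameter of "scSplit count".
with open('common_snps.vcf') as vcf, open('common_snvs.txt', 'w') as out:
    for line in vcf:
        if line.startswith('#'):       # skip VCF header lines
            continue
        chrom, pos = line.split('\t')[:2]
        out.write(chrom + ':' + pos + '\n')
```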
### 4. Demultiplexing and generate ALT P/A matrix
a) Use the two generated allele counts matrices files to demultiplex the cells into different samples. Doublet sample will not have the same sample ID every time, which will be explicitly indicated in the log file
b) Run "scSplit run" with input parameters:
-r, --ref, input Ref count matrix
-a, --alt, input Alt count matrix
-n, --num, expected number of mixed samples (-n 0: autodetect mode)
-s, --sub, (optional) maximum number of subpopulations in autodetect mode, default: 10
-e, --ems, (optional) number of EM repeats to avoid local maximum, default: 30
-d, --dbl, (optional) correction for doublets, "-d 0" means you would expect no doublets. There will be no refinement on the results if this optional parameter is not specified or specified percentage is less than doublet rates detected during the run
-v, --vcf, (optional) known individual genotypes to map clusters and samples using distinguishing variants
e.g. scSplit run -r ref_filtered.csv -a alt_filtered.csv -n 8
# below command will tell the script to expect 20% doublets if the naturally found doublets are less than that:
e.g. scSplit run -r ref_filtered.csv -a alt_filtered.csv -n 8 -d 0.2
# (beta) -n 0 -s <sub>, let system decide the optimal sample number between 2 and <sub>
e.g. scSplit run -r ref_filtered.csv -a alt_filtered.csv -n 0 -s 12
c) Below files will be generated:
"scSplit_result.csv": barcodes assigned to each of the N+1 cluster (N singlets and 1 doublet cluster), doublet marked as DBL-<n> (n stands for the cluster number)
"scSplit_dist_variants.txt": the distinguishing variants that can be used to genotype and assign sample to clusters
"scSplit_dist_matrix.csv": the ALT allele Presence/Absence (P/A) matrix on distinguishing variants for all samples as a reference in assigning sample to clusters, NOT including the doublet cluster, whose sequence number would be different every run (please pay enough attention to this)
"scSplit_PA_matrix.csv": the full ALT allele Presence/Absence (P/A) matrix for all samples, NOT including the doublet cluster, whose sequence number would be different every run (please pay enough attention to this)
"scSplit_P_s_c.csv", the probability of each cell belonging to each sample
"scSplit.log" log file containing information for current run, iterations, and final Maximum Likelihood and doublet sample
d) This step is also memory consuming, and the RAM needed is highly dependent on the quantity of SNVs from last step and the number of cells. As a guideline, a matrix with 60,000 SNVs and 10,000 cells might need more than 50GB RAM to run, please allow enough RAM resource for running the script.
e) Typical runtime for this step is about half an hour, with default parameters, depending on the nature of the data and the resources being allocated.
f) Please notice that scSplit will add one pseudo cluster to represent doublet, so if you don't
### 5. (Optional) Generate sample genotypes based on the split result
a) Run "scSplit genotype" with input parameters:
-r, --ref, Ref count CSV as output
-a, --alt, Alt count CSV as output
-p, --psc, generated P(S|C)
e.g. scSplit genotype -r ref_filtered.csv -a alt_filtered.csv -p scSplit_P_s_c.csv
b) VCF file ("scSplit.vcf") will be generated for the logarithm-transformed genotype likelihoods for all sample models.
<br/>
| path: /scSplit-1.0.8.2.tar.gz/scSplit-1.0.8.2/README.md | quality_prob: 0.812533 | learning_prob: 0.781164 | filename: README.md | kind: pypi |
import torch
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from torch.optim import Adam
import torch.nn.functional as F
from torch.utils.data import DataLoader
from .model import simdatset, AutoEncoder, device
from .utils import showloss
def reproducibility(seed=1):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def training_stage(model, train_loader, optimizer, epochs=128):
model.train()
model.state = 'train'
loss = []
recon_loss = []
for i in tqdm(range(epochs)):
for k, (data, label) in enumerate(train_loader):
# reproducibility(seed=0)
optimizer.zero_grad()
x_recon, cell_prop, sigm = model(data)
batch_loss = F.l1_loss(cell_prop, label) + F.l1_loss(x_recon, data)
batch_loss.backward()
optimizer.step()
loss.append(F.l1_loss(cell_prop, label).cpu().detach().numpy())
recon_loss.append(F.l1_loss(x_recon, data).cpu().detach().numpy())
return model, loss, recon_loss
def adaptive_stage(model, data, optimizerD, optimizerE, step=10, max_iter=5):
data = torch.from_numpy(data).float().to(device)
loss = []
model.eval()
model.state = 'test'
_, ori_pred, ori_sigm = model(data)
ori_sigm = ori_sigm.detach()
ori_pred = ori_pred.detach()
model.state = 'train'
for k in range(max_iter):
model.train()
for i in range(step):
reproducibility(seed=0)
optimizerD.zero_grad()
x_recon, _, sigm = model(data)
batch_loss = F.l1_loss(x_recon, data)+F.l1_loss(sigm,ori_sigm)
batch_loss.backward()
optimizerD.step()
loss.append(F.l1_loss(x_recon, data).cpu().detach().numpy())
for i in range(step):
reproducibility(seed=0)
optimizerE.zero_grad()
x_recon, pred, _ = model(data)
batch_loss = F.l1_loss(ori_pred, pred)+F.l1_loss(x_recon, data)
batch_loss.backward()
optimizerE.step()
loss.append(F.l1_loss(x_recon, data).cpu().detach().numpy())
model.eval()
model.state = 'test'
_, pred, sigm = model(data)
return sigm.cpu().detach().numpy(), loss, pred.detach().cpu().numpy()
def train_model(train_x, train_y,
model_name=None,
batch_size=128, epochs=128):
train_loader = DataLoader(simdatset(train_x, train_y), batch_size=batch_size, shuffle=True)
model = AutoEncoder(train_x.shape[1], train_y.shape[1]).to(device)
# reproducibility(seed=0)
optimizer = Adam(model.parameters(), lr=1e-4)
print('Start training')
model, loss, reconloss = training_stage(model, train_loader, optimizer, epochs=epochs)
print('Training is done')
print('prediction loss is:')
showloss(loss)
print('reconstruction loss is:')
showloss(reconloss)
if model_name is not None:
print('Model is saved')
torch.save(model, model_name+".pth")
return model
def predict(test_x, genename, celltypes, samplename,
model_name=None, model=None,
adaptive=True, mode='overall'):
if model is not None and model_name is None:
print('Model is saved without defined name')
torch.save(model, 'model.pth')
if adaptive is True:
if mode == 'high-resolution':
TestSigmList = np.zeros((test_x.shape[0], len(celltypes), len(genename)))
TestPred = np.zeros((test_x.shape[0], len(celltypes)))
print('Start adaptive training at high-resolution')
for i in tqdm(range(len(test_x))):
x = test_x[i,:].reshape(1,-1)
if model_name is not None and model is None:
model = torch.load(model_name + ".pth")
elif model is not None and model_name is None:
model = torch.load("model.pth")
decoder_parameters = [{'params': [p for n, p in model.named_parameters() if 'decoder' in n]}]
encoder_parameters = [{'params': [p for n, p in model.named_parameters() if 'encoder' in n]}]
optimizerD = torch.optim.Adam(decoder_parameters, lr=1e-4)
optimizerE = torch.optim.Adam(encoder_parameters, lr=1e-4)
test_sigm, loss, test_pred = adaptive_stage(model, x, optimizerD, optimizerE, step=300, max_iter=3)
TestSigmList[i, :, :] = test_sigm
TestPred[i,:] = test_pred
TestPred = pd.DataFrame(TestPred,columns=celltypes,index=samplename)
CellTypeSigm = {}
for i in range(len(celltypes)):
cellname = celltypes[i]
sigm = TestSigmList[:,i,:]
sigm = pd.DataFrame(sigm,columns=genename,index=samplename)
CellTypeSigm[cellname] = sigm
print('Adaptive stage is done')
return CellTypeSigm, TestPred
elif mode == 'overall':
if model_name is not None and model is None:
model = torch.load(model_name + ".pth")
elif model is not None and model_name is None:
model = torch.load("model.pth")
decoder_parameters = [{'params': [p for n, p in model.named_parameters() if 'decoder' in n]}]
encoder_parameters = [{'params': [p for n, p in model.named_parameters() if 'encoder' in n]}]
optimizerD = torch.optim.Adam(decoder_parameters, lr=1e-4)
optimizerE = torch.optim.Adam(encoder_parameters, lr=1e-4)
print('Start adaptive training for all the samples')
test_sigm, loss, test_pred = adaptive_stage(model, test_x, optimizerD, optimizerE, step=300, max_iter=3)
print('Adaptive stage is done')
test_sigm = pd.DataFrame(test_sigm,columns=genename,index=celltypes)
test_pred = pd.DataFrame(test_pred,columns=celltypes,index=samplename)
return test_sigm, test_pred
else:
if model_name is not None and model is None:
model = torch.load(model_name+".pth")
elif model is not None and model_name is None:
model = model
print('Predict cell fractions without adaptive training')
model.eval()
model.state = 'test'
data = torch.from_numpy(test_x).float().to(device)
_, pred, _ = model(data)
pred = pred.cpu().detach().numpy()
pred = pd.DataFrame(pred, columns=celltypes, index=samplename)
print('Prediction is done')
return pred
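if __name__ == '__main__':
    # Minimal sketch on synthetic data (shapes only, not a meaningful
    # deconvolution run): 32 pseudo-bulk samples, 200 genes, 4 cell types.
    # All names below are made up for illustration. Because of the relative
    # imports above, run this as a module, e.g. "python -m TAPE.train".
    rng = np.random.default_rng(0)
    toy_x = rng.random((32, 200)).astype(np.float32)
    toy_y = rng.dirichlet(np.ones(4), size=32).astype(np.float32)
    toy_model = train_model(toy_x, toy_y, model_name=None, batch_size=8, epochs=1)
    toy_pred = predict(toy_x, genename=['gene%d' % i for i in range(200)],
                       celltypes=['ct%d' % i for i in range(4)],
                       samplename=['s%d' % i for i in range(32)],
                       model=toy_model, adaptive=False)
    print(toy_pred.head())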
| path: /scTAPE-1.1.2-py3-none-any.whl/TAPE/train.py | quality_prob: 0.667581 | learning_prob: 0.422624 | filename: train.py | kind: pypi |
import os
import anndata
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#### NEEDED FILES
# 1. GeneLength.txt
def counts2FPKM(counts, genelen):
genelen = pd.read_csv(genelen, sep=',')
genelen['TranscriptLength'] = genelen['Transcript end (bp)'] - genelen['Transcript start (bp)']
genelen = genelen[['Gene name', 'TranscriptLength']]
genelen = genelen.groupby('Gene name').max()
# intersection
inter = counts.columns.intersection(genelen.index)
samplename = counts.index
counts = counts[inter].values
genelen = genelen.loc[inter].T.values
# transformation
totalreads = counts.sum(axis=1)
counts = counts * 1e9 / (genelen * totalreads.reshape(-1, 1))
counts = pd.DataFrame(counts, columns=inter, index=samplename)
return counts
def FPKM2TPM(fpkm):
genename = fpkm.columns
samplename = fpkm.index
fpkm = fpkm.values
total = fpkm.sum(axis=1).reshape(-1, 1)
fpkm = fpkm * 1e6 / total
fpkm = pd.DataFrame(fpkm, columns=genename, index=samplename)
return fpkm
def counts2TPM(counts, genelen):
fpkm = counts2FPKM(counts, genelen)
tpm = FPKM2TPM(fpkm)
return tpm
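# --- Illustrative usage sketch (not part of the original TAPE source) ---
# Shows the intended counts -> FPKM -> TPM flow implemented above. "GeneLength.txt" is the
# gene length file referenced in the NEEDED FILES note; the counts DataFrame (samples x genes)
# is a hypothetical placeholder.
def _example_counts2tpm(counts: pd.DataFrame, genelenfile: str = "GeneLength.txt") -> pd.DataFrame:
    fpkm = counts2FPKM(counts, genelenfile)  # length- and library-size-normalized expression
    tpm = FPKM2TPM(fpkm)                     # rescale each sample so values sum to 1e6
    return tpm                               # equivalent to counts2TPM(counts, genelenfile)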
def ProcessInputData(train_data, test_data, sep=None, datatype='TPM', variance_threshold=0.98,
scaler="mms",
genelenfile=None):
### read train data
print('Reading training data')
if type(train_data) is anndata.AnnData:
pass
elif type(train_data) is str:
train_data = anndata.read_h5ad(train_data)
# train_data.var_names_make_unique()
train_x = pd.DataFrame(train_data.X, columns=train_data.var.index)
train_y = train_data.obs
print('Reading is done')
### read test data
print('Reading test data')
if type(test_data) is str:
test_x = pd.read_csv(test_data, index_col=0, sep=sep)
elif type(test_data) is pd.DataFrame:
test_x = test_data
print('Reading test data is done')
### transform to datatype
if datatype == 'FPKM':
if genelenfile is None:
raise Exception("Please add gene length file!")
print('Transforming to FPKM')
train_x = counts2FPKM(train_x, genelenfile)
elif datatype == 'TPM':
if genelenfile is None:
raise Exception("Please add gene length file!")
print('Transforming to TPM')
train_x = counts2TPM(train_x, genelenfile)
elif datatype == 'counts':
print('Using counts data to train model')
### variance cutoff
print('Cutting variance...')
var_cutoff = train_x.var(axis=0).sort_values(ascending=False)[int(train_x.shape[1] * variance_threshold)]
train_x = train_x.loc[:, train_x.var(axis=0) > var_cutoff]
var_cutoff = test_x.var(axis=0).sort_values(ascending=False)[int(test_x.shape[1] * variance_threshold)]
test_x = test_x.loc[:, test_x.var(axis=0) > var_cutoff]
### find intersected genes
print('Finding intersected genes...')
inter = train_x.columns.intersection(test_x.columns)
train_x = train_x[inter]
test_x = test_x[inter]
genename = list(inter)
celltypes = train_y.columns
samplename = test_x.index
print('Intersected gene number is ', len(inter))
### MinMax process
print('Scaling...')
train_x = np.log(train_x + 1)
test_x = np.log(test_x + 1)
colors = sns.color_palette('RdYlBu', 10)
fig = plt.figure()
sns.histplot(data=np.mean(train_x, axis=0), kde=True, color=colors[3],edgecolor=None)
sns.histplot(data=np.mean(test_x, axis=0), kde=True, color=colors[7],edgecolor=None)
plt.legend(title='datatype', labels=['trainingdata', 'testdata'])
plt.show()
if scaler=='ss':
print("Using standard scaler...")
ss = StandardScaler()
ss_train_x = ss.fit_transform(train_x.T).T
ss_test_x = ss.fit_transform(test_x.T).T
fig = plt.figure()
sns.histplot(data=np.mean(ss_train_x, axis=0), kde=True, color=colors[3],edgecolor=None)
sns.histplot(data=np.mean(ss_test_x, axis=0), kde=True, color=colors[7],edgecolor=None)
plt.legend(title='datatype', labels=['trainingdata', 'testdata'])
plt.show()
return ss_train_x, train_y.values, ss_test_x, genename, celltypes, samplename
elif scaler == 'mms':
print("Using minmax scaler...")
mms = MinMaxScaler()
mms_train_x = mms.fit_transform(train_x.T).T
mms_test_x = mms.fit_transform(test_x.T).T
sns.histplot(data=np.mean(mms_train_x, axis=0), kde=True, color=colors[3],edgecolor=None)
sns.histplot(data=np.mean(mms_test_x, axis=0), kde=True, color=colors[7],edgecolor=None)
plt.legend(title='datatype', labels=['trainingdata', 'testdata'])
plt.show()
return mms_train_x, train_y.values, mms_test_x, genename, celltypes, samplename
def L1error(pred, true):
return np.mean(np.abs(pred - true))
def CCCscore(y_pred, y_true, mode='all'):
# pred: shape{n sample, m cell}
if mode == 'all':
y_pred = y_pred.reshape(-1, 1)
y_true = y_true.reshape(-1, 1)
elif mode == 'avg':
pass
ccc_value = 0
for i in range(y_pred.shape[1]):
r = np.corrcoef(y_pred[:, i], y_true[:, i])[0, 1]
# Mean
mean_true = np.mean(y_true[:, i])
mean_pred = np.mean(y_pred[:, i])
# Variance
var_true = np.var(y_true[:, i])
var_pred = np.var(y_pred[:, i])
# Standard deviation
sd_true = np.std(y_true[:, i])
sd_pred = np.std(y_pred[:, i])
# Calculate CCC
numerator = 2 * r * sd_true * sd_pred
denominator = var_true + var_pred + (mean_true - mean_pred) ** 2
ccc = numerator / denominator
ccc_value += ccc
return ccc_value / y_pred.shape[1]
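# --- Illustrative worked example (not part of the original TAPE source) ---
# The concordance correlation coefficient computed above is 1 for a perfect prediction and
# drops when a constant bias is introduced. The toy arrays are placeholders.
def _example_cccscore():
    y_true = np.array([[0.2, 0.8], [0.5, 0.5], [0.7, 0.3]])
    perfect = CCCscore(y_true.copy(), y_true)  # -> 1.0
    biased = CCCscore(y_true + 0.1, y_true)    # < 1.0 because of the constant shift
    return perfect, biased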
def score(pred, label):
print('L1 error is', L1error(pred, label))
print('CCC is ', CCCscore(pred, label))
def showloss(loss):
sns.set()
plt.plot(loss)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.show()
def transformation(train_x, test_x):
sigma_2 = np.sum((train_x - np.mean(train_x, axis=0)) ** 2, axis=0) / (train_x.shape[0] + 1)
sigma = np.sqrt(sigma_2)
test_x = ((test_x - np.mean(test_x, axis=0)) / np.std(test_x, axis=0)) * sigma + np.mean(train_x, axis=0)
return test_x
/scTAPE-1.1.2-py3-none-any.whl/TAPE/utils.py
import anndata
import pandas as pd
from .simulation import generate_simulated_data
from .utils import ProcessInputData
from .train import train_model, predict, reproducibility
from .model import scaden, AutoEncoder
def Deconvolution(necessary_data, real_bulk, sep='\t', variance_threshold=0.98,
scaler='mms',
datatype='counts', genelenfile=None, d_prior=None,
mode='overall', adaptive=True,
save_model_name=None, sparse=True,
batch_size=128, epochs=128, seed=0):
"""
    :param necessary_data: for single-cell data, a txt file or a DataFrame is supported; for simulated data, a file
                           location or an h5ad variable is supported. A trained model is not accepted as input
                           (passing a .pth path or an AutoEncoder instance raises an exception below).
    :param real_bulk: an expression file path; index is sample, columns are gene names
    :param variance_threshold: value from 0 to 1. Genes whose variance falls below this quantile rank are filtered out.
    :param scaler: use MinMaxScaler ("mms") or StandardScaler ("ss") to process the data.
    :param sep: separator used to read the bulk data, depends on the file format
    :param datatype: FPKM or TPM; if the bulk RNA-seq normalization type is RPKM, just use FPKM.
    :param genelenfile: location of the gene length file used to transform counts data to TPM or FPKM.
                        This file should be in txt format and
                        contain three columns: [Gene name, Transcript start (bp), Transcript end (bp)]
    :param d_prior: prior knowledge about cell fractions, used to generate simulated cell fractions; if this parameter
                    is None, the fractions are generated randomly.
    :param mode: 'high-resolution' applies the adaptive stage to every single sample to generate a signature matrix,
                 'overall' deconvolves all the samples at the same time
    :param adaptive: has to be True if mode is 'high-resolution'
    :param save_model_name: the name used to save the model; if not provided, the model is not saved
    :return: depends on mode and adaptive.
             There are three combinations:
             1. high-resolution and adaptive deconvolution
                returns a dictionary and the predicted cell fractions as a pandas DataFrame;
                the keys of the dict are the pre-defined cell type names in the single-cell reference data,
                the values of the dict are DataFrames of gene expression per sample
             2. overall and adaptive deconvolution
                returns a signature matrix and a cell fraction table;
                the rows of the signature matrix are the gene expression profiles of each cell type;
                both variables are in DataFrame format
             3. overall and non-adaptive deconvolution
                returns the cell fractions directly;
                the signature matrix in this mode is None
"""
if type(necessary_data) is str:
postfix = necessary_data.split('.')[-1]
if postfix == 'txt':
simudata = generate_simulated_data(sc_data=necessary_data, samplenum=5000, d_prior=d_prior, sparse=sparse)
elif postfix == 'h5ad':
simudata = anndata.read_h5ad(necessary_data)
elif postfix == 'pth':
raise Exception('Do not accept a model as input')
else:
raise Exception('Please give the correct input')
else:
if type(necessary_data) is pd.DataFrame:
simudata = generate_simulated_data(sc_data=necessary_data, samplenum=5000, d_prior=d_prior, sparse=sparse)
elif type(necessary_data) is anndata.AnnData:
simudata = necessary_data
elif type(necessary_data) is AutoEncoder:
raise Exception('Do not accept a model as input')
else:
raise Exception('Please give the correct input')
train_x, train_y, test_x, genename, celltypes, samplename = \
ProcessInputData(simudata, real_bulk, sep=sep, datatype=datatype, genelenfile=genelenfile,
variance_threshold=variance_threshold, scaler=scaler)
print('training data shape is ', train_x.shape, '\ntest data shape is ', test_x.shape)
if save_model_name is not None:
reproducibility(seed)
model = train_model(train_x, train_y, save_model_name, batch_size=batch_size, epochs=epochs)
else:
reproducibility(seed)
model = train_model(train_x, train_y, batch_size=batch_size, epochs=epochs)
print('Notice that you are using parameters: mode=' + str(mode) + ' and adaptive=' + str(adaptive))
if adaptive is True:
if mode == 'high-resolution':
CellTypeSigm, Pred = \
predict(test_x=test_x, genename=genename, celltypes=celltypes, samplename=samplename,
model=model, model_name=save_model_name,
adaptive=adaptive, mode=mode)
return CellTypeSigm, Pred
elif mode == 'overall':
Sigm, Pred = \
predict(test_x=test_x, genename=genename, celltypes=celltypes, samplename=samplename,
model=model, model_name=save_model_name,
adaptive=adaptive, mode=mode)
return Sigm, Pred
else:
Pred = predict(test_x=test_x, genename=genename, celltypes=celltypes, samplename=samplename,
model=model, model_name=save_model_name,
adaptive=adaptive, mode=mode)
Sigm = None
return Sigm, Pred
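# --- Illustrative usage sketch (not part of the original TAPE source) ---
# End-to-end call of Deconvolution() in the overall, adaptive setting. The file names
# ('sc_reference.txt', 'bulk_counts.csv') are hypothetical placeholders.
def _example_deconvolution():
    sigm, pred = Deconvolution('sc_reference.txt', 'bulk_counts.csv', sep=',',
                               datatype='counts', mode='overall', adaptive=True,
                               batch_size=128, epochs=128)
    return sigm, pred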
def ScadenDeconvolution(necessary_data, real_bulk, sep='\t', sparse=True,
batch_size=128, epochs=128):
if type(necessary_data) is str:
postfix = necessary_data.split('.')[-1]
if postfix == 'txt':
simudata = generate_simulated_data(sc_data=necessary_data, samplenum=5000, sparse=sparse)
elif postfix == 'h5ad':
simudata = anndata.read_h5ad(necessary_data)
elif postfix == 'pth':
raise Exception('Do not accept a model as input')
else:
raise Exception('Please give the correct input')
else:
if type(necessary_data) is pd.DataFrame:
simudata = generate_simulated_data(sc_data=necessary_data, samplenum=5000, sparse=sparse)
elif type(necessary_data) is anndata.AnnData:
simudata = necessary_data
elif type(necessary_data) is AutoEncoder:
raise Exception('Do not accept a model as input')
else:
raise Exception('Please give the correct input')
train_x, train_y, test_x, genename, celltypes, samplename = \
ProcessInputData(simudata, real_bulk, sep=sep, datatype='counts')
print('training data shape is ', train_x.shape, '\ntest data shape is ', test_x.shape)
pred = test_scaden(train_x,train_y,test_x,batch_size=batch_size,epochs=epochs)
pred = pd.DataFrame(pred, columns=celltypes, index=samplename)
return pred
def test_scaden(train_x,train_y,test_x,batch_size=128,epochs=128):
architectures = {'m256': ([256,128,64,32],[0,0,0,0]),
'm512': ([512,256,128,64],[0, 0.3, 0.2, 0.1]),
'm1024': ([1024, 512, 256, 128],[0, 0.6, 0.3, 0.1])}
model = scaden(architectures, train_x, train_y, batch_size=batch_size, epochs=epochs)
model.train()
pred = model.predict(test_x)
return pred
/scTAPE-1.1.2-py3-none-any.whl/TAPE/deconvolution.py
import json
import time
from pathlib import Path
from typing import Optional, Union
import inspect
import numpy as np
import pandas as pd
from scipy import sparse
from scTenifold.core._networks import *
from scTenifold.core._QC import sc_QC
from scTenifold.core._norm import cpm_norm
from scTenifold.core._decomposition import tensor_decomp
from scTenifold.core._ko import reconstruct_pcnets
from scTenifold.plotting import plot_hist
from scTenifold.data import read_folder
__all__ = ["scTenifoldNet", "scTenifoldKnk"]
class scBase:
cls_prop = ["shared_gene_names", "strict_lambda"]
kw_sigs = {"qc_kws": inspect.signature(sc_QC),
"nc_kws": inspect.signature(make_networks),
"td_kws": inspect.signature(tensor_decomp),
"ma_kws": inspect.signature(manifold_alignment),
"dr_kws": inspect.signature(d_regulation)}
def __init__(self,
qc_kws=None,
nc_kws=None,
td_kws=None,
ma_kws=None,
dr_kws=None
):
self.data_dict = {}
self.QC_dict = {}
self.network_dict = {}
self.tensor_dict = {}
self.manifold: Optional[pd.DataFrame] = None
self.d_regulation: Optional[pd.DataFrame] = None
self.shared_gene_names = None
self.qc_kws = {} if qc_kws is None else qc_kws
self.nc_kws = {} if nc_kws is None else nc_kws
self.td_kws = {} if td_kws is None else td_kws
self.ma_kws = {} if ma_kws is None else ma_kws
self.dr_kws = {} if dr_kws is None else dr_kws
self.step_comps = {"qc": self.QC_dict,
"nc": self.network_dict,
"td": self.tensor_dict,
"ma": self.manifold,
"dr": self.d_regulation}
@classmethod
def _load_comp(cls,
file_dir: Path,
comp):
if comp == "qc":
dic = {}
for d in file_dir.iterdir():
if d.is_file():
dic[d.stem] = pd.read_csv(d)
obj_name = "QC_dict"
elif comp == "nc":
dic = {}
for d in file_dir.iterdir():
if d.is_dir():
dic[d.stem] = []
nt = 0
while (d / Path(f"network_{nt}.npz")).exists():
dic[d.stem].append(sparse.load_npz(d / Path(f"network_{nt}.npz")))
nt += 1
obj_name = "network_dict"
elif comp == "td":
dic = {}
for d in file_dir.iterdir():
if d.is_file():
dic[d.stem] = sparse.load_npz(d).toarray()
obj_name = "tensor_dict"
elif comp in ["ma", "dr"]:
dic = {}
for d in file_dir.iterdir():
if d.is_file():
dic[d.stem] = pd.read_csv(d)
obj_name = "manifold" if comp == "ma" else "d_regulation"
else:
raise ValueError("The component is not a valid one")
return dic, obj_name
@classmethod
def load(cls,
file_dir,
**kwargs):
parent_dir = Path(file_dir)
kw_path = parent_dir / Path("kws.json")
with open(kw_path, "r") as f:
kws = json.load(f)
kwargs.update(kws)
kwarg_props = {k: kwargs.pop(k)
for k in cls.cls_prop if k in kwargs}
ins = cls(**kwargs)
for name, obj in ins.step_comps.items():
if (file_dir / Path(name)).exists():
dic, name = cls._load_comp(file_dir / Path(name), name)
setattr(ins, name, dic)
for k, prop in kwarg_props.items():
setattr(ins, k, prop)
return ins
@classmethod
def list_kws(cls, step_name):
return {n: p.default for n, p in cls.kw_sigs[f"{step_name}"].parameters.items()
if not (p.default is p.empty)}
@staticmethod
def _infer_groups(*args):
grps = set()
for kw in args:
grps |= set(kw.keys())
return list(grps)
def _QC(self, label, plot: bool = True, **kwargs):
self.QC_dict[label] = self.data_dict[label].copy()
self.QC_dict[label].loc[:, "gene"] = self.QC_dict[label].index
self.QC_dict[label] = self.QC_dict[label].groupby(by="gene").sum()
self.QC_dict[label] = sc_QC(self.QC_dict[label], **kwargs)
if plot:
plot_hist(self.QC_dict[label], label)
def _make_networks(self, label, data, **kwargs):
self.network_dict[label] = make_networks(data, **kwargs)
def _tensor_decomp(self, label, gene_names, **kwargs):
self.tensor_dict[label] = tensor_decomp(np.concatenate([np.expand_dims(network.toarray(), -1)
for network in self.network_dict[label]], axis=-1),
gene_names, **kwargs)
def _save_comp(self,
file_dir: Path,
comp: str,
verbose: bool):
if comp == "qc":
for label, obj in self.step_comps["qc"].items():
label_fn = (file_dir / Path(label)).with_suffix(".csv")
obj.to_csv(label_fn)
if verbose:
print(f"{label_fn.name} has been saved successfully.")
elif comp == "nc":
for label, obj in self.step_comps["nc"].items():
(file_dir / Path(f"{label}")).mkdir(parents=True, exist_ok=True)
for i, npx in enumerate(obj):
file_name = file_dir / Path(f"{label}/network_{i}").with_suffix(".npz")
sparse.save_npz(file_name, npx)
if verbose:
print(f"{file_name.name} has been saved successfully.")
elif comp == "td":
for label, obj in self.step_comps["td"].items():
sp = sparse.coo_matrix(obj)
label_fn = (file_dir / Path(label)).with_suffix(".npz")
sparse.save_npz(label_fn, sp)
if verbose:
print(f"{label_fn.name} has been saved successfully.")
elif comp in ["ma", "dr"]:
if isinstance(self.step_comps[comp], pd.DataFrame):
fn = (file_dir / Path("manifold_alignment" if comp == "ma" else "d_regulation")).with_suffix(".csv")
self.step_comps[comp].to_csv(fn)
if verbose:
print(f"{fn.name} has been saved successfully.")
else:
raise ValueError(f"This step is not valid, please choose from {list(self.step_comps.keys())}")
def save(self,
file_dir: str,
comps: Union[str, list] = "all",
verbose: bool = True,
**kwargs):
dir_path = Path(file_dir)
dir_path.mkdir(parents=True, exist_ok=True)
if comps == "all":
comps = [k for k, v in self.step_comps.items()
if v is not None or (isinstance(v, dict) and len(v) != 0)]
for c in comps:
subdir = dir_path / Path(c)
subdir.mkdir(parents=True, exist_ok=True)
self._save_comp(subdir, c, verbose)
configs = {"qc_kws": self.qc_kws, "nc_kws": self.nc_kws, "td_kws": self.td_kws, "ma_kws": self.ma_kws}
if hasattr(self, "ko_kws"):
configs.update({"ko_kws": getattr(self, "ko_kws")})
if hasattr(self, "dr_kws"):
configs.update({"dr_kws": getattr(self, "dr_kws")})
if self.shared_gene_names is not None:
configs.update({"shared_gene_names": self.shared_gene_names})
configs.update(kwargs)
with open(dir_path / Path('kws.json'), 'w') as f:
json.dump(configs, f)
class scTenifoldNet(scBase):
def __init__(self,
x_data: pd.DataFrame,
y_data: pd.DataFrame,
x_label: str,
y_label: str,
qc_kws: dict = None,
nc_kws: dict = None,
td_kws: dict = None,
ma_kws: dict = None,
dr_kws: dict = None):
"""
Parameters
----------
x_data: pd.DataFrame
DataFrame contains single-cell data (rows: genes, cols: cells)
y_data: pd.DataFrame
DataFrame contains single-cell data (rows: genes, cols: cells)
x_label: str
The label of x_data
y_label: str
The label of y_data
qc_kws: dict
Keyword arguments of the QC step
nc_kws: dict
Keyword arguments of the network constructing step
td_kws: dict
Keyword arguments of the tensor decomposition step
ma_kws: dict
Keyword arguments of the manifold alignment step
"""
super().__init__(qc_kws=qc_kws, nc_kws=nc_kws, td_kws=td_kws, ma_kws=ma_kws, dr_kws=dr_kws)
self.x_label, self.y_label = x_label, y_label
self.data_dict[x_label] = x_data
self.data_dict[y_label] = y_data
@classmethod
def get_empty_config(cls):
config = {"x_data_path": None, "y_data_path": None,
"x_label": None, "y_label": None}
for kw, sig in cls.kw_sigs.items():
config[kw] = cls.list_kws(kw)
return config
@classmethod
def load_config(cls, config):
x_data_path = Path(config.pop("x_data_path"))
y_data_path = Path(config.pop("y_data_path"))
if x_data_path.is_dir():
x_data = read_folder(x_data_path)
else:
x_data = pd.read_csv(x_data_path, sep='\t' if x_data_path.suffix == ".tsv" else ",")
if y_data_path.is_dir():
y_data = read_folder(y_data_path)
else:
y_data = pd.read_csv(y_data_path, sep='\t' if y_data_path.suffix == ".tsv" else ",")
return cls(x_data, y_data, **config)
def save(self,
file_dir: str,
comps: Union[str, list] = "all",
verbose: bool = True,
**kwargs):
super().save(file_dir, comps, verbose,
x_data="", y_data="", # TODO: fix this later
x_label=self.x_label, y_label=self.y_label)
def _norm(self, label):
self.QC_dict[label] = cpm_norm(self.QC_dict[label])
def run_step(self,
step_name: str,
**kwargs) -> None:
"""
Run a single step of scTenifoldNet
Parameters
----------
step_name: str
The name of step to be run, possible steps:
1. qc: Quality control
2. nc: Network construction (PCNet)
3. td: Tensor decomposition
4. ma: Manifold alignment
5. dr: Differential regulation evaluation
**kwargs
Keyword arguments for the step, if None then use stored kws in this object.
Returns
-------
None
"""
start_time = time.perf_counter()
if step_name == "qc":
for label in self.data_dict:
self._QC(label,
**(self.qc_kws if kwargs == {} else kwargs))
self._norm(label)
print("finish QC:", label)
elif step_name == "nc":
x_gene_names, y_gene_names = set(self.QC_dict[self.x_label].index), set(self.QC_dict[self.y_label].index)
self.shared_gene_names = list(x_gene_names & y_gene_names)
for label, qc_data in self.QC_dict.items():
self._make_networks(label, data=qc_data.loc[self.shared_gene_names, :],
**(self.nc_kws if kwargs == {} else kwargs))
elif step_name == "td":
for label, qc_data in self.QC_dict.items():
self._tensor_decomp(label, self.shared_gene_names, **(self.td_kws if kwargs == {} else kwargs))
self.tensor_dict[self.x_label] = (self.tensor_dict[self.x_label] + self.tensor_dict[self.x_label].T) / 2
self.tensor_dict[self.y_label] = (self.tensor_dict[self.y_label] + self.tensor_dict[self.y_label].T) / 2
elif step_name == "ma":
self.manifold = manifold_alignment(self.tensor_dict[self.x_label],
self.tensor_dict[self.y_label],
**(self.ma_kws if kwargs == {} else kwargs))
self.step_comps["ma"] = self.manifold
elif step_name == "dr":
self.d_regulation = d_regulation(self.manifold, **(self.dr_kws if kwargs == {} else kwargs))
self.step_comps["dr"] = self.d_regulation
else:
raise ValueError("This step name is not valid, please choose from qc, nc, td, ma, dr")
print(f"process {step_name} finished in {time.perf_counter() - start_time} secs.")
def build(self) -> pd.DataFrame:
"""
Run the whole pipeline of scTenifoldNet
Returns
-------
d_regulation_df: pd.DataFrame
Differential regulation result dataframe
"""
self.run_step("qc")
self.run_step("nc")
self.run_step("td")
self.run_step("ma")
self.run_step("dr")
return self.d_regulation
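# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# Minimal end-to-end run of scTenifoldNet. The input DataFrames (genes as rows, cells as
# columns) and the output directory are hypothetical placeholders.
def _example_sctenifoldnet(x_df: pd.DataFrame, y_df: pd.DataFrame) -> pd.DataFrame:
    net = scTenifoldNet(x_df, y_df, x_label="control", y_label="treated",
                        qc_kws={"min_lib_size": 1000})
    d_reg = net.build()                      # runs qc -> nc -> td -> ma -> dr
    net.save("./scTenifoldNet_result")       # persists each finished step
    return d_reg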
class scTenifoldKnk(scBase):
def __init__(self,
data,
strict_lambda=0,
ko_method="default",
ko_genes=None,
qc_kws=None,
nc_kws=None,
td_kws=None,
ma_kws=None,
dr_kws=None,
ko_kws=None):
"""
Parameters
----------
data: pd.DataFrame
DataFrame contains single-cell data (rows: genes, cols: cells)
strict_lambda: float
strict_direction's parameter, default: 0
ko_method: str
KO method, ['default', 'propagation']
ko_genes: str, list of str
Gene(s) to be knocked out
qc_kws: dict
Keyword arguments of the QC step
nc_kws: dict
Keyword arguments of the network constructing step
td_kws: dict
Keyword arguments of the tensor decomposition step
ma_kws: dict
Keyword arguments of the manifold alignment step
ko_kws: dict
Keyword arguments of the Knock out step
"""
ma_kws = {"d": 2} if ma_kws is None else ma_kws
super().__init__(qc_kws=qc_kws, nc_kws=nc_kws, td_kws=td_kws, ma_kws=ma_kws, dr_kws=dr_kws)
self.data_dict["WT"] = data
self.strict_lambda = strict_lambda
self.ko_genes = ko_genes if ko_genes is not None else []
self.ko_method = ko_method
self.ko_kws = {} if ko_kws is None else ko_kws
@classmethod
def get_empty_config(cls):
config = {"data_path": None, "strict_lambda": 0,
"ko_method": "default", "ko_genes": []}
for kw, sig in cls.kw_sigs.items():
config[kw] = cls.list_kws(kw)
return config
@classmethod
def load_config(cls, config):
data_path = Path(config.pop("data_path"))
if data_path.is_dir():
data = read_folder(data_path)
else:
data = pd.read_csv(data_path, sep='\t' if data_path.suffix == ".tsv" else ",")
return cls(data, **config)
def save(self,
file_dir: str,
comps: Union[str, list] = "all",
verbose: bool = True,
**kwargs):
super().save(file_dir, comps, verbose,
data="", # TODO: fix this later
ko_method=self.ko_method,
strict_lambda=self.strict_lambda, ko_genes=self.ko_genes)
def _get_ko_tensor(self, ko_genes, **kwargs):
if self.ko_method == "default":
self.tensor_dict["KO"] = self.tensor_dict["WT"].copy()
self.tensor_dict["KO"].loc[ko_genes, :] = 0
elif self.ko_method == "propagation":
print(self.QC_dict["WT"].index)
self.network_dict["KO"] = reconstruct_pcnets(self.network_dict["WT"],
self.QC_dict["WT"],
ko_gene_id=[self.QC_dict["WT"].index.get_loc(i)
for i in ko_genes],
degree=kwargs.get("degree"),
**self.nc_kws)
self._tensor_decomp("KO", self.shared_gene_names, **self.td_kws)
self.tensor_dict["KO"] = strict_direction(self.tensor_dict["KO"], self.strict_lambda).T
np.fill_diagonal(self.tensor_dict["KO"].values, 0)
else:
            raise ValueError("No such method")
def run_step(self,
step_name: str,
**kwargs):
"""
Run a single step of scTenifoldKnk
Parameters
----------
step_name: str
The name of step to be run, possible steps:
1. qc: Quality control
2. nc: Network construction (PCNet)
3. td: Tensor decomposition
4. ko: Virtual knock out
5. ma: Manifold alignment
6. dr: Differential regulation evaluation
**kwargs
Keyword arguments for the step, if None then use stored kws in this object.
Returns
-------
None
"""
start_time = time.perf_counter()
if step_name == "qc":
if "min_exp_avg" not in self.qc_kws:
self.qc_kws["min_exp_avg"] = 0.05
if "min_exp_sum" not in self.qc_kws:
self.qc_kws["min_exp_sum"] = 25
self._QC("WT", **(self.qc_kws if kwargs == {} else kwargs))
# no norm
print("finish QC: WT")
elif step_name == "nc":
self._make_networks("WT", self.QC_dict["WT"], **(self.nc_kws if kwargs == {} else kwargs))
self.shared_gene_names = self.QC_dict["WT"].index.to_list()
elif step_name == "td":
self._tensor_decomp("WT", self.shared_gene_names, **(self.td_kws if kwargs == {} else kwargs))
self.tensor_dict["WT"] = strict_direction(self.tensor_dict["WT"], self.strict_lambda).T
elif step_name == "ko":
np.fill_diagonal(self.tensor_dict["WT"].values, 0)
if kwargs.get("ko_genes") is not None:
ko_genes = kwargs.pop("ko_genes")
kwargs = (self.ko_kws if kwargs == {} else kwargs)
else:
ko_genes = self.ko_genes
kwargs = (self.ko_kws if kwargs == {} else kwargs)
self._get_ko_tensor(ko_genes, **kwargs)
elif step_name == "ma":
self.manifold = manifold_alignment(self.tensor_dict["WT"],
self.tensor_dict["KO"],
**(self.ma_kws if kwargs == {} else kwargs))
self.step_comps["ma"] = self.manifold
elif step_name == "dr":
self.d_regulation = d_regulation(self.manifold, **(self.dr_kws if kwargs == {} else kwargs))
self.step_comps["dr"] = self.d_regulation
else:
raise ValueError("No such step")
print(f"process {step_name} finished in {time.perf_counter() - start_time} secs.")
def build(self):
"""
Run the whole pipeline of scTenifoldKnk
Returns
-------
d_regulation_df: pd.DataFrame
Differential regulation result dataframe
"""
self.run_step("qc")
self.run_step("nc")
self.run_step("td")
self.run_step("ko")
self.run_step("ma")
self.run_step("dr")
return self.d_regulation
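# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# Minimal virtual knock-out run with scTenifoldKnk. The wild-type DataFrame and the
# knocked-out gene name ("Trem2") are hypothetical placeholders.
def _example_sctenifoldknk(wt_df: pd.DataFrame) -> pd.DataFrame:
    knk = scTenifoldKnk(wt_df, ko_genes=["Trem2"])
    d_reg = knk.build()                      # runs qc -> nc -> td -> ko -> ma -> dr
    return d_reg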
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/core/_base.py
from typing import Sequence
import numpy as np
import pandas as pd
import scipy
from scTenifold.core._utils import timer
from tensorly.decomposition import parafac, parafac2, parafac_power_iteration
from tensorly import decomposition
import tensorly as tl
__all__ = ["tensor_decomp"]
@timer
def tensor_decomp(networks: np.ndarray,
gene_names: Sequence[str],
method: str = "parafac",
n_decimal: int = 1,
K: int = 5,
tol: float = 1e-6,
max_iter: int = 1000,
random_state: int = 42,
**kwargs) -> pd.DataFrame:
"""
Perform tensor decomposition on pc networks
Parameters
----------
networks: np.ndarray
Concatenated network, expected shape = (n_genes, n_genes, n_pcnets)
gene_names: sequence of str
The name of each gene in the network (order matters)
method: str, default = 'parafac'
Tensor decomposition method, tensorly's decomposition method was used:
http://tensorly.org/stable/modules/api.html#module-tensorly.decomposition
n_decimal: int
Number of decimal in the final df
K: int
Rank in parafac function
tol: float
Tolerance in the iteration
max_iter: int
        Maximum number of iterations
random_state: int
Random seed used to reproduce the same result
**kwargs:
Keyword arguments used in the decomposition function
Returns
-------
tensor_decomp_df: pd.DataFrame
The result of tensor decomposition, expected shape = (n_genes, n_genes)
References
----------
http://tensorly.org/stable/modules/api.html#module-tensorly.decomposition
"""
# Us, est, res_hist = cp_als(networks, n_components=K, max_iter=max_iter, tol=tol)
# print(est.shape, len(Us), res_hist)
print("Using tensorly")
factors = getattr(decomposition, method)(networks, rank=K, n_iter_max=max_iter, tol=tol,
random_state=random_state, **kwargs)
estimate = tl.cp_to_tensor(factors)
print(estimate.shape)
out = np.sum(estimate, axis=-1) / len(networks)
out = np.round(out / np.max(abs(out)), n_decimal)
return pd.DataFrame(out, index=gene_names, columns=gene_names)
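# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# Decomposes a stack of toy networks with the function above; the shapes, gene names and
# rank are placeholders chosen only to keep the example fast.
def _example_tensor_decomp() -> pd.DataFrame:
    rng = np.random.default_rng(0)
    networks = rng.normal(size=(20, 20, 5))          # 20 genes x 20 genes x 5 PC networks
    gene_names = [f"gene_{i}" for i in range(20)]
    return tensor_decomp(networks, gene_names, method="parafac", K=3)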
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/core/_decomposition.py
import pandas as pd
from warnings import warn
def sc_QC(X: pd.DataFrame,
min_lib_size: float = 1000,
remove_outlier_cells: bool = True,
min_percent: float = 0.05,
max_mito_ratio: float = 0.1,
min_exp_avg: float = 0,
min_exp_sum: float = 0) -> pd.DataFrame:
"""
main QC function in scTenifold pipelines
Parameters
----------
X: pd.DataFrame
A single-cell RNAseq DataFrame (rows: genes, cols: cells)
min_lib_size: int, float, default = 1000
Minimum library size of cells
remove_outlier_cells: bool, default = True
Whether the QC function will remove the outlier cells
min_percent: float, default = 0.05
Minimum fraction of cells where the gene needs to be expressed to be included in the analysis.
max_mito_ratio: float, default = 0.1
Maximum mitochondrial genes ratio included in the final df
min_exp_avg: float, default = 0
Minimum average expression value in each gene
min_exp_sum: float, default = 0
Minimum sum of expression value in each gene
Returns
-------
X_modified: pd.DataFrame
The DataFrame after QC
"""
outlier_coef = 1.5
X[X < 0] = 0
lib_size = X.sum(axis=0)
before_s = X.shape[1]
X = X.loc[:, lib_size > min_lib_size]
print(f"Removed {before_s - X.shape[1]} cells with lib size < {min_lib_size}")
if remove_outlier_cells:
lib_size = X.sum(axis=0)
before_s = X.shape[1]
Q3 = lib_size.to_frame().quantile(0.75, axis=0).values[0]
Q1 = lib_size.to_frame().quantile(0.25, axis=0).values[0]
interquartile_range = Q3 - Q1
X = X.loc[:, (lib_size >= Q1 - interquartile_range * outlier_coef) &
(lib_size <= Q3 + interquartile_range * outlier_coef)]
print(f"Removed {before_s - X.shape[1]} outlier cells from original data")
mt_genes = X.index.str.upper().str.match("^MT-")
if any(mt_genes):
print(f"Found mitochondrial genes: {X[mt_genes].index.to_list()}")
before_s = X.shape[1]
mt_rates = X[mt_genes].sum(axis=0) / X.sum(axis=0)
X = X.loc[:, mt_rates < max_mito_ratio]
print(f"Removed {before_s - X.shape[1]} samples from original data (mt genes ratio > {max_mito_ratio})")
else:
warn("Mitochondrial genes were not found. Be aware that apoptotic cells may be present in your sample.")
before_g = X.shape[0]
X = X[(X != 0).mean(axis=1) > min_percent]
print(f"Removed {before_g - X.shape[0]} genes expressed in less than {min_percent} of data")
before_g = X.shape[0]
if X.shape[1] > 500:
X = X.loc[X.mean(axis=1) >= min_exp_avg, :]
else:
X = X.loc[X.sum(axis=1) >= min_exp_sum, :]
print(f"Removed {before_g - X.shape[0]} genes with expression values: average < {min_exp_avg} or sum < {min_exp_sum}")
return X
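# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# Applies the QC filters above to a toy counts matrix (genes as rows, cells as columns);
# the values and thresholds are placeholders.
def _example_sc_qc() -> pd.DataFrame:
    import numpy as np
    rng = np.random.default_rng(0)
    counts = pd.DataFrame(rng.poisson(5, size=(200, 50)),
                          index=[f"gene_{i}" for i in range(200)],
                          columns=[f"cell_{i}" for i in range(50)])
    return sc_QC(counts, min_lib_size=100, min_percent=0.05)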
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/core/_QC.py
import re
from pathlib import Path
import zipfile
from warnings import warn
from scipy.sparse import csr_matrix
import pandas as pd
__all__ = ["read_mtx", "read_folder"]
def _get_mtx_body(rows, decode=None, print_header=True):
find_header_btn, row_ptr = False, 0
while not find_header_btn:
m = re.match(r"\d*\s\d*\s\d*", rows[row_ptr].strip()
if decode is None else rows[row_ptr].decode(decode).strip())
if m is not None:
find_header_btn = True
row_ptr += 1
if decode is None:
header, body = rows[:row_ptr], rows[row_ptr:]
else:
header, body = [r.decode(decode) for r in rows[:row_ptr]], [r.decode(decode) for r in rows[row_ptr:]]
if print_header:
print(header)
return body, header[-1].strip().split(" ")
def _build_matrix_from_sparse(sparse_data, shape):
row, col, data = [], [], []
for data_row in sparse_data:
r, c, d = data_row.strip().split(" ")
row.append(int(r) - 1)
col.append(int(c) - 1)
data.append(float(d))
return csr_matrix((data, (row, col)), shape=shape).toarray()
def _parse_mtx(mtx_file_name):
suffix = Path(mtx_file_name).suffix
if suffix == ".txt":
with open(mtx_file_name) as f:
rows = f.readlines()
body, header = _get_mtx_body(rows)
        n_rows, n_cols = int(header[0]), int(header[1])
is_dense = False
elif suffix == ".tsv":
body = pd.read_csv(mtx_file_name, sep='\t', header=None, index_col=False).values
n_rows, n_cols = body.shape
is_dense = True
elif suffix == ".csv":
body = pd.read_csv(mtx_file_name, header=None, index_col=False).values
n_rows, n_cols = body.shape
is_dense = True
elif suffix == ".zip":
archive = zipfile.ZipFile(mtx_file_name, 'r')
with archive.open(archive.namelist()[0]) as fn:
sf = Path(archive.namelist()[0]).suffix
if sf not in [".csv", ".tsv"]:
rows = fn.readlines()
body, header = _get_mtx_body(rows, decode="utf-8")
                n_rows, n_cols = int(header[0]), int(header[1])
is_dense = False
else:
body = pd.DataFrame([f.decode("utf-8").strip().split("," if sf == ".csv" else "\t")
for f in fn.readlines()]).iloc[1:, 1:].values
n_rows, n_cols = body.shape
is_dense = True
else:
raise ValueError("The suffix of this file is not valid")
return body, is_dense, n_rows, n_cols
def read_mtx(mtx_file_name,
gene_file_name,
barcode_file_name) -> pd.DataFrame:
"""
Read mtx data
Parameters
----------
mtx_file_name: str
File name of mtx data
gene_file_name
File name of gene vector
barcode_file_name
File name of barcode vector
Returns
-------
df: pd.DataFrame
A dataframe with genes as rows and cells as columns
"""
genes = pd.read_csv(gene_file_name, sep='\t', header=None).iloc[:, 0]
barcodes = pd.read_csv(barcode_file_name, sep='\t', header=None).iloc[:, 0] \
if barcode_file_name is not None else None
if barcodes is None:
warn("Barcode file is not existed. Added fake barcode name in the dataset")
body, is_dense, n_rows, n_cols = _parse_mtx(mtx_file_name)
barcodes = barcodes if barcodes is not None else [f"barcode_{i}" for i in range(n_cols)]
print(f"creating a {(len(genes), len(barcodes))} matrix")
if not is_dense:
data = _build_matrix_from_sparse(body, shape=(len(genes), len(barcodes)))
else:
data = body
df = pd.DataFrame(index=genes, columns=barcodes, data=data)
return df
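# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# Typical trio of files for read_mtx(); the paths are hypothetical placeholders.
def _example_read_mtx() -> pd.DataFrame:
    return read_mtx(mtx_file_name="matrix.txt",
                    gene_file_name="genes.tsv",
                    barcode_file_name="barcodes.tsv")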
def read_folder(file_dir,
matrix_fn = "matrix",
gene_fn = "genes",
barcodes_fn = "barcodes"):
dir_path = Path(file_dir)
fn_dic = {fn: None for fn in [matrix_fn, gene_fn, barcodes_fn]}
if not dir_path.is_dir():
raise ValueError("Path is not exist or is not a folder path")
for fn in dir_path.iterdir():
for k in fn_dic:
if k in fn.name:
fn_dic[k] = fn
return read_mtx(mtx_file_name=(dir_path / fn_dic[matrix_fn]).name,
gene_file_name=(dir_path / fn_dic[gene_fn]).name,
barcode_file_name=(dir_path / fn_dic[barcodes_fn]).name)
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/data/_io.py
from typing import Dict, Union, List
import zipfile
import gzip
from io import BytesIO
import re
from pathlib import Path
import requests
import pandas as pd
from ._io import read_mtx
_valid_ds_names = ["AD", "Nkx2_KO", "aging", "cetuximab", "dsRNA", "morphine"]
_repo_url = "https://raw.githubusercontent.com/{owner}/scTenifold-data/master/{ds_name}"
_repo_tree_url = "https://api.github.com/repos/{owner}/scTenifold-data/git/trees/main?recursive=1"
__all__ = ["list_data", "fetch_data"]
def fetch_and_extract(url, saved_path):
resp = requests.get(url, stream=True)
content = resp.content
zf = zipfile.ZipFile(BytesIO(content))
with zf as f:
f.extractall(saved_path)
def download_url(url, save_path, chunk_size=128):
r = requests.get(url, stream=True)
with open(save_path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
def list_data(owner="qwerty239qwe", return_list=True) -> Union[dict, List[str]]:
"""
Parameters
----------
owner: str, default = 'qwerty239qwe'
owner name of dataset repo
return_list: bool, default = True
To return list of data name or return a dict indicating repo structure
Returns
-------
data_info_tree: list or dict
        The obtainable data, either as a dict with structure {'data_name': {'group': ['file_names']}}
        or as a list of data_names
"""
tree = requests.get(_repo_tree_url.format(owner=owner)).json()['tree']
ds_list = [p["path"] for p in tree if "/" not in p["path"] and p["type"] == "tree"]
if return_list:
return ds_list
s_pattern = re.compile(r"/")
lv1, lv2 = {}, []
for t in tree:
if len(re.findall(s_pattern, t['path'])) == 1:
lv1[t["path"]] = []
elif len(re.findall(s_pattern, t['path'])) == 2:
lv2.append(t["path"])
for b in lv2:
lv1[re.findall(r"(.*)/", b)[0]].append(b)
ds_dic = {ds: {} for ds in ds_list}
for k, v in lv1.items():
ds_dic[re.findall(r"(.*)/", k)[0]][k] = v
return ds_dic
def fetch_data(ds_name: str,
dataset_path: Path = Path(__file__).parent.parent.parent / Path("datasets"),
owner="qwerty239qwe") -> Dict[str, pd.DataFrame]:
if not dataset_path.is_dir():
dataset_path.mkdir(parents=True)
ds_dic = list_data(owner=owner, return_list=False)
result_df = {}
for lv_1, files in ds_dic[ds_name].items():
fn_names = {k: None for k in ["matrix", "genes", "barcodes"]}
for f in files:
if not (dataset_path / Path(lv_1)).is_dir():
(dataset_path / Path(lv_1)).mkdir(parents=True, exist_ok=True)
for fn_name in fn_names:
if fn_name in f:
fn_names[fn_name] = f
if not (dataset_path / Path(f)).exists():
download_url(url=_repo_url.format(owner=owner, ds_name=f), save_path=(dataset_path / Path(f)))
result_df[re.findall(r".*/(.*)", lv_1)[0]] = read_mtx(mtx_file_name=str((dataset_path / Path(fn_names["matrix"]))),
gene_file_name=str((dataset_path / Path(fn_names["genes"]))),
barcode_file_name=str((dataset_path / Path(fn_names["barcodes"])))
if fn_names["barcodes"] is not None else None) # optional
return result_df
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/data/_get.py
from functools import partial
from warnings import warn
from typing import Optional, List
import pandas as pd
import numpy as np
def _check_features(df,
features):
valid_features = set(df.index) & set(features)
if len(features) != len(valid_features):
warn(f"Found {len(features) - len(valid_features)} invalid features (e.g. not shown in the dataframe)")
return valid_features
def calc_auc(rank_val: pd.Series,
max_rank: int):
insig_part = rank_val > max_rank
if all(insig_part):
return 0
else:
rank_val[insig_part] = max_rank + 1
rank_sum = sum(rank_val)
n = rank_val.shape[0]
u_val = rank_sum - (n * (n + 1)) / 2 # lower if the rank is higher
auc = 1 - (u_val / (n * max_rank))
return auc
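# --- Illustrative worked example (not part of the original scTenifoldpy source) ---
# If the signature genes rank 1, 2 and 3 in a cell (well inside max_rank = 1500), the
# rank-sum U statistic is at its minimum and calc_auc returns the best possible score.
def _example_calc_auc() -> float:
    ranks = pd.Series([1.0, 2.0, 3.0])       # ranks of the signature genes in one cell
    return calc_auc(ranks, max_rank=1500)    # -> 1.0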
def calc_U_stat_df(features,
df: pd.DataFrame,
neg_features: Optional[List[str]] = None,
max_rank=1500,
w_neg=1):
if neg_features is None:
neg_features = []
pos_features = list(set(features) - set(neg_features))
if len(pos_features) > 0:
pos = df.reindex(index=pos_features).apply(partial(calc_auc, max_rank=max_rank), axis=0).values
else:
        pos = np.zeros(shape=(df.shape[1],))  # one score per column (cell)
if len(neg_features) > 0:
neg = df.reindex(index=neg_features).apply(partial(calc_auc, max_rank=max_rank), axis=0).values
else:
        neg = np.zeros(shape=(df.shape[1],))  # one score per column (cell)
diff = pos - w_neg * neg
# diff[diff < 0] = 0
return diff
def cal_Uscore(X: pd.DataFrame,
pos_genes,
neg_genes,
max_rank=1500,
w_neg=1,
ties_method="average"):
ranked_df = X.rank(ascending=False, method=ties_method)
pos_genes = _check_features(X, pos_genes)
cell_auc = calc_U_stat_df(pos_genes, ranked_df,
neg_features=neg_genes,
max_rank=max_rank,
w_neg=w_neg)
return pd.DataFrame(cell_auc, index=ranked_df.columns)
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/cell_cycle/UCell.py
from typing import Optional, Dict, List
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from scanpy.tools import score_genes
from scTenifold.data._sim import *
def adobo_score(X,
genes,
n_bins: int = 25,
n_ctrl: int = 50,
random_state: int = 42,
file_path: Path = None):
if len(genes) == 0:
raise ValueError('Gene list ("genes") is empty.')
gene_mean = X.mean(axis=1)
gene_mean = gene_mean.sort_values()
binned = pd.qcut(gene_mean, n_bins)
ret = []
for g in genes:
sampled_bin = binned[binned == binned[binned.index == g].values[0]]
if n_ctrl > sampled_bin.shape[0]:
ret.append(sampled_bin.index)
else:
ret.append(
sampled_bin.sample(n_ctrl, replace=True, random_state=random_state).index
)
con = []
for g in ret:
con.append(X[X.index.isin(g)].mean(axis=0))
con = pd.concat(con, axis=1).transpose()
con.index = genes
targets = X[X.index.isin(genes)]
targets = targets.reindex(genes)
scores = (targets-con).mean(axis=0)
if file_path:
scores.to_csv(file_path)
return scores
def _get_assigned_bins(data_avg: np.ndarray,
cluster_len: int,
n_bins: int) -> np.ndarray:
assigned_bin = np.zeros(shape=(cluster_len, ), dtype=np.int32) # (G,)
bin_size = cluster_len / n_bins
for i_bin in range(n_bins):
assigned_bin[(assigned_bin == 0) &
(data_avg <= data_avg[int(np.round(bin_size * i_bin))])] = i_bin
return assigned_bin
def _get_ctrl_use(assigned_bin: np.ndarray,
gene_arr,
target_dict,
n_ctrl,
random_state) -> List[str]:
selected_bins = list(set(assigned_bin[np.in1d(gene_arr, target_dict["Pos"])]))
genes_in_same_bin = gene_arr[np.in1d(assigned_bin, selected_bins)]
ctrl_use = list()
for _ in range(len(target_dict["Pos"])):
ctrl_use.extend(random_state.choice(genes_in_same_bin, n_ctrl))
return list(set(ctrl_use))
def cell_cycle_score(X,
gene_list: List[str],
sample_list: List[str],
target_dict: Optional[Dict[str, List[str]]] = None,
n_bins: int = 25,
n_ctrl: int = 50,
random_state: int = 42,
file_path: Optional[Path] = None):
random_state = np.random.default_rng(random_state)
if target_dict is None:
target_dict = {"Pos": DEFAULT_POS,
"Neg": DEFAULT_NEG}
else:
target_dict = {k: [i.upper() for i in v] for k, v in target_dict.items()}
if len(set(gene_list) & set(target_dict["Pos"])) == 0:
raise ValueError('No feature genes found in gene_list.')
gene_list = [i.upper() for i in gene_list]
cluster_len = X.shape[0]
data_avg = X.mean(axis=1)
sort_arg = np.argsort(data_avg)
data_avg = data_avg[sort_arg]
gene_list = np.array(gene_list)[sort_arg]
X = X[sort_arg, :]
assigned_bin = _get_assigned_bins(data_avg, cluster_len, n_bins)
used_ctrl = _get_ctrl_use(assigned_bin, gene_list, target_dict,
n_ctrl, random_state)
ctrl_score = X[np.in1d(gene_list, used_ctrl), :].mean(axis=0).T
features_score = X[np.in1d(gene_list, target_dict["Pos"]), :].mean(axis=0).T
scores = features_score - ctrl_score
if file_path:
pd.DataFrame({"score": scores}, index=sample_list).to_csv(file_path)
return scores
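# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# Scores a toy expression matrix (genes x cells). DEFAULT_POS is assumed to be the positive
# marker list exported by scTenifold.data._sim (as used above); all other names are placeholders.
def _example_cell_cycle_score():
    rng = np.random.default_rng(0)
    gene_list = list(DEFAULT_POS) + [f"gene_{i}" for i in range(50)]
    sample_list = [f"cell_{i}" for i in range(10)]
    X = rng.random((len(gene_list), 10))
    return cell_cycle_score(X, gene_list, sample_list, n_bins=5, n_ctrl=10)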
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--random_state",
help="random seed", default=42, type=int)
parser.add_argument("-o", "--output_path",
help="output directory, it will be automatically and recursively created",
default=".", type=str)
parser.add_argument("-g", "--genes",
help="number of the genes in the test data",
default=1000, type=int)
parser.add_argument("-s", "--samples",
help="number of the samples (cells/observations) in the test data",
default=100, type=int)
parser.add_argument("-b", "--bins",
help="number of bins",
default=25, type=int)
parser.add_argument("-c", "--ctrls",
help="number of controls",
default=50, type=int)
args = parser.parse_args()
output_dir = Path(args.output_path)
output_dir.mkdir(parents=True, exist_ok=True)
data_obj = TestDataGenerator(n_genes=args.genes,
n_samples=args.samples,
n_bins=args.bins,
n_ctrl=args.ctrls,
random_state=args.random_state)
data_obj.save_data(output_dir / Path("test_data.csv"), use_normalized=True)
np_data = data_obj.get_data("numpy", True)
np_data["file_path"] = output_dir / Path("cell_scores.csv")
pd_data = data_obj.get_data("pandas", True)
pd_data["file_path"] = output_dir / Path("adobo_cell_scores.csv")
cell_cycle_score(**np_data)
score_genes(**(data_obj.get_data("ann_data", True))).write_csvs(output_dir / Path("scanpy_result"))
adobo_score(**pd_data)
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/cell_cycle/scoring.py
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap, MDS, SpectralEmbedding, LocallyLinearEmbedding
import umap
from sklearn.preprocessing import StandardScaler
import pandas as pd
from enum import Enum
__all__ = ["prepare_PCA_dfs", "prepare_embedding_dfs"]
class Reducer(Enum):
TSNE = "TSNE"
Isomap = "Isomap"
MDS = "MDS"
SpectralEmbedding = "SpectralEmbedding"
LocallyLinearEmbedding = "LocallyLinearEmbedding"
UMAP = "UMAP"
REDUCER_DICT = {Reducer.TSNE: TSNE,
Reducer.MDS: MDS,
Reducer.Isomap: Isomap,
Reducer.LocallyLinearEmbedding: LocallyLinearEmbedding,
Reducer.SpectralEmbedding: SpectralEmbedding,
Reducer.UMAP: umap.UMAP}
def prepare_PCA_dfs(feature_df,
transform_func=None,
n_components=None,
standardize=True):
if transform_func is not None:
x = transform_func(feature_df)
else:
x = feature_df
x = StandardScaler().fit_transform(x.values.T) if standardize else x.values.T
pca = PCA(n_components=n_components)
if not n_components:
n_components = min(x.shape[0], x.shape[1])
principal_components = pca.fit_transform(x)
final_df = pd.DataFrame(data=principal_components,
columns=[f'PC {num + 1}' for num in range(principal_components.shape[1])],
index=feature_df.columns)
exp_var_df = pd.DataFrame(data=pca.explained_variance_ratio_,
index=[f'PC {num + 1}' for num in range(n_components)])
component_df = pd.DataFrame(data=pca.components_.T,
columns=[f'PC {num + 1}' for num in range(n_components)],
index=feature_df.index)
return final_df, exp_var_df, component_df
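# --- Illustrative usage sketch (not part of the original scTenifoldpy source) ---
# PCA on a toy feature table (features as rows, samples as columns); the names and values
# are placeholders.
def _example_prepare_pca_dfs():
    import numpy as np
    df = pd.DataFrame(np.random.rand(100, 12),
                      index=[f"feature_{i}" for i in range(100)],
                      columns=[f"sample_{i}" for i in range(12)])
    scores, explained_var, loadings = prepare_PCA_dfs(df, n_components=5)
    return scores, explained_var, loadings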
def prepare_embedding_dfs(feature_df,
transform_func=None,
n_components=2,
reducer="TSNE",
standardize=True, **kwargs):
if transform_func:
x = transform_func(feature_df.values)
else:
x = feature_df.values
if isinstance(reducer, str):
reducer = Reducer(reducer)
sample_names = feature_df.columns.to_list()
    x = StandardScaler().fit_transform(x.T) if standardize else x.T
X_embedded = REDUCER_DICT[reducer](n_components=n_components, **kwargs).fit_transform(x)
df = pd.DataFrame(X_embedded,
columns=["{reducer} {i}".format(reducer=reducer.value, i=i) for i in range(1, n_components + 1)],
index=sample_names)
return df
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/plotting/_dim_reduction.py
from typing import Tuple, Optional
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import networkx as nx
from scipy.stats import chi2
from scTenifold.plotting._dim_reduction import *
def plot_network_graph(network: np.ndarray,
weight_thres=0.1,
con_thres=0) -> None:
"""
Plot graph of a PCnet
Parameters
----------
network: np.ndarray
A pc net
weight_thres: float
Minimum threshold of the pcnet's weights
con_thres: float or int
Minimum threshold of sum of weights
Returns
-------
None
"""
network = abs(network.copy())
network[network < weight_thres] = 0
valid_rows, valid_cols = (network.sum(axis=1) > con_thres), (network.sum(axis=0) > con_thres)
network = network[valid_rows,:][:, valid_cols]
G = nx.from_numpy_array(network)
pos = nx.kamada_kawai_layout(G)
fig, ax = plt.subplots(figsize=(8, 8))
nx.draw_networkx_edges(G, pos,
ax=ax, nodelist=[0], alpha=0.4)
nx.draw_networkx_nodes(G, pos,
ax=ax,
node_size=10,
cmap=plt.cm.Reds_r)
plt.show()
def plot_network_heatmap(network: np.ndarray,
figsize=(12, 12)) -> None:
"""
Plot a heatmap of a PC network
Parameters
----------
network: np.ndarray
A pcnet
figsize: tuple of ints
output figure size
Returns
-------
None
"""
fig, ax = plt.subplots(figsize=figsize)
sns.heatmap(network, center=0.0, ax=ax)
def plot_qqplot(df,
exp_col="FC",
stat_col="adjusted p-value",
plot_qqline: bool = True,
sig_threshold: float = 0.1) -> None:
"""
Plot QQ-plot using a d_regulation dataframe
Parameters
----------
df: pd.DataFrame
A d_regulation dataframe
exp_col: str
Column name of data used to put the y-axis
stat_col: str
Column name of data used to check significance
plot_qqline: bool
Plot Q-Q line on the plot
sig_threshold: float
        The significance threshold applied to stat_col (rows below it are marked significant)
Returns
-------
None
"""
the_col = "Theoretical quantiles"
len_x = df.shape[0]
data = df.loc[:, [exp_col, stat_col]]
data["significant"] = data[stat_col].apply(lambda x: x < sig_threshold)
data.sort_values(exp_col, inplace=True)
data[the_col] = chi2.ppf(q=np.linspace(0, 1, len_x + 2)[1:-1], df=1)
sns.scatterplot(data=data, x="Theoretical quantiles", y=exp_col, hue="significant")
if plot_qqline:
xl_1, xl_2 = plt.gca().get_xlim()
x1, x2 = data[the_col].quantile(0.25), data[the_col].quantile(0.75)
y1, y2 = data[exp_col].quantile(0.25), data[exp_col].quantile(0.75)
slope = (y2 - y1) / (x2 - x1)
intercept = y1 - slope * x1
plt.plot([xl_1, xl_2],
[slope * xl_1 + intercept, slope * xl_2 + intercept])
plt.xlim([xl_1, xl_2])
plt.show()
def plot_embedding(df,
groups: dict,
method: str = "UMAP",
plot_2D: bool = True,
figsize: tuple = (8, 8),
size: int = 10,
title: str = None,
palette: str = "muted",
**kwargs):
"""
Do dimension reduction and plot the embeddings onto a 2D plot
Parameters
----------
df: pd.DataFrame
A dataframe to perform dimension reduction
groups: dict(str, list)
A dict indicating the groups
method: str
The name of used method, could be: PCA, TSNE, UMAP, Isomap, MDS, SpectralEmbedding, LocallyLinearEmbedding
plot_2D: bool
Draw a 2D or 3D (if false) plot
figsize: tuple of int
The figure size of the plot: (width, height)
title: str
The subplot's title
palette: str
The name of used seaborn color palette,
reference: https://seaborn.pydata.org/generated/seaborn.color_palette.html
kwargs: keyword arguments of doing dimension reduction
Returns
-------
None
"""
if method == "PCA":
feature_df, exp_var_df, component_df = prepare_PCA_dfs(df, **kwargs)
emb_name = "PC"
else:
feature_df = prepare_embedding_dfs(df, reducer=method, **kwargs)
emb_name = method
if groups is None:
groups = {"all": df.columns.to_list()}
colors = sns.color_palette(palette)
if plot_2D:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection="3d")
for i, (group_name, sample_names) in enumerate(groups.items()):
em1, em2 = np.array([feature_df.loc[name, '{} 1'.format(emb_name)] for name in sample_names]), \
np.array([feature_df.loc[name, '{} 2'.format(emb_name)] for name in sample_names])
if plot_2D:
ax.scatter(em1, em2, s=size, label=group_name, c=[colors[i]])
else:
em3 = np.array([feature_df.loc[name, '{} 3'.format(emb_name)] for name in sample_names])
ax.scatter(em1, em2, em3, s=size, label=group_name, c=[colors[i]])
x_label = '{} 1'.format(emb_name)
y_label = '{} 2'.format(emb_name)
z_label = None if plot_2D else '{} 3'.format(emb_name)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
if z_label is not None:
ax.set_zlabel(z_label)
if title is not None:
ax.set_title(title)
ax.legend()
ax.grid()
plt.tight_layout()
plt.show()
def plot_hist(df_1,
df_1_name: str,
df_2: Optional[pd.DataFrame] = None,
df_2_name: Optional[str] = None,
sum_axis: int = 0,
label: str = "Sample",
figsize: Tuple[int, int] = (10, 8)):
"""
Parameters
----------
    df_1: pd.DataFrame
        First data table to summarize and plot
    df_1_name: str
        Name of df_1 shown in the legend
    df_2: pd.DataFrame, optional
        Second data table overlaid on the same histogram
    df_2_name: str, optional
        Name of df_2 shown in the legend
    sum_axis: int, default = 0
        Axis to aggregate over: 0 produces one value per column (summing over rows), 1 one value per row
    label: str, default = 'Sample'
        Name of the summed quantity, used as the x-axis label
    figsize: tuple of int, default = (10, 8)
        Output figure size
    Returns
    -------
    None
"""
fig, ax = plt.subplots(figsize=figsize)
df_1 = df_1.copy()
df_2 = df_2.copy() if df_2 is not None else None
if sum_axis == 0:
df_1 = df_1.T
df_2 = df_2.T if df_2 is not None else None
elif sum_axis != 1:
raise ValueError("Passed df should be a 2D df")
df_1 = df_1.sum(axis=1).to_frame()
df_2 = df_2.sum(axis=1).to_frame() if df_2 is not None else None
df_1.columns = [label]
df_1["name"] = df_1_name
if df_2 is not None:
df_2.columns = [label]
df_2["name"] = df_2_name
df_1 = pd.concat([df_1, df_2])
sns.histplot(data=df_1, x=label, hue="name", ax=ax)
else:
sns.histplot(data=df_1, x=label, ax=ax)
plt.show()
/scTenifoldpy-0.1.3.tar.gz/scTenifoldpy-0.1.3/scTenifold/plotting/_plotting.py
from control import step_response as st, impulse_response, initial_response, forced_response
from matplotlib import pyplot as plt
def step(sys, T=None,xylim=None, X0=0.0, input=None, output=None, transpose=False, return_x=False, squeeze=True,grid=False):
"""
Step response of a linear system
If the system has multiple inputs or outputs (MIMO), one input has
to be selected for the simulation. Optionally, one output may be
selected. The parameters `input` and `output` do this. All other
inputs are set to 0, all other outputs are ignored.
For information on the **shape** of parameters `T`, `X0` and
return values `T`, `yout`, see :ref:`time-series-convention`.
Parameters
----------
sys: StateSpace, or TransferFunction
LTI system to simulate
T: array-like object, optional
Time vector (argument is autocomputed if not given)
    xylim: list, optional
        Lower and upper limits of the x and y axes, passed to plt.axis
X0: array-like or number, optional
Initial condition (default = 0)
Numbers are converted to constant arrays with the correct shape.
input: int
Index of the input that will be used in this simulation.
output: int
Index of the output that will be used in this simulation. Set to None
to not trim outputs
transpose: bool
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and scipy.signal.lsim)
return_x: bool
If True, return the state vector (default = False).
squeeze: bool, optional (default=True)
If True, remove single-dimensional entries from the shape of
the output. For single output systems, this converts the
output response to a 1D array.
grid: bool, optional (default=False)
        If True, add a grid to the plot
Returns
-------
T: array
Time values of the output
yout: array
Response of the system
xout: array
Individual response of each x variable
See Also
--------
forced, initial, impulse
Notes
-----
This function uses the `forced` function with the input set to a
unit step.
Examples
--------
>>> T, yout = step(sys, T, X0)
"""
yout, T = st(sys,T,X0,input,output,transpose,return_x,squeeze)
plt.plot(yout,T)
plt.title("Step response")
plt.xlabel("Time (seconds)")
plt.ylabel("Amplitude")
plt.axhline(T[int(2*len(T)/3):-1].mean(), color="black", linestyle="--")
    if xylim is not None:
        plt.axis(xylim)
    if grid:
        plt.grid()
plt.show()
def impulse(sys, T=None, X0=0.0, input=0, output=None, transpose=False, return_x=False, squeeze=True):
"""
Impulse response of a linear system
If the system has multiple inputs or outputs (MIMO), one input has
to be selected for the simulation. Optionally, one output may be
selected. The parameters `input` and `output` do this. All other
inputs are set to 0, all other outputs are ignored.
For information on the **shape** of parameters `T`, `X0` and
return values `T`, `yout`, see :ref:`time-series-convention`.
Parameters
----------
sys: StateSpace, TransferFunction
LTI system to simulate
T: array-like object, optional
Time vector (argument is autocomputed if not given)
X0: array-like object or number, optional
Initial condition (default = 0)
Numbers are converted to constant arrays with the correct shape.
input: int
Index of the input that will be used in this simulation.
output: int
Index of the output that will be used in this simulation. Set to None
to not trim outputs
transpose: bool
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and scipy.signal.lsim)
return_x: bool
If True, return the state vector (default = False).
squeeze: bool, optional (default=True)
If True, remove single-dimensional entries from the shape of
the output. For single output systems, this converts the
output response to a 1D array.
Returns
-------
T: array
Time values of the output
yout: array
Response of the system
xout: array
Individual response of each x variable
See Also
--------
forced, initial, step
Notes
-----
This function uses the `forced` function to compute the time
response. For continuous time systems, the initial condition is altered to
account for the initial impulse.
Examples
--------
>>> T, yout = impulse(sys, T, X0)
"""
yout, T = impulse_response(sys,T,X0,input,output,transpose,return_x,squeeze)
plt.plot(yout,T)
plt.title("Impulse response")
plt.xlabel("Time (seconds)")
plt.ylabel("Response")
plt.show()
def initial(sys, T=None, X0=0.0, input=0, output=None, transpose=False, return_x=False, squeeze=True):
"""
Initial condition response of a linear system
If the system has multiple outputs (MIMO), optionally, one output
may be selected. If no selection is made for the output, all
outputs are given.
For information on the **shape** of parameters `T`, `X0` and
return values `T`, `yout`, see :ref:`time-series-convention`.
Parameters
----------
sys: StateSpace, or TransferFunction
LTI system to simulate
T: array-like object, optional
Time vector (argument is autocomputed if not given)
X0: array-like object or number, optional
Initial condition (default = 0)
Numbers are converted to constant arrays with the correct shape.
input: int
Ignored, has no meaning in initial condition calculation. Parameter
ensures compatibility with step_response and impulse_response
output: int
Index of the output that will be used in this simulation. Set to None
to not trim outputs
transpose: bool
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and scipy.signal.lsim)
return_x: bool
If True, return the state vector (default = False).
squeeze: bool, optional (default=True)
If True, remove single-dimensional entries from the shape of
the output. For single output systems, this converts the
output response to a 1D array.
Returns
-------
T: array
Time values of the output
yout: array
Response of the system
xout: array
Individual response of each x variable
See Also
--------
forced, impulse, step
Notes
-----
This function uses the `forced` function with the input set to
zero.
Examples
--------
>>> T, yout = initial(sys, T, X0)
"""
    T, yout = initial_response(sys, T, X0, input, output, transpose, return_x, squeeze)
    plt.plot(T, yout)
    plt.title("Response to Initial Conditions")
    plt.xlabel("Time (seconds)")
    plt.ylabel("Amplitude")
    plt.show()
    return T, yout
def forced(sys, T=None, U=0.0, X0=0.0, transpose=False, interpolate=False, squeeze=True):
"""
Simulate the output of a linear system.
As a convenience for parameters `U`, `X0`:
Numbers (scalars) are converted to constant arrays with the correct shape.
The correct shape is inferred from arguments `sys` and `T`.
For information on the **shape** of parameters `U`, `T`, `X0` and
return values `T`, `yout`, `xout`, see :ref:`time-series-convention`.
Parameters
----------
sys: LTI (StateSpace, or TransferFunction)
LTI system to simulate
T: array-like, optional for discrete LTI `sys`
Time steps at which the input is defined; values must be evenly spaced.
U: array-like or number, optional
Input array giving input at each time `T` (default = 0).
If `U` is ``None`` or ``0``, a special algorithm is used. This special
algorithm is faster than the general algorithm, which is used
otherwise.
X0: array-like or number, optional
Initial condition (default = 0).
transpose: bool, optional (default=False)
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and scipy.signal.lsim)
interpolate: bool, optional (default=False)
If True and system is a discrete time system, the input will
be interpolated between the given time steps and the output
will be given at system sampling rate. Otherwise, only return
the output at the times given in `T`. No effect on continuous
time simulations (default = False).
squeeze: bool, optional (default=True)
If True, remove single-dimensional entries from the shape of
the output. For single output systems, this converts the
output response to a 1D array.
Returns
-------
T: array
Time values of the output.
yout: array
Response of the system.
xout: array
Time evolution of the state vector.
See Also
--------
step, initial, impulse
Notes
-----
For discrete time systems, the input/output response is computed using the
:scipy-signal:ref:`scipy.signal.dlsim` function.
For continuous time systems, the output is computed using the matrix
exponential `exp(A t)` and assuming linear interpolation of the inputs
between time points.
Examples
--------
>>> T, yout, xout = forced(sys, T, u, X0)
See :ref:`time-series-convention`.
"""
    T, yout, xout = forced_response(sys, T, U, X0, transpose, interpolate, squeeze)
    plt.plot(T, yout)
    plt.title("Forced response")
    plt.xlabel("Time (seconds)")
    plt.ylabel("Amplitude")
    plt.show()
    return T, yout, xout
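# --- Usage sketch (illustrative only, not part of the original module). It assumes a
# transfer function built with python-control, which this package wraps; each call
# opens its own matplotlib figure.
if __name__ == "__main__":
    from control.xferfcn import tf
    G = tf([1], [1, 2, 1])  # simple second-order system
    step(G)                  # step response with a dashed steady-state line
    impulse(G)               # impulse response
    initial(G, X0=1)         # response to a non-zero initial condition
    forced(G, T=[0.05 * i for i in range(400)], U=1.0)  # constant forced input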
|
/sca-fiuna-0.2.1.1.tar.gz/sca-fiuna-0.2.1.1/sca/graph.py
| 0.912231 | 0.798776 |
graph.py
|
pypi
|
import cmath
from numpy import eye, poly, flip, matrix
from numpy.linalg import inv
from control.matlab import TransferFunction, ss
from control.xferfcn import tf
def phase(V):
"""
function phase:
receive one or vector of complex numbers and return the vector of phase
angle respect the origin on radian
num: complex single or vector of complex
def: single number or vector of phases
"""
if not type(V) == type([]): V = [V]
fases = [cmath.phase(Poriginal) for Poriginal in V]
if len(fases) == 1: return fases[0]
return fases
def phased(V):
"""
function phased:
receive one or vector of complex numbers and return the vector of phase
angle respect the origin on deg
num: complex single or vector of complex
def: single number or vector of phases
"""
fases = phase(V)
if not type(fases) == type([]): fases = [fases]
fases = [fase*180/cmath.pi for fase in fases]
if len(fases) == 1: return fases[0]
return fases
def evals(G, Pole):
"""
function evals:
Receive a TransferFunction and one complex number s and evaluate in
G: TransferFunction
Pole: complex number
return the complex number of result
"""
return G.horner(Pole)[0][0]
def zpk(zeros, poles, gain):
"""
    zero-pole-gain function
    Create a transfer function from its zeros, poles, and gain.
    Inputs:
    zeros: list of the system zeros
    poles: list of the system poles
    gain: gain of the system
Output:
G: the transfer function
Example:
G = zpk([0],[3,2],10)
10*s
G = -----------
s^2-5*s+6
"""
s = tf("s")
G = 1
for i in zeros:
z = s-i
G = G*z
for i in poles:
p = s-i
G = G/p
return G*gain
def canon_controllable(sys):
"""
    Function canon_controllable(sys):
    input:
        sys: state-space system
    output:
        zsys: state-space system
        T: transformation matrix
    Takes a state-space system and returns an equivalent system of the same
    type in controllable canonical form, using the transformation matrix T:
    A' = inv(T)*A*T
    B' = inv(T)*B
    C' = C*T
    D' = D
    where:
    T = M*W
    M = [B | A*B | A^2*B | ... | A^(n-1)*B]
    W = [[a_(n-1), a_(n-2), ... a_1, 1]
         [a_(n-2), a_(n-3), ... 1, 0]
         [a_(n-3), ... , 1 , 0, 0]
         .
         .
         .
         [1, 0, 0, ... 0, 0, 0]]
    with a_i the coefficients of the characteristic polynomial of A.
"""
A = sys.A
B = sys.B
n = len(A)
    # Build the controllability matrix M
M = eye(n)
M[:,0] = B.T[0]
    # Build the matrix W from the characteristic polynomial coefficients
pol_car = poly(A).tolist()
W = flip(eye(n),-1)
aux = eye(n)
pol_car.pop(0)
pol_car = flip(pol_car, -1).tolist()
for i in range(n-1):
aux = aux*A
aux2 = aux*B
M[:,i+1] = aux2.T[0]
pol_car.pop(0)
m = len(pol_car)
W[i,0:m] = pol_car
T = matrix(M)*matrix(W)
A = inv(T)*A*T
B = inv(T)*B
C = sys.C*T
D = sys.D
return ss(A,B,C,D), T
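# --- Usage sketch (illustrative only; not part of the original module). It builds a
# transfer function with zpk, evaluates it, and computes the controllable canonical
# form of a state-space realization obtained via python-control's tf2ss.
if __name__ == "__main__":
    from control.matlab import tf2ss
    G = zpk([0], [3, 2], 10)      # 10*s / (s^2 - 5*s + 6)
    print(evals(G, 1 + 1j))       # value of G at s = 1 + j
    print(phased(1 + 1j))         # phase of a complex number, in degrees
    sys_ss = tf2ss(G)             # state-space realization
    zsys, T = canon_controllable(sys_ss)
    print(zsys)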
|
/sca-fiuna-0.2.1.1.tar.gz/sca-fiuna-0.2.1.1/sca/funclib.py
| 0.558568 | 0.850033 |
funclib.py
|
pypi
|
import spidev
from utils.constant import Constant
_STANDARD_GRAVITY = 9.80665 # m/s^2
# 32 bit SPI commands to interact with device
_READ_ACC_X = Constant(0x040000F7)
_READ_ACC_Y = Constant(0x080000FD)
_READ_ACC_Z = Constant(0x0C0000FB)
_SW_RESET = Constant(0xB4002098)
_WHO_AM_I = Constant(0x40000091)
_SELF_TEST = Constant(0x100000E9)
_SUMMARY = Constant(0x180000E5)
_MODE_1 = Constant(0xB400001F)
_MODE_2 = Constant(0xB4000102)
_MODE_3 = Constant(0xB4000225)
_MODE_4 = Constant(0xB4000338)
_MODE_1_SENSITIVITY = Constant(2700)
_MODE_2_SENSITIVITY = Constant(1350)
_MODE_3_SENSITIVITY = Constant(5400)
class Modes:
"""
There are 4 different modes i.e
`Mode 1,
Mode 2,
Mode 3,
and Mode 4`
"""
MODE_1 = Constant(0b00)
MODE_2 = Constant(0b01)
MODE_3 = Constant(0b10)
MODE_4 = Constant(0b11)
class SCA3300:
"""
    SCA3300 driver class.
    :param max_speed: maximum SPI clock frequency in Hz
    :param bus: SPI bus the accelerometer is connected to
    :param device: SPI device (chip select) of the accelerometer on that bus
"""
def __init__(self, max_speed: int = 7629, bus: int = 0, device: int = 0) -> None:
self._spi = spidev.SpiDev()
self._spi.open(bus, device)
self._spi.max_speed_hz = max_speed
self._current_mode = _MODE_3
# Only mode 0 is supported!
self._spi.mode = 0
self._spi.xfer2(_MODE_3.value.to_bytes(length=4, byteorder='big'))
@property
def mode(self) -> Modes:
if self._current_mode is _MODE_1:
return Modes.MODE_1
elif self._current_mode is _MODE_2:
return Modes.MODE_2
elif self._current_mode is _MODE_3:
return Modes.MODE_3
else:
return Modes.MODE_4
@mode.setter
def mode(self, mode: Modes) -> None:
"""
        Set the sensor's working mode, i.e. its measurement range and sensitivity.
        :param mode: one of the `Modes` constants
"""
if mode is Modes.MODE_2:
self._current_mode = _MODE_2
elif mode is Modes.MODE_1:
self._current_mode = _MODE_1
elif mode is Modes.MODE_3:
self._current_mode = _MODE_3
else:
self._current_mode = _MODE_4
self._spi.xfer2(self._current_mode.value.to_bytes(length=4, byteorder='big'))
@property
def acceleration(self) -> tuple:
"""
        Return the X, Y and Z acceleration, in m/s^2.
        The sensor replies to each command in the *next* SPI frame, so the read
        commands below are issued in a rotated order even though the results
        are interpreted as X, Y and Z.
        :return: tuple of three floats (x, y, z)
"""
result_x = self._spi.xfer2(_READ_ACC_Y.value.to_bytes(length=4, byteorder='big'))
x = self._convert_acceleration(result_x[1], result_x[2]) * _STANDARD_GRAVITY
result_y = self._spi.xfer2(_READ_ACC_Z.value.to_bytes(length=4, byteorder='big'))
y = self._convert_acceleration(result_y[1], result_y[2]) * _STANDARD_GRAVITY
result_z = self._spi.xfer2(_READ_ACC_X.value.to_bytes(length=4, byteorder='big'))
z = self._convert_acceleration(result_z[1], result_z[2]) * _STANDARD_GRAVITY
return x, y, z
    def _convert_acceleration(self, first_byte: int, second_byte: int) -> float:
        # Combine the two data bytes into a 16-bit word and interpret it as signed.
        signed_value = self.to_signed((int(first_byte) << 8) | int(second_byte))
if self._current_mode is _MODE_1:
return signed_value / _MODE_1_SENSITIVITY.value
elif self._current_mode is _MODE_2:
return signed_value / _MODE_2_SENSITIVITY.value
else:
return signed_value / _MODE_3_SENSITIVITY.value
@staticmethod
def to_signed(value: int) -> int:
return -(value & 0x8000) | (value & 0x7fff)
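# --- Usage sketch (illustrative only; assumes the sensor is wired to SPI bus 0,
# device 0, and that this module is importable as shown above). ---
if __name__ == "__main__":
    import time
    sensor = SCA3300(bus=0, device=0)
    sensor.mode = Modes.MODE_1          # select measurement mode 1
    for _ in range(5):
        x, y, z = sensor.acceleration   # readings in m/s^2
        print(f"x={x:.2f} y={y:.2f} z={z:.2f}")
        time.sleep(0.1)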
|
/sca3300-1.0.1-py3-none-any.whl/sca3300.py
| 0.706798 | 0.417568 |
sca3300.py
|
pypi
|
import enum
import re
# status modes
MODE_SUCCESS = 0x0
MODE_FAILURE = 0x1
MODE_PENDING = 0x2
# status domain separators
DOMAIN_GENERIC = 0x0
DOMAIN_BE = 0x1
DOMAIN_FE = 0x2
class MWStatus(enum.IntEnum):
def encode(mode, domain, value):
return ((mode & 0x3) << 30) | ((domain & 0x3) << 28) | ((value & 0xFFFF) << 0)
@staticmethod
def build(value):
try: # B10 Integer
return MWStatus(int(value))
except ValueError: # B16 Integer
return MWStatus(int(value, 16))
@staticmethod
def dict(status: enum.IntEnum, data=None) -> dict:
if data is None:
return {
'status': status.hex()
}
data['status'] = status.hex()
return data
# status values: generic
SUCCESS = encode(MODE_SUCCESS, DOMAIN_GENERIC, 0x0000)
# status values: generic, job related
PENDING_JOB_EXECUTION = encode(MODE_PENDING, DOMAIN_GENERIC, 0x0100)
PENDING_JOB_COMPLETION = encode(MODE_PENDING, DOMAIN_GENERIC, 0x0101)
# status values: back-end
FAILURE_BE_JOB_PROLOGUE = encode(MODE_FAILURE, DOMAIN_BE, 0x0000)
FAILURE_BE_JOB_PROCESS = encode(MODE_FAILURE, DOMAIN_BE, 0x0001)
FAILURE_BE_JOB_EPILOGUE = encode(MODE_FAILURE, DOMAIN_BE, 0x0002)
# status values: front-end, job/contest-related
FAILURE_FE_JOB_NOT_FOUND = encode(MODE_FAILURE, DOMAIN_FE, 0x0000)
FAILURE_FE_JOB_INVALID = encode(MODE_FAILURE, DOMAIN_FE, 0x0001)
FAILURE_FE_CONTEST_NOT_FOUND = encode(MODE_FAILURE, DOMAIN_FE, 0x0002)
# status values: front-end, API-related
FAILURE_FE_API_QUEUE_EMPTY = encode(MODE_FAILURE, DOMAIN_FE, 0x0100)
FAILURE_FE_API_QUEUE_FULL = encode(MODE_FAILURE, DOMAIN_FE, 0x0101)
FAILURE_FE_API_INVALID_TOKEN = encode(MODE_FAILURE, DOMAIN_FE, 0x0102)
FAILURE_FE_API_SCHEMA_MISMATCH = encode(MODE_FAILURE, DOMAIN_FE, 0x0103)
# status values: front-end, User-related
FAILURE_FE_USER_NOT_FOUND = encode(MODE_FAILURE, DOMAIN_FE, 0x0200)
FAILURE_FE_USER_NO_CREDITS = encode(MODE_FAILURE, DOMAIN_FE, 0x0201)
FAILURE_FE_USER_NOT_AUTHORIZED = encode(MODE_FAILURE, DOMAIN_FE, 0x0202)
# status values: front-end, GitHub-related
FAILURE_FE_GITHUB_UNACTIONABLE_EVENT = encode(MODE_FAILURE, DOMAIN_FE, 0x0300)
FAILURE_FE_GITHUB_NO_CONFIG_FILE = encode(MODE_FAILURE, DOMAIN_FE, 0x0301)
FAILURE_FE_GITHUB_INSTALLATION_TOKEN = encode(MODE_FAILURE, DOMAIN_FE, 0x0302)
def is_success(self):
return ((self.value >> 30) & 0x3) == MODE_SUCCESS
def is_failure(self):
return ((self.value >> 30) & 0x3) == MODE_FAILURE
def is_pending(self):
return ((self.value >> 30) & 0x3) == MODE_PENDING
def describe(self):
if (self.value == self.SUCCESS):
t = 'success'
elif (self.value == self.PENDING_JOB_EXECUTION):
t = 'Job pending execution'
elif (self.value == self.PENDING_JOB_COMPLETION):
t = 'Job pending completion'
elif (self.value == self.FAILURE_BE_JOB_PROLOGUE):
t = 'Job failed during processing prologue (before processing, e.g. allocation of resources)'
elif (self.value == self.FAILURE_BE_JOB_PROCESS):
t = 'Job failed during processing'
elif (self.value == self.FAILURE_BE_JOB_EPILOGUE):
t = 'Job failed during processing epilogue (after processing, e.g. deallocation of resources)'
        elif (self.value == self.FAILURE_FE_JOB_NOT_FOUND):
            t = 'Job could not be found on the frontend'
        elif (self.value == self.FAILURE_FE_JOB_INVALID):
            t = 'Job is invalid'
        elif (self.value == self.FAILURE_FE_API_QUEUE_EMPTY):
            t = 'The queue is empty and the job cannot be retrieved'
        elif (self.value == self.FAILURE_FE_API_QUEUE_FULL):
            t = 'The queue is full and the job cannot be submitted'
        else:
            # fall back to the member name for statuses without a hand-written description
            t = self.name
        return re.sub(r'\s\s+', ' ', t)
def hex(self):
return "0x{0:08X}".format(self.value)
def __repr__(self):
return (self.name) + ' : ' + ('<' + '{0:08X}'.format(self.value) + '>')
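# --- Usage sketch (illustrative only; not part of the original module). It round-trips
# a status through its hexadecimal form and uses the helper methods defined above.
if __name__ == "__main__":
    status = MWStatus.build(MWStatus.FAILURE_FE_API_QUEUE_FULL.hex())
    print(status.hex())          # canonical hexadecimal form
    print(status.is_failure())   # True: the mode bits indicate a failure
    print(status.describe())     # human-readable description
    print(MWStatus.dict(status, {'job': 'example'}))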
|
/sca3s_cli-1.0.6-py3-none-any.whl/sca3s_cli/classes/middleware_status.py
| 0.52902 | 0.21686 |
middleware_status.py
|
pypi
|
import multiprocessing
import os
from collections import defaultdict
from pathlib import Path
import colorama
import h5py
import numpy as np
from tensorflow.keras.utils import to_categorical # nopep8 pylint: disable=import-error
from tqdm import tqdm
from termcolor import cprint
from .aes import AES
colorama.init()
NUM_AVAILABLE_TRACES = 4
ATTACK_POINTS = ['key', 'sub_bytes_in', 'sub_bytes_out']
def load_shards(shards, num_stacked_traces, attack_point, target_byte,
data_type, lstm_rows):
"Load a collection of shards"
num_shards = len(shards)
pb = tqdm(total=num_shards,
desc='loading %s shards' % data_type,
unit='shard')
data = defaultdict(list)
data['metadata'] = defaultdict(list)
for path in shards:
params = [
path, num_stacked_traces, attack_point, target_byte, lstm_rows
] # nopep8
# loading data
shard_data = load_shard(params)
# accumulating
for k in ['x', 'y']:
data[k].extend(shard_data[k])
for k, v in shard_data['metadata'].items():
data['metadata'][k].extend(v)
pb.update()
pb.close()
# swapping x axis
    # (examples, traces, trace_data, 1) -> (traces, examples, trace_data, 1)
data['x'] = np.array(data['x'])
data['x'] = np.swapaxes(data['x'], 0, 1)
data['metadata']['ct'] = np.array(data['metadata']['ct'])
data['metadata']['pt'] = np.array(data['metadata']['pt'])
data['metadata']['key'] = np.array(data['metadata']['key'])
return data
def load_shard(params):
shard_path, num_stacked_traces, attack_point, target_byte, lstm_rows = params
    h5_file = h5py.File(shard_path, 'r')
data = {}
round_idx = '1'
# X
# Input shape is: [num_traces, trace_value]
# Output shape is: [num_key_pt, num_traces, trace_values]
x = h5_file.get('x')
num_pairs = (x.shape[0] // NUM_AVAILABLE_TRACES)
# Reshape
if lstm_rows:
row_size = int(lstm_rows / num_pairs)
x = np.reshape(x,
(num_pairs, NUM_AVAILABLE_TRACES, lstm_rows, row_size))
else:
# CNN
x = np.reshape(x, [num_pairs, NUM_AVAILABLE_TRACES, -1])
x = np.expand_dims(x, 3) # CNN 1d
# efficient way to clip X to n_traces
data['x'] = x[:, :num_stacked_traces, :, :]
# Y
# The key is in the 'metadata' dataset, not the precomputed
# intermediates in 'y' dataset
    # intermediates need to be remapped to match byte_idx
t = h5_file.get('y')
# find the right value
if attack_point == 'key':
values = h5_file['metadata']['key'][..., target_byte]
else:
dataset_byte_idx = str(AES.intermediate_map[target_byte])
values = list(t[round_idx][attack_point][dataset_byte_idx])
# recall there are NUM_AVAILABLE_TRACES per example so we need to 'jump'
# some values as they are just duplicates.
data['y'] = []
for trace_idx in range(x.shape[0]):
position = trace_idx * NUM_AVAILABLE_TRACES
categorical_value = to_categorical(values[position], 256)
data['y'].append(categorical_value)
# metadata
mt = h5_file.get('metadata')
data['key'] = mt[0][1]
metadata = {"ct": [], "pt": [], "key": []}
for idx in range(x.shape[0]):
metadata['pt'].append(mt[idx * NUM_AVAILABLE_TRACES][0])
metadata['ct'].append(mt[idx * NUM_AVAILABLE_TRACES][2])
key = data['key']
array_with_single_key = np.expand_dims(key, axis=0)
repeated_key = np.repeat(array_with_single_key, len(data['x']), axis=0)
metadata['key'].extend(repeated_key)
data['metadata'] = metadata
return data
def load_data(dataset_name,
data_type='train',
num_shards=500,
num_validation_shards=0,
target_byte=0,
attack_point='key',
num_stacked_traces=1,
lstm_rows=False):
"""Load the needed data to train or evaluate a given SCAAML attack
Args:
dataset_name (str): dataset to use
data_type (str, optional): type of data to load. Either 'train' or
'holdout'. Defaults to 'train'.
        num_shards (int, optional): number of shards to load -- each shard holds
            100 traces in train mode, 1000 in holdout.
        num_validation_shards (int, optional): number of validation shards to
            load -- each shard holds 100 traces.
        target_byte (int, optional): byte to attack. Defaults to 0.
        attack_point (str, optional): which attack point to target. Available
            points are: {'key', 'sub_bytes_in', 'sub_bytes_out'}. Defaults to 'key'.
        num_stacked_traces (int, optional): how many stacked traces to use for
            each example. Max is 4. The paper uses 3. Defaults to 1.
        lstm_rows (int, optional): number of LSTM cells. Defaults to 0. If 0,
            data is returned in the CNN format.
    Returns
        list: x, y, metadata (plus validation x, y, metadata when
            num_validation_shards > 0)
"""
# basic checks
assert data_type in ['train', 'holdout']
if data_type == 'holdout':
cprint('[Warning] NEVER USE HOLDOUT FOR TRAINING', 'yellow')
if data_type == 'holdout' and num_validation_shards:
cprint(
"[Error] holdout is for attack testing not training\
-- validation_shards are meaningless in this setting", 'red')
quit()
assert attack_point in ATTACK_POINTS
dataset_path = '%s/dataset/%s/' % (dataset_name, data_type)
if not os.path.exists(dataset_path):
cprint("[Error] %s path not found -- dataset downloaded?" %
dataset_path, 'red') # nopep8
quit()
cprint("[Loading %s data from: %s]" % (data_type, dataset_name), 'blue')
shards = list(Path(dataset_path).glob('*.h5'))
# shuffle shard
np.random.shuffle(shards)
available_shards = len(shards)
cprint('|- %d available shards' % available_shards, 'green')
# training shards
num_shards = min(available_shards, num_shards)
shards_to_load = shards[:num_shards]
data = load_shards(shards_to_load, num_stacked_traces, attack_point,
target_byte, data_type, lstm_rows)
results = [np.array(data['x']), np.array(data['y']), data['metadata']]
if num_validation_shards:
shards_to_load = shards[num_shards:num_shards + num_validation_shards]
data = load_shards(shards_to_load, num_stacked_traces, attack_point,
target_byte, 'validation', lstm_rows)
results.extend(
[np.array(data['x']),
np.array(data['y']), data['metadata']])
# casting and returning
return results
def load_attack_validation_data(dataset_name,
num_shards=500,
target_byte=0,
attack_point='key',
num_stacked_traces=1,
lstm_rows=False,
workers=0):
"""Load the needed data to train or evaluate a given SCAAML attack
Args:
dataset_name (str): dataset to use
        num_shards (int, optional): number of shards to load -- each shard holds
            100 traces in train mode, 1000 in holdout.
        target_byte (int, optional): byte to attack. Defaults to 0.
        attack_point (str, optional): which attack point to target. Available
            points are: {'key', 'sub_bytes_in', 'sub_bytes_out'}. Defaults to 'key'.
        num_stacked_traces (int, optional): how many stacked traces to use for
            each example. Max is 4. The paper uses 3. Defaults to 1.
        lstm_rows (int, optional): number of LSTM cells. Defaults to 0. If 0,
            data is returned in the CNN format.
        workers (int, optional): number of worker processes; 0 loads shards
            sequentially. Defaults to 0.
    Returns
        list: loaded shards, one per file
"""
data_type = 'holdout'
dataset_path = '%s/dataset/%s/' % (dataset_name, data_type)
# basic sanity checks.
assert attack_point in ATTACK_POINTS
if not os.path.exists(dataset_path):
raise ValueError("Path not found '%s' -- is the dataset downloaded?" %
dataset_path)
cprint("[Loading %s data from: %s]" % (data_type, dataset_name), 'blue')
shards = list(Path(dataset_path).glob('*.h5'))
available_shards = len(shards)
num_shards = min(available_shards, num_shards)
shards_to_load = shards[:num_shards]
cprint('|- %d available shards' % available_shards, 'green')
cprint('|- %d shards to load' % num_shards, 'green')
output_shards = []
params = []
for shard in shards_to_load:
params.append(
(shard, num_stacked_traces, attack_point, target_byte, lstm_rows))
if workers:
pool = multiprocessing.Pool()
for shard in tqdm(pool.imap_unordered(load_shard, params),
desc="Loading",
unit=" Shard",
total=len(params)):
output_shards.append(shard)
else:
for shard_params in tqdm(params, desc="Loading", unit=" Shard"):
output_shards.append(load_shard(shard_params))
return output_shards
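# --- Usage sketch (illustrative only; the dataset name below is a placeholder and must
# point at a downloaded SCAAML dataset folder). ---
if __name__ == "__main__":
    x, y, metadata = load_data('tinyaes',            # hypothetical dataset folder
                               data_type='train',
                               num_shards=10,
                               target_byte=0,
                               attack_point='key',
                               num_stacked_traces=3)
    print(x.shape, y.shape, metadata['key'].shape)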
|
/aes/data.py
| 0.502686 | 0.247407 |
data.py
|
pypi
|
import os
from collections import defaultdict, namedtuple
from multiprocessing import Pool
from pathlib import Path
import colorama
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical # nopep8 pylint: disable=import-error
from termcolor import cprint
from .aes import AES
try:
class_name = get_ipython().__class__.__name__
if "Terminal" in class_name:
IS_NOTEBOOK = False
else:
IS_NOTEBOOK = True
except NameError:
IS_NOTEBOOK = False
if IS_NOTEBOOK:
from tqdm import tqdm_notebook as tqdm
from IPython.display import HTML
else:
from tqdm import tqdm
colorama.init()
# Create a named tuple for shard data, for ease of passing the data around.
#Shard = namedtuple("Shard", ["x", "y", "key", "ct", "pt"])
class Shard(object):
def __init__(self, x, y, key, ct, pt, predictions=None):
self.x = x
self.y = y
self.key = key
self.ct = ct
self.pt = pt
self.predictions = predictions
self.__iterindex = 0
def __len__(self):
return len(self.x)
def __iter__(self):
return self
def __next__(self):
return self.next()
    def next(self):
        # Iterate element-wise over the shard's parallel arrays.
        if self.__iterindex >= len(self.x):
            self.__iterindex = 0
            raise StopIteration
        idx = self.__iterindex
        self.__iterindex += 1
        return self.x[idx], self.y[idx], self.key[idx], self.ct[idx], self.pt[idx]
def load_shard(filename, attack_point, byte_idx):
"""Load a single shard of data from an HDF5 file.
Arguments:
filename {str} - - Filename of the HDF5 file to load.
model_type {str} - - "cnn" | "lstm"
lstm_rows {int} - - Number of "rows" of data to format the input trace into.
attack_point {str} - - "key" | "sub_bytes_in" | "sub_bytes_out" - the type
of byte we are trying to attack.
Returns:
Shard - - A Shard containing the data.
"""
with h5py.File(filename, "r") as train:
x_train = train['x'][:]
y_train = train[attack_point][:, byte_idx][:]
key = train['key'][:]
ct_train = train['ciphertext'][:]
pt_train = train['plaintext'][:]
return Shard(x=x_train, y=y_train, key=key, ct=ct_train, pt=pt_train)
def shard_to_dict(batch):
"""Given a batch of data from the holdout_batch_generator, transform it into
a simple dictionary of np.ndarrays.
Arguments:
        batch {Shard} -- The batch (a Shard of parallel arrays).
Returns:
dict[str -> np.ndarray] -- the dictionary of name to array.
"""
output = defaultdict(list)
for idx in range(len(batch)):
output['x'].append(batch.x[idx])
output['y'].append(batch.y[idx])
output['key'].append(batch.key[idx])
output['ct'].append(batch.ct[idx])
output['pt'].append(batch.pt[idx])
for k, v in output.items():
output[k] = np.array(v)
return output
def holdout_batch_generator(holdout_shard):
"""Given a holdout shard, generate a set of batches, where each batch
contains the data for a single key.
Arguments:
holdout_shard {Shard} -- Shard containing holdout data.
Returns:
generator -- Generator which produces one batch of items at a time.
"""
last = None
pb = tqdm(desc="Computing holdout batches", total=holdout_shard.x.shape[0])
shard = Shard([], [], [], [], [], [])
for x, y, key, ct, pt in zip(holdout_shard.x, holdout_shard.y,
holdout_shard.key, holdout_shard.ct,
holdout_shard.pt):
pb.update(1)
if last is None or not np.array_equal(last, key):
last = key
old_shard = shard
shard = Shard([], [], [], [], [], [])
if len(old_shard):
yield old_shard
shard.x.append(x)
shard.y.append(y)
shard.key.append(key)
shard.ct.append(ct)
shard.pt.append(pt)
if len(shard):
yield shard
pb.close()
return None
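# --- Usage sketch (illustrative only; the file name below is a placeholder). ---
if __name__ == "__main__":
    holdout = load_shard("holdout_shard.h5", attack_point="sub_bytes_in", byte_idx=0)
    for batch in holdout_batch_generator(holdout):
        batch_dict = shard_to_dict(batch)
        print(batch_dict['x'].shape, batch_dict['key'][0])
        break  # just inspect the first per-key batch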
|
/aes/combined_data.py
| 0.660063 | 0.23875 |
combined_data.py
|
pypi
|
import numpy as np
from termcolor import cprint
import tensorflow
from .aes import AES
from scaaml.utils import hex_display, hex_display_recovered_key
from scaaml.aes.combined_data import shard_to_dict
from tqdm import tqdm
def pred2byte(attack_point, pt, prediction_index):
"""Recover the byte value
Args:
attack_point (str): attack point target
pt (int): Plain text value
prediction_index (int): prediction index
Raises:
ValueError: if attack point is unknown
Returns:
int: byte value
"""
if attack_point == 'add_round_key_out':
byte_idx = prediction_index ^ pt
elif attack_point == 'sub_bytes_in':
byte_idx = prediction_index ^ pt
elif attack_point == 'sub_bytes_out':
byte_idx = AES.rsbox[prediction_index] ^ pt
elif attack_point == 'key':
return prediction_index
else:
raise ValueError('target point not implemented')
return byte_idx
def prediction_to_byte_array(attack_point, pt, predictions):
"""Maps model predictions to byte predictions.
Args:
predictions (np.float): Array of predicted probabilities. 1D.
pt (int): Plaintext.
attack_point (str): The attack point being targeted.
Returns:
np.array (256,): Byte predictions.
"""
byte_probabilities = np.zeros(256)
for pred_idx, pred_value in enumerate(predictions):
byte_idx = pred2byte(attack_point, pt, pred_idx)
byte_probabilities[byte_idx] = pred_value
return byte_probabilities
def check_byte_recovery(shard, byte_idx=0, attack_point='key', debug=1):
"check if recovery code work "
if debug:
cprint('\nRecovery using target point %s:' % (attack_point), 'magenta')
if debug:
cprint("|-Key info:", 'magenta')
key = shard.key[0]
if debug:
hex_display(key, prefix="key:\t\t", color='white')
# plaintext
pt = shard.pt[0]
if debug:
hex_display(pt, prefix="pt:\t\t", color='white')
# ciphertext
ct = shard.ct[0]
if debug:
hex_display(ct, prefix="ct:\t\t", color='yellow')
# AES forward
aes = AES()
computed_ct = aes.encrypt(pt, key, 16)
if debug:
hex_display(computed_ct, prefix="computed_ct:\t", color='yellow')
if attack_point == 'sub_bytes_in':
true_intermediate = aes.intermediates['sbox_in'][0][byte_idx]
elif attack_point == 'sub_bytes_out':
true_intermediate = aes.intermediates['sbox_out'][0][byte_idx]
else:
true_intermediate = key[byte_idx]
true_label = shard.y[0]
guess_byte = pred2byte(attack_point, pt[byte_idx], true_label)
if debug:
print('|-Plain text:%s' % hex(pt[byte_idx]))
print("|-Prediction: %s - expected: %s" %
(hex(true_label), hex(true_intermediate)))
print("|-Recovered Byte: %s" % (hex(guess_byte)))
print("|-True Byte:%s" % (hex(key[byte_idx])))
return int(guess_byte), int(key[byte_idx])
def compute_rank(byte_probabilities, real_byte):
"""Given predicted probabilities for a single example, compute the rank of
the correct byte, within the predictions."""
sorted_results = np.argsort(-byte_probabilities)
for i in range(256):
if sorted_results[i] == real_byte:
return i
raise ValueError("Invalid byte value: ", real_byte)
def compute_metrics_for_predictions(predicted,
y_arr,
pt_arr,
attack_point="key",
byte_idx=0,
probability_accumulation='log10'):
# Accumulated byte probabilities across traces.
byte_probabilities = np.zeros(256)
confusion_matrix = np.zeros([256, 256])
ranks = []
# Accumulate predictions to infer the most likely byte.
for idx, prediction in enumerate(predicted):
pt = pt_arr[idx][byte_idx]
real_byte = pred2byte(attack_point, pt, y_arr[idx])
trace_predictions = prediction_to_byte_array(attack_point, pt,
prediction)
for proba_idx, probability in enumerate(trace_predictions):
if probability_accumulation == 'log10':
byte_probabilities[proba_idx] += np.log10(probability + 1e-22)
else:
byte_probabilities[proba_idx] += probability
guess_byte = np.argmax(byte_probabilities)
confusion_matrix[real_byte][guess_byte] += 1
ranks.append(compute_rank(byte_probabilities, real_byte))
return ranks, confusion_matrix
def process_batch(predict_fn,
data,
start_idx,
end_idx,
attack_point="key",
byte_idx=0,
probability_accumulation='log10'):
x = data.x[start_idx:end_idx]
y = data.y[start_idx:end_idx]
pt = data.pt[start_idx:end_idx]
preds = predict_fn(x)
return compute_metrics_for_predictions(
preds,
y,
pt,
attack_point=attack_point,
byte_idx=byte_idx,
probability_accumulation=probability_accumulation)
def create_rank_table(ranks):
top20 = 0
top10 = 0
top5 = 0
top1 = 0
for rank in ranks:
if rank < 20:
top20 += 1
if rank < 10:
top10 += 1
if rank < 5:
top5 += 1
if rank == 0:
top1 += 1
results = [['num keys attacked', len(ranks)],
['mean_rank', float(np.mean(ranks))],
['max_rank', float(np.max(ranks))],
['median_rank', float(np.median(ranks))],
['min_rank', float(np.min(ranks))], ["top1", int(top1)],
["top5", int(top5)], ["top10", int(top10)],
["top20", int(top20)]]
header = ["Metric", "Value"]
return results, header
def side_channel_attack(model,
data,
attack_point="key",
byte_idx=0,
predict_fn=None,
probability_accumulation='log10'):
ranks = []
rank_histories = []
confusions = []
summed_confusion = np.zeros([256, 256])
if not predict_fn:
predict_fn = lambda x: model.predict(x)
batch_start = 0
last_key = data.key[0]
key = data.key[0]
# Data - currently in the order it was pulled from shards.
idx = 0
for idx in range(1, len(data.x)):
key = data.key[idx]
if not np.array_equal(data.key[idx], last_key):
rank_history, confusion = process_batch(
predict_fn,
data,
batch_start,
idx,
attack_point=attack_point,
byte_idx=byte_idx,
probability_accumulation=probability_accumulation)
ranks.append(rank_history[-1])
rank_histories.append(rank_history)
confusions.append(confusion)
summed_confusion = summed_confusion + confusion
batch_start = idx
last_key = key
if idx != batch_start:
rank_history, confusion = process_batch(
predict_fn,
data,
batch_start,
idx + 1,
attack_point=attack_point,
byte_idx=byte_idx,
probability_accumulation=probability_accumulation)
ranks.append(rank_history[-1])
rank_histories.append(rank_history)
confusions.append(confusion)
summed_confusion = summed_confusion + confusion
batch_start = idx
last_key = key
return ranks, rank_histories, confusions, summed_confusion
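# --- Usage sketch (illustrative only; the model path and shard file are placeholders). ---
if __name__ == "__main__":
    from tensorflow.keras.models import load_model
    from scaaml.aes.combined_data import load_shard
    model = load_model("byte0_key_model.h5")                 # hypothetical trained model
    holdout = load_shard("holdout_shard.h5", "key", byte_idx=0)
    ranks, histories, confusions, summed = side_channel_attack(
        model, holdout, attack_point="key", byte_idx=0)
    table, header = create_rank_table(ranks)
    print(header)
    for row in table:
        print(row)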
|
/aes/attack.py
| 0.849628 | 0.403773 |
attack.py
|
pypi
|
import abc
import dataclasses
import logging
import typing
import scopeton.scope
from dataclasses_json import dataclass_json
import yaml
# @dataclasses.dataclass
# @dataclass_json
from scopeton.decorators import Inject
class ArgsConfig:
lib_path: str
upgrade: bool
v: bool
pm_file: str
def __str__(self):
return "{}:[{}]".format(self.__class__.__name__, self.__dict__)
class DEP_TYPES:
git = "git"
@dataclass_json()
@dataclasses.dataclass
class Dependency(object):
url: str
path: typing.Optional[str] = None
revision: typing.Optional[str] = None
dependency_type: str = DEP_TYPES.git
@dataclass_json()
@dataclasses.dataclass
class FileConfig(object):
dependencies: typing.List[Dependency]
dep_path: typing.Union[str, None] = None
class ConfigParser:
@abc.abstractmethod
def parse(self, file: str):
pass
@abc.abstractmethod
def is_type(self, file: str) -> bool:
pass
class YamlConfigParser(ConfigParser):
def is_type(self, file: str) -> bool:
return file.split(".")[-1] == 'yaml'
@Inject()
def __init__(self, args_config: ArgsConfig):
self.args_config = args_config
def parse(self, file: str):
with open(file, "r") as stream:
data = yaml.safe_load(stream)
logging.info("Parsing data: {}".format(data))
return self.map_data(data)
def map_data(self, data):
conf = FileConfig.from_dict(data) # type: FileConfig
for dep in conf.dependencies:
if dep.path is None:
dep.path = dep.url.split("/")[-1].split(".")[0]
return conf
class PmConfigParser(ConfigParser):
def is_type(self, file: str) -> bool:
return file.split(".")[-1] == 'pm'
@Inject()
def __init__(self, args_config: ArgsConfig):
self.args_config = args_config
def parse(self, file: str):
with open(file, "r") as stream:
data = stream.readlines()
logging.info("Parsing data: {}".format(data))
return self.map_data(data)
def map_data(self, data):
deps = [self._parse_dep(k) for k in data]
deps = filter(lambda x: x is not None, deps)
return FileConfig(list(deps))
def _parse_dep(self, k):
k = k.strip()
if k.startswith("#"):
return None
path = k.split("/")[-1].split(".")[0]
if "#" in k:
branch = k.split("#")[1]
else:
branch = None
path = path.split("#")[0]
return Dependency(
k,
path=path,
revision=branch
)
class MainConfigParser:
@Inject()
def __init__(self, scope: scopeton.scope.Scope):
self.scope = scope
self.parsers = scope.getInstances(ConfigParser)
logging.info("Config parsers: {}".format(self.parsers))
def _get_parser(self, file: str) -> ConfigParser:
for k in self.parsers:
if k.is_type(file):
return k
raise Exception("Parser not found for file: {}".format(file))
def parse(self, file: str) -> FileConfig:
return self._get_parser(file).parse(file)
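# --- Usage sketch (illustrative only; shows the FileConfig/Dependency data model
# without going through the scopeton injection container). ---
if __name__ == "__main__":
    raw = {
        "dependencies": [
            {"url": "https://github.com/example/lib.git", "revision": "main"},
        ]
    }
    config = FileConfig.from_dict(raw)
    for dep in config.dependencies:
        # mirror YamlConfigParser.map_data: default the path to the repository name
        if dep.path is None:
            dep.path = dep.url.split("/")[-1].split(".")[0]
        print(dep.url, dep.path, dep.revision, dep.dependency_type)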
|
/scad_pm-0.23.tar.gz/scad_pm-0.23/scad_pm_mod/config.py
| 0.673729 | 0.175361 |
config.py
|
pypi
|
import sys
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
sys.path.append('')
from scada_data_analysis.utils.binning_function import binning_func
from scada_data_analysis.modules.power_curve_preprocessing import PowerCurveFiltering
class ExpectedPower:
def __init__(self, turbine_label, windspeed_label, power_label, method=None, kind=None,
cut_in_speed=3, bin_interval=0.5, z_coeff=2, filter_cycle=5) -> None:
"""
turbine_label: Column name of unique turbine identifiers or turbine names
windspeed_label: Column name of wind speed
power_label: Column name of active power
method: Specifies method for estimating expected power from processed training data.
The string has to be 'binning' or 'autoML'
kind: Only applies to the binning method and specifies the kind of interpolation
to apply on binned data points. Available methods are: 'linear', 'quadratic' and 'cubic'.
Quadratic and cubic methods use spline interpolation of second and third order respectively.
cut_in_speed: Cut in speed of turbine
bin_interval: Wind speed bin interval
z_coeff: Threshold of standard deviation used in filter
within which operational data is considered normal
filter_cycle: Number of times to pass scada data through filter
"""
self.turbine_label = turbine_label
self.windspeed_label = windspeed_label
self.power_label = power_label
self.method = method
self.kind = kind
self.cut_in_speed = cut_in_speed
self.bin_interval = bin_interval
self.z_coeff = z_coeff
self.filter_cycle = filter_cycle
def fit(self, training_data):
"""
Method to create models based on training data
training_data: Pandas dataframe of scada data for extracting
production benchmark (typical operating condition)
"""
# initialize power curve processing class
pc_filter = PowerCurveFiltering(self.turbine_label, self.windspeed_label, self.power_label,
training_data, self.cut_in_speed, self.bin_interval, self.z_coeff, self.filter_cycle)
# get data points during normal operating conditions (filtered data)
self.normal_df, _ = pc_filter.process()
# get unique turbine names in training data
self.turbine_names = self.normal_df[self.turbine_label].unique()
# instantiate a dictionary to store prediction functions and max power for each turbine
self.pred_funcs_dict = dict()
self.max_power_dict = dict()
for turbine_name in self.turbine_names:
            # extract filtered data for a single turbine
            normal_temp_df = self.normal_df[self.normal_df[self.turbine_label] == turbine_name].copy()
if self.method == 'binning':
# bin filtered data before interpolation
binned_df = binning_func(normal_temp_df, self.windspeed_label, self.power_label, self.bin_interval)
# create turbine-level interpolation function for estimating expected power
f = interp1d(binned_df.windspeed_bin_median, binned_df.pwr_bin_mean, kind=self.kind, fill_value="extrapolate")
elif self.method == 'autoML':
print('AutoML method is yet to be released. Hence, reverting to binning method')
# bin filtered data before interpolation
binned_df = binning_func(normal_temp_df, self.windspeed_label, self.power_label, self.bin_interval)
# create turbine-level interpolation function for estimating expected power
f = interp1d(binned_df.windspeed_bin_median, binned_df.pwr_bin_mean, kind=self.kind, fill_value="extrapolate")
self.pred_funcs_dict[turbine_name] = f
self.max_power_dict[turbine_name] = binned_df.pwr_bin_mean.round().max()
return self
def predict(self, test_data):
"""
Returns the same data as input with an additional expected power column
"""
self.pred_df = test_data.copy()
for turbine_name in self.turbine_names:
test_temp_df = self.pred_df[self.pred_df[self.turbine_label] == turbine_name]
test_temp_index = test_temp_df.index
self.pred_df.loc[test_temp_index, 'expected_power'] = self.pred_funcs_dict[turbine_name](test_temp_df[self.windspeed_label])
# post process expected power estimations to not exceed maximum value in training data
self.pred_df.loc[test_temp_index, 'expected_power'] = self.pred_df.loc[test_temp_index,
'expected_power'].clip(upper=self.max_power_dict[turbine_name])
        self.pred_df['expected_power'] = self.pred_df['expected_power'].clip(lower=0)
return self.pred_df
if __name__ == "__main__":
from sklearn.metrics import mean_absolute_error
train_df = pd.read_csv(r'examples\datasets\training_data.zip')
test_df = pd.read_csv(r'examples\datasets\test_data.zip')
power_model = ExpectedPower(turbine_label='Wind_turbine_name', windspeed_label='Ws_avg',
power_label='P_avg', method='binning', kind='cubic')
power_model.fit(train_df)
predictions = power_model.predict(test_df)
print('MAE:', mean_absolute_error(predictions['P_avg'], predictions['expected_power']))
print('Prediction data', predictions.head())
|
/scada_data_analysis-1.0.7.tar.gz/scada_data_analysis-1.0.7/scada_data_analysis/modules/expected_power.py
| 0.614857 | 0.571677 |
expected_power.py
|
pypi
|
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append('')
from scada_data_analysis.utils.binning_function import binning_func
class PowerCurveFiltering:
"""
This class returns two subsets of the original SCADA data representing normal and abnormal operations
"""
def __init__(self, turbine_label, windspeed_label, power_label, data=None, cut_in_speed=3,
bin_interval=0.5, z_coeff=2, filter_cycle=5, return_fig=False, image_path=None):
"""
turbine_label: column name of unique turbine identifier
windspeed_label: column name of wind speed
power_label: column name of active power
data: pandas dataframe of scada data
cut_in_speed: cut in speed of turbine
bin_interval: Wind speed bin interval
z_coeff: threshold of standard deviation used in filter within which operational data is considered normal
filter_cycle: number of times to pass scada data through filter
return_fig: if true, module returns power curve plot in addition to filtered datasets
image_path: used only if return_fig is True
"""
self.turbine_label = turbine_label
self.windspeed_label = windspeed_label
self.power_label = power_label
self.data = data
self.cut_in_speed = cut_in_speed
self.bin_interval = bin_interval
self.z_coeff = z_coeff
self.filter_cycle = filter_cycle
self.return_fig = return_fig
self.image_path = image_path
def remove_downtime_events(self):
"""
Filters out downtime events from SCADA data
"""
self.no_dt_df = self.data[~((self.data[self.power_label] <= 1) &\
(self.data[self.windspeed_label] >= self.cut_in_speed))]
def remove_fault_events_per_turbine(self):
"""
Filters out data points with unrealistically low power output at moderately high wind speeds.
This step also helps improve filtering procedure by reducing noise in data.
"""
max_power = self.no_dt_per_turbine_df[self.power_label].max()
self.no_dt_per_turbine_df = self.no_dt_per_turbine_df.drop(self.no_dt_per_turbine_df[((self.no_dt_per_turbine_df[self.power_label] < 0.9*max_power) &\
(self.no_dt_per_turbine_df[self.windspeed_label] > 4.5*self.cut_in_speed))].index)
def process(self):
"""
Runs the different methods and functions that processes the scada data
"""
self.remove_downtime_events()
turbine_names = self.data[self.turbine_label].unique()
filtered_ind_list = []
for turbine_name in turbine_names:
self.no_dt_per_turbine_df = self.no_dt_df[self.no_dt_df[self.turbine_label] == turbine_name]
# Remove faulty events from remaining non-downtime data
self.remove_fault_events_per_turbine()
filtered_ind_list.append(self.secondary_filter())
normal_ind_list = sum(filtered_ind_list, [])
abnormal_ind_list = list(set(self.data.index.tolist()).difference(set(normal_ind_list)))
assert len(self.data.index.tolist()) == len(normal_ind_list) + len(abnormal_ind_list)
normal_df = self.data.loc[normal_ind_list]
abnormal_df = self.data.loc[abnormal_ind_list]
if self.return_fig:
self.normal_df = normal_df.copy()
self.abnormal_df = abnormal_df.copy()
self.normal_df.loc[:, 'Abnormal'] = 'No'
self.abnormal_df.loc[:, 'Abnormal'] = 'Yes'
self.processed_data = pd.concat([self.normal_df, self.abnormal_df])
if not os.path.exists(self.image_path):
os.mkdir(self.image_path)
for turbine_name in turbine_names:
turbine_data = self.processed_data[self.processed_data[self.turbine_label] == turbine_name]
plt.figure(figsize=(18,6))
plt.scatter(x=self.windspeed_label, y=self.power_label, s=6, data=turbine_data, c=turbine_data['Abnormal'].map({'No':'blue', 'Yes':'orange'}))
plt.title(f"Operational power curve for turbine {turbine_name}", fontsize=16)
plt.xlabel("Wind Speed", fontsize=14)
plt.ylabel("Power", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
                fname = os.path.join(self.image_path, f"{turbine_name}_pc.png")
plt.savefig(fname)
return normal_df, abnormal_df
def secondary_filter(self):
"""
Filters the turbine data using provided threshold (z_coeff)
Returns: Median wind speed, average and standard deviation of produced power for each bin
"""
no_dt_per_turbine_df = self.no_dt_per_turbine_df.reset_index().copy()
if 'windspeed_bin' not in no_dt_per_turbine_df.columns:
max_windspeed = int(max(no_dt_per_turbine_df[self.windspeed_label]))
windspeed_bins = pd.IntervalIndex.from_tuples([(round(self.bin_interval*a, 2),
round((self.bin_interval*a)+self.bin_interval, 2))\
for a in range(0, 2*max_windspeed+1)])
no_dt_per_turbine_df.loc[:, 'windspeed_bin'] = pd.cut(no_dt_per_turbine_df[self.windspeed_label], bins=windspeed_bins)
if self.filter_cycle == 0:
print("Number of iterative steps cannot be less than 1, filter_cycle set to 1!")
self.filter_cycle = 1
for _ in range(int(self.filter_cycle)):
binned_turb_df = binning_func(no_dt_per_turbine_df, self.windspeed_label, self.power_label, self.bin_interval)
if set(no_dt_per_turbine_df.columns).issuperset(set(['windspeed_bin_median', 'pwr_bin_mean', 'pwr_bin_std'])):
no_dt_per_turbine_df.drop(['windspeed_bin_median', 'pwr_bin_mean', 'pwr_bin_std'], axis=1, inplace=True)
no_dt_per_turbine_df = pd.merge(no_dt_per_turbine_df, binned_turb_df, how='left', on='windspeed_bin')
no_dt_per_turbine_df.loc[:, 'pwr_low_thresh'] = no_dt_per_turbine_df['pwr_bin_mean'] - self.z_coeff*no_dt_per_turbine_df['pwr_bin_std']
no_dt_per_turbine_df.loc[:, 'pwr_low_thresh'] = no_dt_per_turbine_df['pwr_low_thresh'].apply(lambda x: 0 if x < 0 else x)
no_dt_per_turbine_df.loc[:, 'pwr_high_thresh'] = no_dt_per_turbine_df['pwr_bin_mean'] + self.z_coeff*no_dt_per_turbine_df['pwr_bin_std']
no_dt_per_turbine_df.loc[:, 'pwr_high_thresh'] = no_dt_per_turbine_df['pwr_high_thresh'].apply(lambda x: 0 if x < 0 else x)
no_dt_per_turbine_df = no_dt_per_turbine_df[(no_dt_per_turbine_df[self.power_label] > no_dt_per_turbine_df.pwr_low_thresh) &\
(no_dt_per_turbine_df[self.power_label] < no_dt_per_turbine_df.pwr_high_thresh) |\
(no_dt_per_turbine_df[self.windspeed_label] < self.cut_in_speed)]
return no_dt_per_turbine_df['index'].tolist()
if __name__ == "__main__":
df = pd.read_csv(r'examples\datasets\la-haute-borne-data-2017-2020.zip', sep=';')
pc_filter = PowerCurveFiltering(turbine_label='Wind_turbine_name', windspeed_label='Ws_avg',
power_label='P_avg', data=df, cut_in_speed=3, bin_interval=0.5,
z_coeff=2.5, filter_cycle=5, return_fig=True, image_path=r'examples\images')
normal_df, abnormal_df = pc_filter.process()
print('Normal Operations Data', normal_df.head())
print('Abnormal Operations Data', abnormal_df.head())
|
/scada_data_analysis-1.0.7.tar.gz/scada_data_analysis-1.0.7/scada_data_analysis/modules/power_curve_preprocessing.py
| 0.508544 | 0.48054 |
power_curve_preprocessing.py
|
pypi
|
# scadnano Python package

[](https://scadnano-python-package.readthedocs.io/en/latest/?badge=latest)
[scadnano](http://scadnano.org) ("scriptable-cadnano") is a program for designing synthetic DNA structures such as DNA origami.
The scadnano Python package
([source code repository here](https://github.com/UC-Davis-molecular-computing/scadnano-python-package))
is a library for programmatically creating and editing these nanostructures.
The scadnano project is developed and maintained by the UC Davis Molecular Computing group.
Note that [cadnano](https://cadnano.org) is a separate project, developed and maintained by the [Douglas lab](https://bionano.ucsf.edu/) at UCSF.
If you find scadnano useful in a scientific project, please cite its associated paper:
> scadnano: A browser-based, scriptable tool for designing DNA nanostructures.
David Doty, Benjamin L Lee, and Tristan Stérin.
DNA 2020: *Proceedings of the 26th International Conference on DNA Computing and Molecular Programming*
[ [paper](https://doi.org/10.4230/LIPIcs.DNA.2020.9) | [BibTeX](https://web.cs.ucdavis.edu/~doty/papers/scadnano.bib) ]
*Note:* If you are reading this on the PyPI website, some of the links below won't work. Many are relative links intended to be read on the [GitHub README page](https://github.com/UC-Davis-molecular-computing/scadnano-python-package#readme).
## Table of contents
* [Overview](#overview)
* [Reporting issues](#reporting-issues)
* [Installation](#installation)
* [Example](#example)
* [Abbreviated syntax with chained methods](#abbreviated-syntax-with-chained-methods)
* [StrandBuilder object for iteratively building up strands with many domains](#strandbuilder-object-for-iteratively-building-up-strands-with-many-domains)
* [Tutorial](#tutorial)
* [API documentation](#api-documentation)
* [Other examples](#other-examples)
* [Contributing](#contributing)
## Overview
This package is used to write Python scripts outputting `.sc` files readable by [scadnano](https://scadnano.org), a web application useful for displaying and manually editing these structures. The purpose of this module is to help automate some of the task of creating DNA designs, as well as making large-scale changes to them that are easier to describe programmatically than to do by hand in the scadnano web interface.
We will try to announce breaking changes (and possibly new features) under the [GitHub releases page](https://github.com/UC-Davis-molecular-computing/scadnano-python-package/releases). The version numbers in this Python library repo and the [web interface repo](https://github.com/UC-Davis-molecular-computing/scadnano/releases) won't always advance at the same time, and sometimes a feature is supported in one before the other.
Following [semantic versioning](https://semver.org/), version numbers are major.minor.patch, i.e., version 0.9.2 has minor version number 9. Prior to version 1.0.0, when a breaking change is made, this will increment the minor version (for example, going from 0.9.4 to 0.10.0). After version 1.0.0, breaking changes will increment the major version.
## Reporting issues
Please report issues in the web interface at the [scadnano web interface GitHub repository](https://github.com/UC-Davis-molecular-computing/scadnano/issues), and report issues in the Python scripting library at the [scadnano Python package GitHub repository](https://github.com/UC-Davis-molecular-computing/scadnano-python-package/issues).
## Installation
Short version: type this at the command line:
```console
pip install scadnano
```
Read below for troubleshooting suggestions if that didn't work.
### Getting Python
The scadnano Python package requires Python version 3.7 or later (with a workaround available for version 3.6, but not for any lower version).
To check your current version of Python, open a command line and type
```
python --version
```
If it is version 2.7 or below, type
```
python3 --version
```
If that fails, or reports Python version 3.5 or below, you will have to install a later version of Python (recommended at least 3.7). Follow [this link](https://www.python.org/downloads/) to install Python. You may also use an alternative Python distribution, such as [Anaconda](https://www.anaconda.com/products/individual#Downloads).
If you are using Python 3.6 and do not wish to upgrade, you can install a package providing the required features: the [dataclasses backport](https://pypi.org/project/dataclasses/); see `pip` instructions below to see how to install it.
Python 3.7 provides the
[dataclasses module](https://docs.python.org/3/library/dataclasses.html) automatically.
### Installing the scadnano Python package
Once Python is installed (and the dataclasses backport if you have Python version 3.6), there are two ways you can install the scadnano Python package:
1. pip (recommended)
Use [pip](https://pypi.org/project/pip/) to install the package by executing the following at the command line:
```console
pip install scadnano
```
If it worked, you should be able to open a Python interpreter and import the scadnano module:
```console
Python 3.7.9 (default, Aug 31 2020, 17:10:11) [MSC v.1916 64 bit (AMD64)] :: Anaconda, Inc. on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import scadnano as sc
>>> print(sc.Domain(helix=1, forward=True, start=0, end=8))
Domain(, helix=1, forward=True, start=0, end=8)
>>>
```
### Troubleshooting
If the above does not work for you, here are some things to try.
If your Python installation does not already have pip installed, you may have to install it.
Executing [this Python script](https://bootstrap.pypa.io/get-pip.py) should work;
see also
https://docs.python.org/3/installing/index.html
or
https://www.liquidweb.com/kb/install-pip-windows/.
Once pip is installed, or if you believe it is already installed, check your version of `pip` by typing
```
pip --version
```
It should say something like
```
pip 19.3.1 from ...lib\site-packages\pip (python 3.8)
```
If the version of Python at the end is Python 3.7 or higher, you are good. If it is version 2.7 or lower, type
```
pip3 --version
```
If that works and shows Python 3.7 or higher, you are good, but you should type `pip3` in the subsequent instructions instead of `pip`.
If it shows Python 3.6, install the [dataclasses backport module](https://pypi.org/project/dataclasses/) via
```
pip install dataclasses
```
If it shows Python 3.5 or lower, then you will need to upgrade your Python version (recommended Python 3.7 or higher).
2. download
As a simple alternative (in case you run into trouble using pip), you can simply download the scadnano.py file. However, you need to first install two packages that are required by scadnano: Install [openpyxl](https://pypi.org/project/openpyxl/) and [tabulate](https://pypi.org/project/tabulate/) by typing the following at the command line: `pip install openpyxl tabulate`.
Download and place the following files in your [PYTHONPATH](https://docs.python.org/3/using/cmdline.html#envvar-PYTHONPATH) (e.g., in the same directory as the scripts you are running). **Note:** If you are reading this on the PyPI website or anywhere other than GitHub, the links below won't work. They are relative links intended to be read on the [GitHub README page](https://github.com/UC-Davis-molecular-computing/scadnano-python-package#readme).
- *required*: [scadnano.py](scadnano/scadnano.py)
- *optional*: [modifications.py](scadnano/modifications.py); This contains some common DNA modifications such as biotin and Cy3.
- *optional*: [origami_rectangle.py](scadnano/origami_rectangle.py); This can help create origami rectangles, but it is not necessary to use scadnano.
To download them, right-click on "Raw" near the top and select (in Chrome or Firefox) "Save link as...":

The scadnano package uses the Python package [xlwt](https://pypi.org/project/xlwt/) to write Excel files, so xlwt must be installed in order to call the method [`Design.write_idt_plate_excel_file()`](https://scadnano-python-package.readthedocs.io/#scadnano.Design.write_idt_plate_excel_file) to export an Excel file with DNA sequences. To install xlwt, type `pip install xlwt` at the command line. (If you instead use pip to install the scadnano package, xlwt will be automatically installed.)
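As a rough sketch (not taken from the scadnano docs; the no-argument call and the output location are assumptions), exporting IDT plate files might look like:
```python
import scadnano as sc

design = sc.Design(helices=[sc.Helix(max_offset=48)], strands=[], grid=sc.square)
# ... add strands, assign DNA sequences, and set IDT fields ...
design.write_idt_plate_excel_file()  # requires xlwt; writes an Excel plate file
```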
## Example
Consider the following design:

The following Python script produces this design.
```python
import scadnano as sc
import modifications as mod
def create_design() -> sc.Design:
# helices
helices = [sc.Helix(max_offset=48), sc.Helix(max_offset=48)]
# left staple
stap_left_domain1 = sc.Domain(helix=1, forward=True, start=8, end=24)
stap_left_domain0 = sc.Domain(helix=0, forward=False, start=8, end=24)
stap_left = sc.Strand(domains=[stap_left_domain1, stap_left_domain0])
# right staple
stap_right_domain0 = sc.Domain(helix=0, forward=False, start=24, end=40)
stap_right_domain1 = sc.Domain(helix=1, forward=True, start=24, end=40)
stap_right = sc.Strand(domains=[stap_right_domain0, stap_right_domain1])
stap_right.set_modification_5p(mod.biotin_5p)
# scaffold
scaf_domain1_left = sc.Domain(helix=1, forward=False, start=8, end=24)
scaf_domain0 = sc.Domain(helix=0, forward=True, start=8, end=40)
loopout = sc.Loopout(length=3)
scaf_domain1_right = sc.Domain(helix=1, forward=False, start=24, end=40)
scaf = sc.Strand(domains=[scaf_domain1_left, scaf_domain0, loopout, scaf_domain1_right], is_scaffold=True)
# whole design
design = sc.Design(helices=helices, strands=[scaf, stap_left, stap_right], grid=sc.square)
# deletions and insertions added to design are added to both strands on a helix
design.add_deletion(helix=1, offset=20)
design.add_insertion(helix=0, offset=14, length=1)
design.add_insertion(helix=0, offset=26, length=2)
# also assigns complement to strands other than scaf bound to it
design.assign_dna(scaf, 'AACGT' * 18)
return design
if __name__ == '__main__':
design = create_design()
design.write_scadnano_file(directory='output_designs')
```
Running the code above produces a `.sc` file that, if loaded into scadnano, should appear as in the screenshot above. The [web interface README](https://github.com/UC-Davis-molecular-computing/scadnano/blob/master/README.md#terminology) explains many of the terms used in the code (domain, helix, loopout, insertion, etc.).
## Abbreviated syntax with chained methods
Instead of explicitly creating variables and objects representing each domain in each strand, there is a shorter syntax using chained method calls. Instead of the above, create only the helices first, then create the Design. Then strands can be added using a shorter syntax, to describe how to draw the strand starting at the 5' end and moving to the 3' end. The following is a modified version of the above `create_design` function using these chained methods:
```python
def create_design() -> sc.Design:
# helices
helices = [sc.Helix(max_offset=48), sc.Helix(max_offset=48)]
# whole design
design = sc.Design(helices=helices, grid=sc.square)
# for absolute offsets, call method "to"
# left staple
design.draw_strand(1, 8).to(24).cross(0).to(8)
# for relative offsets, call method "move"
# right staple
design.draw_strand(0, 40).move(-16).cross(1).move(16).with_modification_5p(mod.biotin_5p)
# scaffold
design.draw_strand(1, 24).move(-16).cross(0).move(32).loopout(1, 3).move(-16).as_scaffold()
# deletions and insertions added to design are added to both strands on a helix
design.add_deletion(helix=1, offset=20)
design.add_insertion(helix=0, offset=14, length=1)
design.add_insertion(helix=0, offset=26, length=2)
# also assigns complement to strands other than scaf bound to it
design.assign_dna(design.scaffold, 'AACGT' * 18)
return design
```
Documentation is available in the [API docs](https://scadnano-python-package.readthedocs.io/en/latest/#scadnano.Design.draw_strand).
## StrandBuilder object for iteratively building up strands with many domains
The method [`Design.draw_strand`](https://scadnano-python-package.readthedocs.io/en/latest/#scadnano.Design.draw_strand), as well as all those that follow it in a chained method call (e.g., `move`, `cross`, etc.) all return an instance of the class [`StrandBuilder`](https://scadnano-python-package.readthedocs.io/en/latest/#scadnano.StrandBuilder).
Above, that `StrandBuilder` instance is anonymous, i.e., never assigned to a variable.
Some long strands may be easier to specify with loops, for example an M13 scaffold strand for an origami.
If so, then to use the above methods, assign the `StrandBuilder` object to a variable, and call the relevant methods on that object to build up the strand in each iteration of the loop.
For example, the following modification of the above `create_design` function creates a linear scaffold strand that zig-zags back and forth across 32 helices:
```python
def create_design() -> sc.Design:
num_helices = 32
helices = [sc.Helix(max_offset=200) for _ in range(num_helices)]
design = sc.Design(helices=helices, grid=sc.square)
strand_builder = design.draw_strand(0, 0)
for helix in range(num_helices):
# move forward if on an even helix, otherwise move in reverse
move_distance = 200 if helix % 2 == 0 else -200
strand_builder.move(move_distance)
        if helix < num_helices - 1:  # crossover to next helix, unless it's the last helix
strand_builder.cross(helix + 1)
strand_builder.as_scaffold()
return design
```
## API Documentation
Online documentation of the package API (which classes, methods, functions, and constants are provided by the package) is located here:
https://scadnano-python-package.readthedocs.io
## Tutorial
A [tutorial](https://github.com/UC-Davis-molecular-computing/scadnano-python-package/blob/master/tutorial/tutorial.md) shows how to create a "standard" 24-helix DNA origami rectangle using the scadnano Python package.
## Other examples
*Note:* If you are reading this on the PyPI website, the links below won't work. They are relative links intended to be read on the [GitHub README page](https://github.com/UC-Davis-molecular-computing/scadnano-python-package#readme).
Several example scripts are located in the
[examples/](examples) subfolder.
Their output is contained in the
[examples/output_designs/](examples/output_designs) subfolder.
## Contributing
If you wish to contribute to scadnano, please see the [CONTRIBUTING document](CONTRIBUTING.md) to contribute to the scadnano Python package. There is also a [CONTRIBUTING document](https://github.com/UC-Davis-molecular-computing/scadnano/blob/master/CONTRIBUTING.md) for the web interface.
|
/scadnano-0.18.1.tar.gz/scadnano-0.18.1/README.md
| 0.893356 | 0.904229 |
README.md
|
pypi
|
import warnings
warnings.simplefilter(action='ignore')
import argparse, os
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from scaespy import scAEspy
from _version import __version__
def getArgs():
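    """Parse the command-line arguments of the scAEspy CLI and return them as a dict."""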
parser = argparse.ArgumentParser(add_help=False)
# The required setting
parser.add_argument('--matrices', help='Gene expression matrices (each row must be a cell)', nargs='+', required=True)
# Optional settings
parser.add_argument('--delimiter', help='String used to separate values', nargs='?', default="\t", type=str)
parser.add_argument('--output', help='Output folder', nargs='?', default="output", type=str)
parser.add_argument('--latent', help='Dimensions of the latent space', nargs='?', default=16, type=int)
parser.add_argument('--hidden', help='Dimensions of the hidden layers (list)', nargs='+', default=[64], type=int)
parser.add_argument('--loss', help='Loss function', nargs='?', default="Poisson", type=str)
    parser.add_argument('--activation', help='Activation function', nargs='?', default="sigmoid", type=str)
parser.add_argument('--epochs', help='Number of epochs', nargs='?', default=100, type=int)
parser.add_argument('--batch', help='Batch size', nargs='?', default=100, type=int)
parser.add_argument('--gaussian', help='Number of Gaussian distribution(s)', nargs='?', default=1, type=int)
parser.add_argument('--alpha', help='alpha setting used to balance between KL and MMD', nargs='?', default=0, type=int)
parser.add_argument('--lambda', help='lambda setting used to balance between KL and MMD', nargs='?', default=1, type=int)
parser.add_argument('--patience', help='Max patience', nargs='?', default=None, type=int)
parser.add_argument('--seed', help='Seed value used for reproducibility', nargs='?', default=None, type=int)
parser.add_argument('--synthetic', help='Number of synthetic cells to generate', nargs='?', default=None, type=int)
# Settings without a parameter value
    parser.add_argument('--constrained', help='Enable the constrained version of the loss function', action='store_true')
    parser.add_argument('--clipping', help='Clip the value when NB loss functions are used', action='store_true')
parser.add_argument('--prior', help='Enable the learnable prior distribution', action='store_true')
parser.add_argument('--split', help='Split the provided matrix into train and test sets', action='store_true')
parser.add_argument('--plot', help='Plot the term of the ELBO function', action='store_true')
parser.add_argument('--verbose', help='Enable the verbose modality of scAEspy', action='store_true')
parser.add_argument('--help', action='help')
args = vars(parser.parse_args())
return args
def prefixAE(args):
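    """Return the acronym of the autoencoder variant implied by the alpha/lambda/gaussian
    settings (e.g. alpha=0, lambda=1, 1 Gaussian -> VAE; more than 1 Gaussian -> GMVAE)."""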
if args["alpha"] == 0 and args["lambda"] == 1:
if args["gaussian"] == 1:
value = "VAE"
elif args["gaussian"] > 1:
value = "GMVAE"
elif args["alpha"] == 1 and args["lambda"] == 1:
if args["gaussian"] == 1:
value = "MMDAE"
elif args["gaussian"] > 1:
value = "GMMMD"
elif args["alpha"] == 0 and args["lambda"] == 2:
if args["gaussian"] == 1:
value = "MMDVAE"
elif args["gaussian"] > 1:
value = "GMMMDVAE"
else:
value = "AE_alpha=%d_lambda=%d"%(args["alpha"], args["lambda"])
return value
def saveCells(args, cells_ids, genes_ids, prefix, synthetic_cells, reconstructed_cells, latent_cells):
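    """Write the synthetic cells (if requested), the latent representation, and the
    reconstructed cells to TSV files in the output folder, prefixed by the AE name."""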
if args["synthetic"] is not None:
synthetic_cells = pd.DataFrame(index=["Cell %d"%(d+1) for d in range(args["synthetic"])], columns=genes_ids, data=synthetic_cells)
synthetic_cells.to_csv(args["output"]+os.sep+"%s_synthetic_cells.tsv"%prefix, sep="\t")
latent_cells = pd.DataFrame(index=cells_ids, columns=["Latent_%d"%(d+1) for d in range(args["latent"])], data=latent_cells)
latent_cells.to_csv(args["output"]+os.sep+"%s_latent_representation.tsv"%prefix, sep="\t")
reconstructed_cells = pd.DataFrame(index=cells_ids, columns=genes_ids, data=reconstructed_cells)
reconstructed_cells.to_csv(args["output"]+os.sep+"%s_reconstructed_cells.tsv"%prefix, sep="\t")
def saveLosses(args, prefix, history):
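    """Write the per-epoch train and test values of the ELBO terms (total, reconstruction,
    KLy, KLz, and MMD losses) to TSV files in the output folder."""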
train_losses = pd.DataFrame(columns=["Total loss",
"Reconstruction loss",
"KLy loss",
"KLz loss",
"MMD loss"],
data=np.transpose([history["train_loss"],
history["train_rec"],
history["train_kly"],
history["train_klz"],
history["train_mmd"]]))
test_losses = pd.DataFrame(columns=["Total loss",
"Reconstruction loss",
"KLy loss",
"KLz loss",
"MMD loss"],
data=np.transpose([history["test_loss"],
history["test_rec"],
history["test_kly"],
history["test_klz"],
history["test_mmd"]]))
train_losses.to_csv(args["output"]+os.sep+"%s_train_losses.tsv"%prefix, sep="\t")
test_losses.to_csv(args["output"]+os.sep+"%s_test_losses.tsv"%prefix, sep="\t")
def readMatrices(matrices, delimiter):
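    """Load one or more gene expression matrices (plain numeric text or delimited files with
    a header), reject purely numeric gene names, and concatenate the matrices cell-wise,
    filling missing genes with zeros. Returns (cell IDs, gene IDs, merged value matrix)."""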
dataframes = []
for matrix in matrices:
try:
df = np.loadtxt(matrix, delimiter=delimiter)
df = pd.DataFrame(df)
if df.shape[1] < 2:
exit(-100)
dataframes.append(df)
continue
except:
pass
try:
df = pd.read_csv(matrix, sep=delimiter)
if df.shape[1] < 2:
exit(-100)
if is_string_dtype(df[df.columns[0]]):
df.set_index(df.columns[0], drop=True, inplace=True)
df.index.name = None
dataframes.append(df)
continue
except:
print("Error, unable to load the gene expression matrix %s using the provided delimiter"%matrix)
print("Consider using another delimiter for .%s files"%matrix.split(".")[-1])
exit(-100)
ignore_index = False
if len(dataframes) > 1:
toExit = False
for idx,dataframe in enumerate(dataframes):
try:
names = list(map(int, dataframe.columns.tolist()))
print(names)
print("The gene names of matrix %s are numeric"%matrices[idx])
toExit = True
except:
continue
if toExit:
print("Consider using standard gene names or strings")
exit(-200)
names = set(dataframes[0].index.tolist())
for dataframe in dataframes[1:]:
names = names.intersection(set(dataframe.index.tolist()))
if len(names) > 0:
print("* Warning! There are some cells with the same IDs")
print("* Cells have been concatenated with new IDs")
ignore_index = True
merged = pd.concat(dataframes, join="outer", axis=0, ignore_index=ignore_index)
merged = merged.fillna(0)
return merged.index.tolist(), merged.columns.tolist(), merged.values
def main():
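    """Entry point of the scAEspy command-line interface."""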
print("#"*200)
print("* scAEspy (v.%s): a unifying tool based on autoencoders for the analysis of single-cell RNA sequencing data"%__version__)
print()
args = getArgs()
if args["verbose"]:
print("* Loading the gene expression matrices using the provided delimiter")
cells_ids, genes_ids, merged_matrix = readMatrices(args["matrices"], args["delimiter"])
prefix = prefixAE(args)
if args["verbose"]:
print("* Initialising scAEspy ...")
print("\t* alpha = %2d; lambda = %2d -> %s"%(args["alpha"], args["lambda"], prefix))
print("\t* Loss -> %s"%args["loss"])
print("\t* Constrained -> %s"%args["constrained"])
print("\t* Number of Gaussian(s) -> %s"%args["gaussian"])
print("\t* Learnable prior -> %s"%args["prior"])
print("\t* Number of epochs -> %s"%args["epochs"])
print("\t* Batch size -> %s"%args["batch"])
print("\t* Max patience (epochs) -> %s"%args["patience"])
print()
print()
scaespy = scAEspy(merged_matrix.shape[1],
hidden_layers = args["hidden"],
latent_layer = args["latent"],
activation = args["activation"],
rec_loss = args["loss"],
num_gaussians = args["gaussian"],
learnable_prior = args["prior"],
alpha = args["alpha"],
lambd = args["lambda"],
constrained = args["constrained"],
clipping = args["clipping"],
verbose = args["verbose"],
seed = args["seed"])
if args["verbose"]:
print()
print("#"*200)
print("* Building %s ..."%prefix)
scaespy.build()
if args["split"]:
if args["verbose"]:
print()
print("* Splitting in training and validation sets shuffling the data ...")
print()
print("* Analysing %d cells across %d genes"%merged_matrix.shape)
print()
print()
x_train, x_test = train_test_split(merged_matrix, test_size=0.1, random_state=42, shuffle=True)
scaespy.train(x_train, x_test, epochs=args["epochs"], batch_size=args["batch"], max_patience=args["patience"])
else:
if args["verbose"]:
print()
print("* Shuffling the data ...")
print()
print("* Analysing %d cells across %d genes"%merged_matrix.shape)
print()
print()
matrix_shuffled = shuffle(merged_matrix, random_state=42)
scaespy.train(matrix_shuffled, matrix_shuffled, epochs=args["epochs"], batch_size=args["batch"], max_patience=args["patience"])
if not os.path.isdir(args["output"]):
os.mkdir(args["output"])
history = scaespy.getHistory()
if args["verbose"]:
print("#"*200)
print("* Saving the values of the terms of the ELBO function ...")
print()
saveLosses(args, prefix, history)
if args["plot"]:
if args["verbose"]:
print("* Plotting the terms of the ELBO function ...")
scaespy.plotLosses(show=False, folder=args["output"], name=prefix)
synthetic_cells = None
if args["verbose"]:
print("* Generating the reconstructed cells ...")
print()
reconstructed_cells = scaespy.reconstructedRepresentation(merged_matrix)
if args["verbose"]:
print("* Generating the latent representation of the cells ...")
print()
latent_cells = scaespy.latentRepresentation(merged_matrix)
if args["synthetic"] is not None:
if args["verbose"]:
print("* Generating %d synthetic cells ..."%args["synthetic"])
synthetic_cells = scaespy.sampling(args["synthetic"])
if args["verbose"]:
print("* Saving the generated data ...")
saveCells(args, cells_ids, genes_ids, prefix, synthetic_cells, reconstructed_cells, latent_cells)
print("#"*200)
print()
if __name__ == '__main__':
main()
|
/scaespy-1.2.1-py3-none-any.whl/cli/scaespy.py
| 0.648466 | 0.243474 |
scaespy.py
|
pypi
|
import argparse
import datetime
import logging
import os
import re
import shlex
import subprocess
import sys
import textwrap
import time
logging.basicConfig(level=logging.INFO)
RE_YEARRANGE = re.compile(r"(\d{4})-(\d{4})", re.ASCII)
REPOROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
CURRENTYEAR = int(datetime.date.today().strftime("%Y"))
class Error(Exception):
"""Exceptions to be catched and nicely formatted to the user."""
def auto_fr_educnat(years):
"""Iterate over commands and destination files to run.
Yields tuples (command, file), where:
- command is the command to run, as a list to be passed to subprocess.run()
(working directory being the repository root).
- file is the name of the destination file
(absolute, or relative to the repository root):
standard output of the command is to be written to this file
(which is overwritten without asking if it already exists).
"""
for year in years:
for zone, ville in (("A", "Grenoble"), ("B", "Rennes"), ("C", "Paris")):
yield (
["./bin/autoscl", "fr.educnat", f"{year}-{year+1}", ville],
f"doc/examples/fr_{year}{year+1}_{zone}.scl",
)
COUNTRIES = {"fr.educnat": auto_fr_educnat}
def _type_country(text):
"""Check that country is supported."""
if text in COUNTRIES:
return text
raise argparse.ArgumentTypeError(
"{} is not a valid countries. Choose a string among: {}.".format(
text, ", ".join(COUNTRIES.keys())
)
)
def _type_yearrange(text):
"""Check that year is a range of years (e.g. "2015-2020")."""
match = RE_YEARRANGE.match(text)
if not match:
raise argparse.ArgumentTypeError(
f"{text} is not a valid year range: it must be of the form YYYY-YYYY (e.g. 2015-2020)."
)
start, end = int(match.groups()[0]), int(match.groups()[1])
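    # Note: range() excludes `end`, so "2015-2020" yields start years 2015 through 2019,
    # i.e. school years 2015-2016 up to 2019-2020.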
return list(range(start, end))
def argumentparser():
"""Return an argument parser."""
# pylint: disable=line-too-long
parser = argparse.ArgumentParser(
description="Generate and overwrite .scl examples.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(
"""\
            Example: 'autoautoscl --country fr.educnat 2017-2020' generates the .scl files for French official school years, from 2017 to 2020.
            To generate the calendars for all three zones, run the following from the project root (optionally adding the years):
            ./bin/autoautoscl
            ./bin/generate_examples.sh
"""
),
)
parser.add_argument(
"-c",
"--country",
help="Country of calendar.",
default="fr.educnat",
type=_type_country,
)
parser.add_argument(
"years",
help="Calendar school years.",
nargs="?",
type=_type_yearrange,
default="{}-{}".format(CURRENTYEAR, CURRENTYEAR + 1),
)
return parser
def main():
"""Main function."""
args = argumentparser().parse_args()
for command, destfile in COUNTRIES[args.country](args.years):
time.sleep(2) # Be kind to the opendata servers.
completed = subprocess.run(
command,
stdin=subprocess.DEVNULL,
capture_output=True,
cwd=REPOROOT,
text=True,
)
if completed.returncode == 0:
with open(os.path.join(REPOROOT, destfile), "w") as stdout:
stdout.write(completed.stdout)
logging.info(
"""Command "%s" completed successfully.""", shlex.join(command)
)
else:
logging.error(
"""Command "%s" exited with errors:\n%s""",
shlex.join(command),
completed.stderr,
)
if __name__ == "__main__":
try:
if sys.version_info < (3, 8):
raise Error("This program requires python version 3.8 or above.")
main()
except Error as error:
logging.error(str(error))
sys.exit(1)
|
/scal-2.1.0.tar.gz/scal-2.1.0/bin/lib/autoautoscl/__main__.py
| 0.471953 | 0.252183 |
__main__.py
|
pypi
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import logging
import optparse
from foursquare.source_code_analysis.exception import SourceCodeAnalysisException
from foursquare.source_code_analysis.scala.scala_import_parser import PathValidator, ScalaImportParser
from foursquare.source_code_analysis.scala.scala_imports import ScalaImportClause, ScalaSymbolPath
from foursquare.source_code_analysis.scala.scala_source_file_rewriter import ScalaSourceFileRewriter
VERSION = '0.1'
log = logging.getLogger()
class ScalaImportRewriteRule(object):
"""How to rewrite an import."""
def __init__(self, from_string, to_string):
self.from_path = ScalaSymbolPath(from_string) # Rewrite imports of this symbol...
self.to_path = ScalaSymbolPath(to_string) # ... to this symbol.
class ScalaImportRewriter(ScalaSourceFileRewriter):
"""Rewrites imports in scala source files.
Handles all import forms, e.g.,:
import foo.bar.Baz
import foo.bar.{Baz, Qux}
import foo.bar.{Baz, Qux => Quux}
Notes:
- Only rewrites imports that are on a line on their own. This should be virtually all imports we encounter in
practice, but technically Scala allows inline imports, e.g., if (x > 0) { import java.util.Vector; }.
- Overwrites the original file. Use with caution.
- Does not handle imports with embedded or trailing comments.
- Does not regroup/reorder imports. scala_import_sorter does that.
  - Does not remove unused imports. scala_unused_import_remover does that.
- We use semi-naive regexps, so this may not do what you expect in very extreme corner cases. For example,
it will rewrite some malformed, syntactically incorrect import statements, but not others.
You definitely want to eyeball all changes made by this script before committing them.
USAGE: python src/python/foursquare/source_code_analysis/scala/scala_import_rewriter.py --nobackup --rewrite_from=foo.bar.Baz --rewrite_to=foo.qux.Baz <files_or_directories>
(don't forget to put the code on your PYTHONPATH).
"""
def __init__(self, rewrite_rule, backup):
super(ScalaImportRewriter, self).__init__(backup)
self._rewrite_rule = rewrite_rule
def apply_to_rewrite_cursor(self, rewrite_cursor):
import_clause = ScalaImportParser.search(rewrite_cursor)
while import_clause is not None:
rewritten_clauses = self.apply_rewrite(import_clause)
if rewritten_clauses is None:
        # Spit out the original text, with any wrapping, whitespace or other idiosyncrasies it may have. This makes
        # sure that we only modify files where needed, and not just because of such non-germane differences.
rewrite_cursor.emit(import_clause.src_text)
else:
new_text = '\n'.join([repr(x) for x in rewritten_clauses]) + '\n'
rewrite_cursor.emit(new_text)
import_clause = ScalaImportParser.search(rewrite_cursor)
def apply_rewrite(self, import_clause):
"""Returns a list of ScalaImportClause objects containing the rewritten imports from the input clause.
Note that a rewrite may turn one clause into more than one, e.g., applying foo.bar.Qux -> foo.qux.Qux:
import foo.bar.{Baz, Qux} -> import foo.bar.Baz
import foo.qux.Qux
"""
clauses = []
def _find_clause(path_string):
for clause in clauses:
if clause.path.path_string == path_string:
return clause
return None
def _find_or_create_clause(path_string):
ret = _find_clause(path_string)
if ret is None:
ret = ScalaImportClause(import_clause.indent, path_string)
clauses.append(ret)
return ret
rewritten = False
for scala_import in import_clause.imports:
maybe_rewritten_import = scala_import.get_maybe_rewritten_import(self._rewrite_rule)
if maybe_rewritten_import != scala_import:
rewritten = True
clause = _find_or_create_clause('.'.join(maybe_rewritten_import.path.get_all_but_name()))
clause.add_import(maybe_rewritten_import.path.get_name(), maybe_rewritten_import.as_name)
if not rewritten:
return None # So we don't change the wrapping etc. of imports that weren't otherwise rewritten.
else:
return clauses
def get_command_line_args():
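  """Parse and validate the command-line options; returns (options, list of scala source files/directories)."""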
opt_parser = optparse.OptionParser(usage='%prog [options] scala_source_file_or_dir(s)', version='%prog ' + VERSION)
opt_parser.add_option('--log_level', type='choice', dest='log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
default='INFO', help='Log level to display on the console.')
opt_parser.add_option('--rewrite_from', type='string', dest='rewrite_from', metavar='foo.bar.Baz',
help='import to rewrite')
opt_parser.add_option('--rewrite_to', type='string', dest='rewrite_to', metavar='foo.qux.Baz',
help='rewrite the import to this')
opt_parser.add_option('--nobackup', action='store_true', dest='nobackup', default=False,
help='If unspecified, we back up modified files with a .bak suffix before rewriting them.')
(options, args) = opt_parser.parse_args()
if not options.rewrite_from:
opt_parser.error('Must specify --rewrite_from')
if not options.rewrite_to:
opt_parser.error('Must specify --rewrite_to')
if not PathValidator.validate(options.rewrite_from):
opt_parser.error('--rewrite_from must be of the form foo.bar.Baz')
if not PathValidator.validate(options.rewrite_to):
opt_parser.error('--rewrite_to must be of the form foo.bar.Baz')
if len(args) == 0:
opt_parser.error('Must specify at least one scala source file or directory to rewrite')
return options, args
def main(options, scala_source_files):
numeric_log_level = getattr(logging, options.log_level, None)
if not isinstance(numeric_log_level, int):
raise SourceCodeAnalysisException('Invalid log level: {0}'.format(options.log_level))
logging.basicConfig(level=numeric_log_level)
import_rewriter = ScalaImportRewriter(ScalaImportRewriteRule(options.rewrite_from, options.rewrite_to),
not options.nobackup)
import_rewriter.apply_to_source_files(scala_source_files)
log.info('Done!')
if __name__ == '__main__':
(options, args) = get_command_line_args()
main(options, args)
|
/scala-source-tools-0.14.tar.gz/scala-source-tools-0.14/foursquare/source_code_analysis/scala/scala_import_rewriter.py
| 0.666605 | 0.184235 |
scala_import_rewriter.py
|
pypi
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
class ScalaSymbolPath(object):
""""A dotted path of identifiers."""
def __init__(self, path_string):
"""Object is immutable."""
self.path_string = path_string
self.path_parts = path_string.split('.')
def get_name(self):
"""Returns the last component of the path."""
return self.path_parts[-1]
def get_all_but_name(self):
"""Returns a list of all but the last component of the path."""
return self.path_parts[0:-1]
def get_top_level(self):
"""Returns the first component of the path."""
return self.path_parts[0]
def is_prefix_of(self, other):
"""If this symbol is a prefix of the other symbol, returns a list of the suffix parts. Returns None otherwise."""
if self.path_parts == other.path_parts[0:len(self.path_parts)]:
return other.path_parts[len(self.path_parts):]
else:
return None
def with_suffix(self, suffix_parts):
"""Returns an instance of ScalaSymbolPath representing this path with the suffix parts added."""
if len(suffix_parts) > 0:
return ScalaSymbolPath('.'.join(self.path_parts + suffix_parts))
else:
return self
def __repr__(self):
return self.path_string
def __eq__(self, other):
return self.path_string == other.path_string
class ScalaImport(object):
"""An import of a single symbol, possibly renamed."""
def __init__(self, path_string, as_name):
"""Object is immutable."""
self.path = ScalaSymbolPath(path_string)
self.as_name = as_name # An identifier, or None if the same as name.
def get_name(self):
"""Returns the name by which code will reference the imported symbol."""
if self.as_name is None:
return self.path.get_name()
else:
return self.as_name
def get_maybe_rewritten_import(self, rewrite_rule):
"""Returns a new ScalaImport instance, or this instance if no rewrite occurred."""
suffix = rewrite_rule.from_path.is_prefix_of(self.path)
if suffix is None:
return self
else:
return ScalaImport(repr(rewrite_rule.to_path.with_suffix(suffix)), self.as_name)
def get_selector_string(self):
"""Returns the part of the import after the last dot, minus the braces if any."""
if self.as_name is None:
return self.path.get_name()
else:
return '{0} => {1}'.format(self.path.get_name(), self.as_name)
def __repr__(self):
if self.as_name is None:
return self.path.path_string
else:
# Note: The outer {{ }} turn into literal { } and the inner {0} is replaced by the selector string.
return '.'.join(self.path.get_all_but_name() + ['{{{0}}}'.format(self.get_selector_string())])
def __eq__(self, other):
return self.path == other.path and self.as_name == other.as_name
class ScalaImportClause(object):
"""A single import clause, possibly importing multiple possibly renamed symbols."""
def __init__(self, indent, path, src_text=None, src_begin_idx=-1, src_end_idx=-1):
"""Object is mutable - see add_import()."""
self.indent = indent
self.path = ScalaSymbolPath(path) # The common path prefix of all imports in this clause.
self.src_text = src_text # The original text we parsed this import clause from, if any.
self.src_begin_idx = src_begin_idx
self.src_end_idx = src_end_idx
self.imports = [] # The imports declared by this clause.
def add_import(self, name, as_name):
imprt = ScalaImport(repr(self.path.with_suffix([name])), as_name)
if imprt not in self.imports:
self.imports.append(imprt)
def remove_import(self, name):
    ret = next(x for x in self.imports if x.get_name() == name)
    self.imports = [x for x in self.imports if x.get_name() != name]
return ret
def sort_imports(self):
    self.imports.sort(key=lambda x: x.path.path_string)
MAX_LINE_LEN = 120
def _to_str(self, include_indent=True):
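    """Render the clause as Scala source text, wrapping the selector list onto
    continuation lines when a line would exceed MAX_LINE_LEN characters."""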
if len(self.imports) == 0:
ret = '<empty import clause>'
elif len(self.imports) == 1:
ret = '{0}import {1}'.format(self.indent if include_indent else '', repr(self.imports[0]))
else:
selector_strings = [x.get_selector_string() for x in self.imports]
delimited_selector_strings = [x + ', ' for x in selector_strings[0:-1]] + [selector_strings[-1] + '}']
ret = '{0}import {1}.{{'.format(self.indent if include_indent else '', self.path)
continuation_indent_size = 4 # To indent under the first selector, replace 4 with len(ret).
line_len = len(ret)
for s in delimited_selector_strings:
if line_len + len(s) > ScalaImportClause.MAX_LINE_LEN:
ret = ret.rstrip()
ret += '\n'
ret += ' ' * continuation_indent_size
line_len = continuation_indent_size
ret += s
line_len += len(s)
return ret
def str_no_indent(self):
return self._to_str(include_indent=False)
def __repr__(self):
return self._to_str()
def __eq__(self, other):
return self.path == other.path and self.imports == other.imports
|
/scala-source-tools-0.14.tar.gz/scala-source-tools-0.14/foursquare/source_code_analysis/scala/scala_imports.py
| 0.900233 | 0.263274 |
scala_imports.py
|
pypi
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import re
from foursquare.source_code_analysis.exception import SourceCodeAnalysisException
from foursquare.source_code_analysis.scala.scala_imports import ScalaImportClause
# A single identifier, e.g., foo, Bar, baz_2, _root_ .
_IDENTIFIER_PATTERN = r'(?:\w*)'
# A dotted path of identifiers, e.g., foo.bar.Baz .
_PATH_PATTERN = r'(?:{identifier}(\.{identifier})*)'.format(identifier=_IDENTIFIER_PATTERN)
_PATH_RE = re.compile('^{path}$'.format(path=_PATH_PATTERN))
# An identifier rewrite, e.g., Foo => Bar .
_SELECTOR_PATTERN = r'{identifier}(?:\s*=>\s*{identifier})?'.format(identifier=_IDENTIFIER_PATTERN)
# A (possibly multiline) import clause.
_IMPORT_PATTERN = (r'^(?P<indent> *)import\s*(?P<path>{path})\.'
                   r'(?P<selectors>{identifier}|(?:\{{\s*{selector}(?:\s*,\s*{selector})*\s*\}}))[ \t]*\n').format(
path=_PATH_PATTERN,
identifier=_IDENTIFIER_PATTERN,
selector=_SELECTOR_PATTERN)
IMPORT_RE = re.compile(_IMPORT_PATTERN, re.MULTILINE)
class PathValidator(object):
@staticmethod
def validate(path):
return _PATH_RE.match(path) is not None
class ScalaImportParser(object):
@staticmethod
def find_all(src_text):
"""Returns a list of ScalaImportClauses representing all the imports in the text.
Doesn't interact with a rewrite cursor, so is not useful for rewriting.
"""
return [ ScalaImportParser._create_clause_from_matchobj(m) for m in IMPORT_RE.finditer(src_text) ]
@staticmethod
def search(rewrite_cursor):
"""Returns the next ScalaImportClause found, advancing the cursor as needed.
Skips over, and emits verbatim, anything that isn't an import clause.
Returns None if it finds no import clause.
"""
ret = ScalaImportParser._apply_regex(rewrite_cursor, True)
if ret is None:
rewrite_cursor.finish()
return ret
@staticmethod
def match(rewrite_cursor):
"""If the cursor is currently on an import clause, returns a ScalaImportClause and advances the cursor.
Returns None otherwise.
"""
return ScalaImportParser._apply_regex(rewrite_cursor, False)
@staticmethod
def _apply_regex(rewrite_cursor, search):
if search:
m = IMPORT_RE.search(rewrite_cursor.src_text, rewrite_cursor.src_pos)
else:
m = IMPORT_RE.match(rewrite_cursor.src_text, rewrite_cursor.src_pos)
if m is None:
return None
# Copy whatever we skipped over.
rewrite_cursor.copy_from_src_until(m.start())
# Move past the string we matched.
rewrite_cursor.set_src_pos(m.end())
return ScalaImportParser._create_clause_from_matchobj(m)
@staticmethod
def _create_clause_from_matchobj(m):
indent_string = m.group('indent')
path_string = m.group('path')
selectors_string = m.group('selectors').strip()
if len(selectors_string) == 0:
raise SourceCodeAnalysisException('Something wrong with import: {0}; trailing dot, possibly?'.format(m.group(0)))
if selectors_string[0] == '{':
if selectors_string[-1] != '}':
raise SourceCodeAnalysisException('Bad regex match: opening brace has no closing brace.')
selectors_string = selectors_string[1:-1]
ret = ScalaImportClause(indent_string, path_string, m.group(0), m.start(), m.end())
selectors = [x.strip() for x in selectors_string.split(',')]
for selector in selectors:
parts = [x.strip() for x in selector.split('=>')]
name = parts[0]
if len(parts) == 2:
as_name = parts[1]
else:
as_name = None
ret.add_import(name, as_name)
return ret
|
/scala-source-tools-0.14.tar.gz/scala-source-tools-0.14/foursquare/source_code_analysis/scala/scala_import_parser.py
| 0.724675 | 0.152821 |
scala_import_parser.py
|
pypi
|
from __future__ import annotations
import json
import logging
import os
import re
import secrets
import string
import shutil
from datetime import datetime
from typing import Any, Dict, List, MutableSequence, Optional, Tuple, Union, overload
import ipinfo
import requests
from requests.models import Response
from scala_wrapper.utils import typedef
def get_id(value: Optional[Dict[str, Any]]) -> Optional[int]:
"""
The get_id function returns the id of a given value. If the value is None, then it returns None.
:param value:Optional[Dict[str: Used to indicate that the value parameter is a dictionary and it can be None.
:param Any]]: Used to accept any type of value.
:return: None if the value is None.
:doc-author: Trelent
"""
if not value is None:
return value.get('id')
return None
def get_name(value: Optional[Dict[str, Any]]) -> Optional[str]:
"""
The get_name function returns the name of a given value.
:param value:Optional[Dict[str: Used to specify that the value parameter is an optional Dict[str, Any] type.
:param Any]]: Used to indicate that the value can be any type.
:return: None if the value parameter is None.
:doc-author: Trelent
"""
if not value is None:
return value.get('name')
return None
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.CategoryList) -> List[ContentManager.Category]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.DistributionServerList) -> List[ContentManager.DistributionServer]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.ExModuleList) -> List[ContentManager.ExModule]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.MediaList) -> List[ContentManager.Media]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.ResourceList) -> List[ContentManager.Resource]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.PlayerGroupList) -> List[ContentManager.PlayerGroup]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.PlayerList) -> List[ContentManager.Player]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.RoleList) -> List[ContentManager.Role]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.UserList) -> List[ContentManager.User]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.WorkgroupList) -> List[ContentManager.Workgroup]:
...
def get_list(
value: Optional[List[Union[Dict[str, Any], int]]],
data: Union[
ContentManager.CategoryList,
ContentManager.DistributionServerList,
ContentManager.ExModuleList,
ContentManager.MediaList,
ContentManager.PlayerGroupList,
ContentManager.PlayerList,
ContentManager.ResourceList,
ContentManager.RoleList,
ContentManager.UserList,
ContentManager.WorkgroupList,
]
):
"""
The get_list function is used to convert a list of either IDs or names into a list of objects.
The function takes two parameters: value and data. The value parameter is the list that needs to be converted,
and the data parameter is the type of object that each item in the value parameter should be converted into.
:param value:Optional[List[Union[Dict[str: Used to define the parameters in the get_list function.
:param Any]: Used to allow the return type to be a list of.
:param int]]]: Used to specify the type of object being returned.
:param data:Union[ContentManager.CategoryList: Used to determine the type of data being passed in.
:param ContentManager.DistributionServerList: Used to get the.
:param ContentManager.ExModuleList: Used to get the list of.
:param ContentManager.MediaList: Used to get a list of media objects.
:param ContentManager.PlayerGroupList: Used to get a list of player groups.
:param ContentManager.PlayerList: Used to get a list of players.
:param ContentManager.ResourceList: Used to retrieve a list of resources.
:param ContentManager.RoleList: Used to get a list of roles.
:param ContentManager.UserList: Used to get a list of users.
:param ContentManager.WorkgroupList: Used to get a list of workgroups.
:param ]: Used to indicate the end of a list.
:return: a list of objects.
:doc-author: Trelent
"""
temp: List[Any] = []
if not value is None:
for item in value:
if isinstance(item, int):
d = data.get(item)
if d is None:
continue
temp.append(d)
else:
item_id = get_id(item)
if item_id is None:
item_id = get_name(item)
if item_id is None:
continue
d = data.get(item_id)
if d is None:
continue
temp.append(d)
return temp
def search_children(search: Union[int, str], children: List[Any], int_attr: str, str_attr: str, child_attr: str) -> Optional[Any]:
"""
The search_children function searches through a list of elements for an element with the specified attribute. If it is found,
the function returns that element. Otherwise, it searches through all of the children of those elements and returns the first match
it finds.
:param search:Union[int: Used to indicate that the function can accept either an int or a string.
:param str]: Used to identify the type of data being passed to this function.
:param children:List[Any]: Used to pass the children of a node to search_children.
:param int_attr:str: Used to specify the attribute that is used to identify the element.
:param str_attr:str: Used to specify the attribute of the element that contains a string.
:param child_attr:str: Used to specify the attribute of the child element that contains.
:return: None if the search parameter is not found in the children list.
:doc-author: Trelent
"""
for elem in children:
if isinstance(search, int):
if getattr(elem, int_attr) == search:
return elem
else:
if getattr(elem, str_attr) == search:
return elem
temp = search_children(search, getattr(elem, child_attr), int_attr, str_attr, child_attr)
if not temp is None:
return temp
return None
@overload
def clean_data(data: Dict[Any, Any]) -> Optional[Dict[Any, Any]]:
...
@overload
def clean_data(data: List[Any]) -> Optional[List[Any]]:
...
def clean_data(data: Union[Dict[Any, Any], List[Any]]):
"""
The clean_data function removes any empty values from the data.
:param data:Union[Dict[Any: Used to specify the type of data that is expected to be returned.
:param Any]: Used to allow for a list of any type of data to be passed in.
:param List[Any]]: Used to make the function more flexible.
:return: None if the data is empty.
:doc-author: Trelent
"""
if isinstance(data, dict):
for key, value in data.copy().items():
if value is None:
del data[key]
if isinstance(value, list) or isinstance(value, dict):
c_data = clean_data(value)
if not c_data is None:
data[key] = c_data
else:
del data[key]
if len(data) > 0:
return data
else:
return None
    else:
        # Rebuild the list instead of mutating it while iterating, which would skip elements.
        cleaned = []
        for elem in data:
            if elem is None:
                continue
            if isinstance(elem, (list, dict)):
                c_data = clean_data(elem)
                if c_data is not None:
                    cleaned.append(c_data)
            else:
                cleaned.append(elem)
        data[:] = cleaned
        if len(data) > 0:
            return data
        else:
            return None
class ContentManager:
"""
Description of ContentManager
Args:
username (str):
password (str):
cm_url (str):
        client (Optional[str]):
        short (Optional[str]):
        client_id (Optional[int]):
        ip_handler (Optional[str]):
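
    Example (illustrative only; the URL and credentials below are placeholders):
        cm = ContentManager("api_user", "secret", "https://cm.example.com/api/rest")
        print(cm.version)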
"""
def __init__(self, username: str, password: str, cm_url: str, client: Optional[str] = None, short: Optional[str] = None, client_id: Optional[int] = None, ip_handler: Optional[str] = None) -> None:
"""
The __init__ function is called when a new object is created from the class.
It initializes all of the variables that are defined in the __init__ function,
and can be used to set default values if no values are provided when creating
the object. In this case, it sets self.client to None (if no client was provided),
self.short to None (if no short name was provided), and self.client_id to None (if
no client ID number was provided). It then calls login(), which authenticates with
the CM server.
:param self: Used to reference the object instance.
:param username:str: Used to set the username for the CM instance.
:param password:str: Used to store the password.
:param cm_url:str: Used to specify the CM URL.
:param client:Optional[str]=None: Used to pass in a client name.
:param short:Optional[str]=None: Used to set the short name of the client.
:param client_id:Optional[int]=None: Used to specify the client id of a specific client.
:param ip_handler:Optional[str]=None: Used to pass an IP handler to the getHandler function.
:return: None.
:doc-author: Trelent
"""
self.client = client
self.short = short
self.client_id = client_id
self.cm_url = cm_url
self.username = username
self.password = password
self.air_id = None
self.version = None
self.ip_handler = ipinfo.getHandler(ip_handler) if not ip_handler is None else None
self.last_load = datetime.now()
self.approvalstatuses = self.ApprovalStatusList(self, [])
self.categories = self.CategoryList(self, [])
self.channels = self.ChannelList(self, [])
self.distributionservers = self.DistributionServerList(self, [])
self.ex_modules = self.ExModuleList(self, [])
self.licenses = self.LicenseList(self, [])
self.media = self.MediaList(self, [])
self.networks = self.NetworkList(self, [])
self.playergroups = self.PlayerGroupList(self, [])
self.playerhealths = self.PlayerHealthList(self, [])
self.player_metadatas = self.PlayerMetadataList(self, [])
self.players = self.PlayerList(self, [])
self.playlists = self.PlaylistList(self, [])
self.resources = self.ResourceList(self, [])
self.roles = self.RoleList(self, [])
self.templates = self.TemplateList(self, [])
self.users = self.UserList(self, [])
self.workgroups = self.WorkgroupList(self, [])
self.login()
self.get_version()
""" BASIC FUNCTIONALITY """
def request(self, method: str, path: str, params: Optional[Dict[Any, Any]] = None, headers: Optional[Dict[Any, Any]] = None, data: str = '', debug_key: Optional[str] = None) -> Dict[Any, Any]:
"""
The request function is used to make a request to the CM API.
It takes in a method, path, params (optional), headers (optional) and data (optional).
The function will then return the response from the CM API as JSON.
:param self: Used to access the instance variables of the class.
:param method:str: Used to determine the HTTP method to use.
:param path:str: Used to determine the path of the request.
:param params:Optional[Dict[Any: Used to pass in the parameters for the request function.
:param Any]]=None: Used to allow the function to be used with or without a request parameter.
:param headers:Optional[Dict[Any: Used to pass the headers to the request function.
:param Any]]=None: Used to make the function signature compatible with both Python 2 and 3.
:param data:str='': Used to send data to the server.
:param debug_key:Optional[str]=None: Used to differentiate between different requests.
:return: a dict with the following keys:.
:doc-author: Trelent
"""
params = params if not params is None else {}
headers = headers if not headers is None else {}
headers.update(self.header)
logging.debug(f"{method} - {path}")
if method.lower() == "delete":
self.__delete(path, headers)
return {"success": True}
response_end = None
offset = 0
new = True
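        # Paginate GET requests: keep increasing the offset until the reported 'count'
        # has been exhausted, concatenating the 'list' entries of every page.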
while True:
try:
while new:
if method.lower() == "get":
params['offset'] = offset
params['limit'] = params.get("limit") if not params.get("limit") is None else 1000
response: Response = requests.request(method, f'{self.cm_url}{path}', params=params, headers=headers, data=data)
if not response.ok:
logging.warning(f"Something went wrong when requesting {path} via {method}")
if response.status_code == 401:
logging.warning('login token expired requesting new one and trying again')
self.login()
headers.update(self.header)
logging.error(f"ERROR on {path} - code {response.status_code}")
logging.debug(response.text)
continue
response_json: Union[List[Any], Dict[str, Any]] = response.json()
if isinstance(response_json, list):
response_json = {'list': response_json, 'count': 0}
if response_json.get('count', 0) < offset + params.get('limit', float('inf')):
new = False
else:
offset += params.get('limit', 0)
if response_end is None:
response_end = response_json
else:
response_end['list'].extend(response_json['list'])
if response_end is None:
raise Exception('No response')
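                # Record the (typed) shape of every response in cm_responses.json for debugging,
                # keeping a copy of the previous dump in cm_responses_old.json.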
debug_path = "cm_responses.json"
debug_path_old = "cm_responses_old.json"
if os.path.isfile(debug_path):
with open(debug_path, "r") as f:
try:
data_ = json.load(f)
shutil.copyfile(debug_path, debug_path_old)
except ValueError:
data_ = {}
pass
else:
data_ = {}
if not debug_key is None:
key = debug_key
else:
key = f'{method} - {path}'
if not key in data_.keys():
data_[key] = {}
with open(debug_path, "w") as f:
if isinstance(response_end, list):
data_[key] = typedef.process_type(response_end, data_[key], False)
json.dump(data_, f)
else:
data_[key] = typedef.type_def_dict(response_end, data_[key], False)
json.dump(data_, f)
return response_end
except requests.exceptions.ConnectionError as e:
logging.error(e)
continue
except requests.exceptions.ReadTimeout as e:
logging.error(e)
continue
def __delete(self, path: str, headers: Optional[Dict[Any, Any]] = None):
"""
The __delete function is used to delete a object from the CM.
It takes two arguments: path and headers.
path is the relative path of the object you want to delete, without a leading slash (e.g., "file/my_file").
headers is an optional dictionary that can contain any additional HTTP headers you wish to pass (e.g., Content-Type).
The function will loop until it succeeds in deleting your file.
:param self: Used to reference the instance of the class.
:param path:str: Used to specify the path of the resource to be deleted.
:param headers:Optional[Dict[Any: Used to define the headers of the request.
:param Any]]=None: Used to allow the function to be used with.
:return: None.
:doc-author: Trelent
"""
headers = headers if not headers is None else {}
headers.update(self.header)
while True:
try:
requests.delete(f'{self.cm_url}{path}', headers=headers)
return
except requests.exceptions.ConnectionError as e:
logging.error(e)
continue
except requests.exceptions.ReadTimeout as e:
logging.error(e)
continue
def login(self):
"""
The login function is used to authenticate the user with Contentful.
It takes a username and password as parameters, and returns an api_token that can be used in subsequent requests.
:param self: Used to access the instance variables of the class.
:return: the api_token, user object and network object.
:doc-author: Trelent
"""
payload: Dict[str, Union[str, bool]] = {
'username': self.username,
'password': self.password,
'rememberMe': True
}
payload_str = json.dumps(payload)
headers: Dict[str, str] = {
'Content-type': 'application/json'
}
response = None
while True:
try:
response = requests.post(f'{self.cm_url}/auth/login', data=payload_str, headers=headers)
except requests.exceptions.ConnectionError as e:
logging.error(e)
continue
break
if response is None:
raise Exception('Login Failed')
else:
response_json: Dict[str, Any] = response.json()
self.api_token = response_json.get('apiToken')
if self.api_token is None:
raise Exception('No ApiToken found')
else:
self.header = {
'ApiToken': self.api_token,
'Content-Type': 'application/json'
}
                user = ContentManager.User(self, response_json.get('user', {}))
                self.users.append(user)
                self.user = user
self.network = self.networks.get(get_id(response_json.get('network')))
if self.network is None:
raise Exception('No Network id found')
self.token = response_json.get('token')
self.api_license_token = response_json.get('apiLicenseToken')
server_time = response_json.get('serverTime')
if not server_time is None:
self.time_dif_gmt = datetime.strptime(server_time.get('datetime', ''), '%Y-%m-%d %H:%M:%S') - datetime.strptime(server_time.get('gtmDatetime'), '%Y-%m-%d %H:%M:%S GMT')
""" SETTERS OBJECT """
def set_airtable_id(self, air_id: Optional[str]):
"""
The set_airtable_id function sets the airtable_id attribute of a given object.
The airtable_id is used to identify objects in the Airtable database.
:param self: Used to refer to the instance of the class.
:param air_id:Optional[str]: Used to set the airtable_id of a.
:return: the air_id that was set.
:doc-author: Trelent
"""
self.air_id = air_id
""" GETTERS ONLINE """
def get_version(self):
"""
The get_version function returns the version of the CM.
:param self: Used to access the instance variables of the class.
:return: the version of the CM that is currently in use.
:doc-author: Trelent
"""
response = self.request('get', '/misc/productinfo')
self.version: Optional[str] = response.get('version')
""" FUNCTIONS """
def group_players(self, solutions: Dict[str, str]):
"""
The group_players function takes a dictionary of player groups and regular expressions.
It iterates through the list of players in the game, and assigns them to one of the groups based on their name.
If no group is found, it will assign them to Other.
:param self: Used to access the class attributes.
:param solutions:Dict[str: Used to map the group names to the regular expressions.
:param str]: Used to specify the solutions that are used to group the players.
:return: a list of player groups.
:doc-author: Trelent
"""
for player in self.players:
for solution, reg in solutions.items():
if reg is None or player.name is None:
player_group = self.playergroups.get(solution)
if player_group is None:
raise Exception("Expected player group Other")
player.playergroups.append(player_group)
break
else:
match = re.search(reg, player.name)
if not match is None:
player_group = self.playergroups.get(solution)
if player_group is None:
raise Exception("Expected player group")
player.playergroups.append(player_group)
break
else:
continue
player.pollingInterval = 10
player.save()
""" APPROVALSTATUS """
class ApprovalStatus:
"""
Description of ApprovalStatus
Args:
cm (ContentManager):
json (Dict[str,Any]):
"""
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
"""
The __init__ function is called when the class is instantiated.
It can take arguments that become attributes of the instance, and it can return values.
:param self: Used to access the attributes of the class.
:param cm:ContentManager: Used to access the content manager.
:param json:Dict[str: Used to store the data from the json file.
:param Any]: Used to make the code more flexible.
:return: None.
:doc-author: Trelent
"""
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a JSON object as input and returns an instance of the class.
:param self: Used to refer to the object that is being called.
:param json:Dict[str: Used to unpack the json data.
:param Any]: Used to accept any type of value.
:return: None.
:doc-author: Trelent
"""
self.description: Optional[str] = json.get('description')
self.status: Optional[str] = json.get('status')
self.prettifyStatus: Optional[str] = json.get('prettifyStatus')
def json(self, **kwargs: bool):
"""
The json function is a helper function that returns the json representation of an object.
It is used to help with the serialization of objects for storage in a database or file.
:param self: Used to access the attributes of the object.
:param **kwargs:bool: Used to determine if the json function should return a list of dictionaries or just one dictionary.
:return: a dictionary containing the data of the object.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ApprovalStatusList(MutableSequence[ApprovalStatus]):
"""_summary_
Args:
MutableSequence (_type_): _description_
"""
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.ApprovalStatus]] = None) -> None:
"""
The __init__ function is called when a class is instantiated.
It initializes the attributes of the class, and it can accept arguments as well.
The __init__ function is not necessary to create a class, but it does help organize code.
:param self: Used to distinguish the instance of the class from other instances of the same class.
:param cm:ContentManager: Used to get the data from the ContentManager class.
:param init_list:Optional[List[ContentManager.ApprovalStatus]]=None: Used to pass in the list of approval statuses.
:return: what?.
:doc-author: Trelent
"""
super().__init__()
self.cm = cm
            self.__data = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
"""
The __get_data function retrieves the data from the Content Manager API and returns it as a dictionary.
The function is called by __init__, which initializes self.data to be used in other methods.
:param self: Used to refer to the object that is calling the function.
:return: a dictionary with the following keys:.
:doc-author: Trelent
"""
response: Dict[str, Any] = self.cm.request('get', '/approvalStatus')
for elem in response.get('list', []):
item = ContentManager.ApprovalStatus(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.ApprovalStatus]:
"""
The get function returns the approval status of a given content item.
If no search is provided, it returns None. If an invalid search is provided,
it returns None.
:param self: Used to access the instance of the class.
:param search:Union[int: Used to determine what type of search is being performed.
:param str: Used to get the status of a certain approval.
:param None]: Used to indicate that the function is not used.
:return: the status of the content manager.
:doc-author: Trelent
"""
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
logging.warning("Int is not possible to search")
return None
else:
if elem.status == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
logging.warning("Int is not possible to search")
return None
else:
if elem.status == search:
return elem
logging.warning(f'ApprovalStatus with {search} not found')
return None
def __len__(self) -> int:
"""
The __len__ function returns the number of items in the collection.
:param self: Used to access the instance variables.
:return: the number of items in the list.
:doc-author: Trelent
"""
return len(self.__data)
def __iter__(self):
"""
The __iter__ function is what makes an object iterable.
This function is called when you use a for loop, or when you call iter(object) to get an iterator from that object.
The __iter__ function should return a new iterator object that can iterate over all the objects of interest.
If your class defines __next__ (as in Pythons built-in range class), then calling the __iter__ function is not mandatory.
:param self: Used to access the instance variables.
:return: an iterable object.
:doc-author: Trelent
"""
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
"""
The __getitem__ function allows you to use the object as if it were a list.
For example, if x is an instance of MyList, the code x[i] is equivalent to mlist[i].
:param self: Used to access the instance attributes of the class.
:param i:Union[slice: Used to specify that the index can be either a slice or an integer.
:param int]: Used to index the data.
:return: a new instance of the class, with.
:doc-author: Trelent
"""
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
"""
The __delitem__ function removes the item from the list.
:param self: Used to access the instance of the object.
:param i:int: Used to specify which item to delete.
:return: None.
:doc-author: Trelent
"""
del self.__data[i]
def __setitem__(self, i: int, value):
"""
The __setitem__ function is used to set a value in the list.
It takes two arguments, the first is an index of a value to change and the second is what you want to change it to.
:param self: Used to reference the object itself.
:param i:int: Used to indicate the index of the item to be set.
:param value: Used to set the value of the item at index i.
:return: the value of the item that was set.
:doc-author: Trelent
"""
self.__data[i] = value
def insert(self, i: int, value) -> None:
"""
The insert function inserts a value into the list at the given index.
The function will raise an IndexError if i is greater than or equal to the
length of the list. The function will also raise a TypeError if value is not
of type int.
:param self: Used to access the instance variables of the class.
:param i:int: Used to indicate the index at which to insert.
:param value: Used to insert the value at index i.
:return: the new node.
:doc-author: Trelent
"""
self.__data.insert(i, value)
def append(self, value: ContentManager.ApprovalStatus) -> None:
"""
The append function adds a value to the end of the list.
:param self: Used to refer to the instance of the class.
:param value:ContentManager.ApprovalStatus: Used to determine whether the content is approved or not.
:return: the list.
:doc-author: Trelent
"""
self.__data.append(value)
""" CATEGORY """
class Category:
"""
Description of Category
Args:
cm (ContentManager):
json (Dict[str,Any]):
"""
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
"""
The __init__ function is called when a new instance of the class is created.
The __init__ function receives the arguments passed to the class constructor as its own arguments.
The first argument is always a reference to the instance being constructed, and by convention, this argument is named self.
:param self: Used to access the instance variables of the class.
:param cm:ContentManager: Used to access the content manager.
:param json:Dict[str: Used to pass the json data to the class.
:param Any]: Used to make the code more flexible.
:return: None.
:doc-author: Trelent
"""
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a dictionary of JSON data and unpacks it into an instance of the Category class.
The unpack_json function is used to create instances of the Category class from JSON data returned by requests to
the API.
:param self: Used to access the class itself.
:param json:Dict[str: Used to pass the json dict to the unpack_json function.
:param Any]: Used to accept all possible inputs for json,.
:return: a list of Category objects.
:doc-author: Trelent
"""
self.children: List[ContentManager.Category] = [ContentManager.Category(self.cm, elem) for elem in json.get('children', [])]
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.parentId: Optional[int] = json.get('parentId')
def unpack_usage_json(self, json: Dict[str, Any]):
"""
The unpack_usage_json function unpacks the JSON response from the API call and returns a Usage object.
:param self: Used to access the instance of the class.
:param json:Dict[str: Used to unpack the json data.
:param Any]: Used to accept any data type.
:return: a dictionary of the following format:.
:doc-author: Trelent
"""
self.messagesCount: Optional[int] = json.get('messagesCount')
self.mediaCount: Optional[int] = json.get('mediaCount')
self.templatesCount: Optional[int] = json.get("templatesCount")
self.playlistsCount: Optional[int] = json.get('playlistsCount')
self.remotePublishLocationsCount: Optional[int] = json.get('remotePublishLocationsCount')
def json(self, **kwargs: bool):
"""
The json function converts the Category into a JSON-serializable dictionary.
It takes boolean kwargs that act as flags; passing link=True drops 'parentId'
and 'description' and serializes the children as links.
:param self: Used to access the Category instance.
:param **kwargs: bool: Serialization flags (currently only 'link' is honoured).
:return: a cleaned dictionary of the object's attributes.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "link" and use:
data.pop('parentId', None)
data.pop('description', None)
data['children'] = [elem.json(link=True) for elem in self.children]
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
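# Illustrative note (editorial, not from the original source): the boolean kwargs act
# as serialization flags. For Category.json, link=True strips 'parentId' and
# 'description' and serializes children as links, e.g. (assuming an existing
# Category instance `category`):
#     payload = category.json(link=True)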
@staticmethod
def create(cm: ContentManager, name: str, parentId: Optional[int] = None, children: Optional[Union[List[ContentManager.Category], List[int]]] = None, description: Optional[str] = None):
"""
The create function creates a new category on the server via POST /categories
and appends the resulting Category to cm.categories.
:param cm: ContentManager: The ContentManager instance to create the category on.
:param name: str: Name of the new category.
:param parentId: Optional[int]: Id of the parent category (defaults to 0, the root).
:param children: Optional[Union[List[ContentManager.Category], List[int]]]: Child categories, given as Category objects or as category ids.
:param description: Optional[str]: Description of the new category.
:return: None; the new Category is appended to cm.categories.
:doc-author: Trelent
"""
children = children if not children is None else []
parentId = parentId if not parentId is None else 0
if len(children) > 0:
if isinstance(children[0], int):
children_list = [cm.categories.get(elem) for elem in children if isinstance(elem, int)]
children_list = [elem for elem in children_list if not elem is None]
else:
children_list = children
else:
children_list = children
if not all(isinstance(elem, ContentManager.Category) for elem in children_list):
raise Exception("Expected all children to be of type category")
data = {
"name": name,
"parentId": parentId,
"description": description,
"children": [elem.json(link=True) for elem in children_list if isinstance(elem, ContentManager.Category)]
}
response = cm.request('post', '/categories', data=json.dumps(data))
cm.categories.append(ContentManager.Category(cm, response))
def delete(self):
"""
The delete function deletes this category from the content manager.
:param self: Used to access the attributes of the class.
:return: None.
:doc-author: Trelent
"""
self.cm.__delete(f'/categories/{self.id}', {})
def usage(self):
"""
The usage function queries how often this category is used and stores the
counts on the instance via unpack_usage_json.
:param self: Used to refer to the Category instance.
:return: the raw usage response dictionary from the API.
:doc-author: Trelent
"""
response = self.cm.request('get', '/categories/usage', params={'ids': self.id})
self.unpack_usage_json(response)
return response
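# Usage sketch (hypothetical; assumes an authenticated ContentManager instance `cm`
# and illustrative names):
#     ContentManager.Category.create(cm, "Signage", description="Lobby screens")
#     category = cm.categories.get("Signage")   # look up by name (or by id)
#     if category is not None:
#         category.usage()                      # populates messagesCount, mediaCount, ...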
class CategoryList(MutableSequence[Category]):
"""
Description of CategoryList
Inheritance:
MutableSequence[Category]:
"""
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Category]] = None) -> None:
"""
The __init__ function is called when the class is instantiated.
It can take arguments that become attributes of the instance, and it can also take optional default values for those attributes.
:param self: Used to distinguish the instance of the class from other instances of the same class.
:param cm:ContentManager: Used to access the data in the ContentManager class.
:param init_list: Optional[List[ContentManager.Category]]: An existing list of categories to wrap; if None, the data is fetched from the API.
:return: None.
:doc-author: Trelent
"""
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Category] = []  # backing list; __get_data appends to it
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
"""
The __get_data function retrieves the categories from the API (GET /categories)
and appends each one to the backing list self.__data. It is called by __init__
when no initial list is supplied, and by get() as a fallback.
:param self: Used to access the instance variables of the class.
:return: None; self.__data is populated in place.
:doc-author: Trelent
"""
response: Dict[str, Any] = self.cm.request('get', '/categories')
for elem in response.get('list', []):
item = ContentManager.Category(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Category]:
"""
The get function returns a category object based on the search parameter.
If no category is found, it returns None.
:param self: Used to access the instance variables of the class.
:param search: Union[int, str, None]: Category id (int) or name (str) to look up; None returns None immediately.
:return: the category with the given id or name, or None if not found.
:doc-author: Trelent
"""
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Category with {search} not found')
return None
def __len__(self) -> int:
"""
The __len__ function returns the number of items in the collection.
:param self: Used to refer to the instance of the object that is calling this function.
:return: the length of the list.
:doc-author: Trelent
"""
return len(self.__data)
def __iter__(self):
"""
The __iter__ function is what makes an object iterable.
This function is called when you use a for loop, or when you call iter(object) to get an iterator from that object.
The __iter__ function should return a new iterator object that can iterate over all the objects in the container.
:param self: Used to refer to the instance of the class.
:return: an iterator object.
:doc-author: Trelent
"""
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
"""
The __getitem__ function allows the object to be indexed like a list:
an integer index returns the Category at that position, while a slice
returns a new CategoryList wrapping the selected items.
:param self: Used to access the instance variables of the class.
:param i: Union[slice, int]: Index or slice into the underlying data.
:return: a Category for an integer index, or a new CategoryList for a slice.
:doc-author: Trelent
"""
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
"""
The __delitem__ function removes an item from the list.
:param self: Used to refer to the instance of the class.
:param i:int: Used to specify the index of the item to be deleted.
:return: None.
:doc-author: Trelent
"""
del self.__data[i]
def __setitem__(self, i: int, value):
"""
The __setitem__ function is used to set a value in the list.
It takes two arguments, the first is an index of a list and second is value to be set at that index.
:param self: Used to access the instance of the class.
:param i:int: Used to specify the index of the item to be set.
:param value: Used to set the value of a specific index.
:return: None.
:doc-author: Trelent
"""
self.__data[i] = value
def insert(self, i: int, value) -> None:
"""
The insert function inserts a value into the list at the given index,
delegating to list.insert on the underlying data.
:param self: Used to reference the instance of the class.
:param i: int: Index at which to insert the value.
:param value: Value to insert at index i.
:return: None.
:doc-author: Trelent
"""
self.__data.insert(i, value)
def append(self, value: ContentManager.Category) -> None:
"""
The append function adds a value to the end of the list.
:param self: Used to reference the object itself.
:param value: ContentManager.Category: The category to append.
:return: None.
:doc-author: Trelent
"""
self.__data.append(value)
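# Usage sketch (hypothetical `cm`; ids and names are illustrative): CategoryList
# behaves like a regular mutable sequence backed by the '/categories' endpoint:
#     names = [c.name for c in cm.categories]   # iteration yields Category objects
#     first_two = cm.categories[0:2]            # slicing returns a new CategoryList
#     lobby = cm.categories.get(42)             # lookup by id, or by name via get("Lobby")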
""" CHANNEL """
class Channel:
"""
Description of Channel
Args:
cm (ContentManager):
json (Dict[str,Any]):
"""
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param cm: ContentManager: The content manager this channel belongs to.
:param json: Dict[str, Any]: The JSON dictionary describing the channel.
:return: None.
:doc-author: Trelent
"""
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function is a helper that unpacks the channel JSON into the
attributes of this Channel instance, resolving referenced objects (playlists,
workgroups, frameset, metadata values, variables) through the ContentManager.
:param self: Used to access the Channel instance and its ContentManager.
:param json: Dict[str, Any]: The JSON dictionary describing the channel.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
non_scheduled_playlist_id = get_id(json.get('nonScheduledPlaylist'))
self.alternateSupport: Optional[bool] = json.get('alternateSupport')
self.audioControlledByAdManager: Optional[bool] = json.get('audioControlledByAdManager')
self.campaignChannel: Optional[bool] = json.get('campaignChannel')
self.campaignClone: Optional[bool] = json.get('campaignClone')
self.description: Optional[str] = json.get('description')
self.frameset: ContentManager.Channel.FrameSet = ContentManager.Channel.FrameSet(self, json.get('frameset'))
self.id: Optional[int] = json.get('id')
self.lastModified: Optional[str] = json.get('lastModified')
self.maxFrameAllowed: Optional[int] = json.get('maxFrameAllowed')
self.maxPixelAllowed: Optional[int] = json.get('maxPixelAllowed')
self.muteAudioFromVisual: Optional[bool] = json.get("muteAudioFromVisual")
self.name: Optional[str] = json.get('name')
self.nonScheduledPlaylist: Optional[ContentManager.Playlist] = self.cm.playlists.get(non_scheduled_playlist_id)
self.playDedicatedAudioTrack: Optional[bool] = json.get('playDedicatedAudioTrack')
self.playerCount: int = json.get('playerCount', 0)
self.playerMetadataValues: List[ContentManager.Channel.MetadataValue] = [ContentManager.Channel.MetadataValue(self, elem) for elem in json.get('playerMetadataValues', [])]
self.readOnly: Optional[bool] = json.get('readOnly')
self.triggerSupport: Optional[bool] = json.get('triggerSupport')
self.type: Optional[str] = json.get('type')
self.variables: List[ContentManager.Channel.Variable] = [ContentManager.Channel.Variable(self, elem) for elem in json.get('variables', [])]
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def get_playlists(self) -> List[ContentManager.Playlist]:
"""
The get_playlists function returns the playlists referenced by this channel's
frameset, collected from every frame's timeslots and event triggers, without duplicates.
:param self: Used to access the channel's frameset.
:return: a list of the referenced playlists.
:doc-author: Trelent
"""
temp: List[ContentManager.Playlist] = []
for frame in self.frameset.frames:
for timeslot in frame.timeslots:
if not timeslot.playlist is None:
if not timeslot.playlist in temp:
temp.append(timeslot.playlist)
if not frame.eventtriggers is None:
for eventtrigger in frame.eventtriggers:
if not eventtrigger.playlist is None:
if not eventtrigger.playlist in temp:
temp.append(eventtrigger.playlist)
return temp
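# Illustrative example (hypothetical `channel` instance obtained from the channel list):
#     playlist_ids = [p.id for p in channel.get_playlists()]   # unique playlists referenced by the channel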
def json(self, **kwargs: bool):
"""
The json function converts the Channel into a JSON-serializable dictionary.
It takes boolean kwargs that act as flags; passing player_update=True reduces
the payload to the fields needed for a player update ('campaignChannel',
'campaignClone', 'id', 'name').
:param self: Used to access the instance of the class.
:param **kwargs: bool: Serialization flags (currently only 'player_update' is honoured).
:return: a cleaned dictionary of the object's attributes.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'player_update' and use:
data = {k:v for k,v in data.items() if k in ['campaignChannel', 'campaignClone', 'id', 'name']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
def usage_info(self):
used = False
used_playlists: List[int] = []
used_media: List[int] = []
if self.id is None:
logging.error("Channel has no id specified")
return used, used_playlists, used_media
if self.playerCount > 0:
used = True
playlists = self.get_playlists()
for playlist in playlists:
if not playlist.id is None and not playlist.id in used_playlists:
_, temp_playlists, temp_media = playlist.usage_info()
used_playlists.append(playlist.id)
used_playlists.extend(temp_playlists)
used_media.extend(temp_media)
for frame in self.frameset.frames:
for eventtrigger in frame.eventtriggers:
if not eventtrigger.variable.controlScript is None:
if not eventtrigger.variable.controlScript.id in used_media and not eventtrigger.variable.controlScript.id is None:
used_media.append(eventtrigger.variable.controlScript.id)
return used, list(set(used_playlists)), list(set(used_media))
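# Illustrative example (hypothetical `channel` instance): usage_info reports whether
# the channel is assigned to any player and which playlist/media ids it references:
#     used, playlist_ids, media_ids = channel.usage_info()
#     if used:
#         print(f"channel {channel.id} references {len(playlist_ids)} playlists")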
class FrameSet:
"""
Description of FrameSet
Args:
channel (ContentManager.Channel):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, channel: ContentManager.Channel, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is called when a new instance of the class is created.
It can take arguments that get bound to the named attributes in the class,
and/or it can take keyword arguments that are passed directly to it.
:param self: Used to access the instance variables of the class.
:param channel:ContentManager.Channel: Used to pass the channel object to the class.
:param json:Optional[Dict[str: Used to tell the compiler that json is an optional parameter.
:param Any]]: Used to specify that the parameter can be any type.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.channel = channel
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks the frameset JSON into the attributes of this
FrameSet instance, including:
- campaignFrameset: whether this frameset belongs to a campaign.
- eventTriggersCount / timeslotsCount / framesCounter: counters reported by the server.
- frames: the list of Frame objects contained in the frameset.
- height / width: the frameset dimensions in pixels.
- id / name: the frameset's identifier and name.
:param self: Used to access the FrameSet instance.
:param json: Dict[str, Any]: The JSON dictionary describing the frameset.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
self.campaignFrameset: Optional[bool] = json.get('campaignFrameset')
self.eventTriggersCount: int = json.get('eventTriggersCount', 0)
self.frames: List[ContentManager.Channel.FrameSet.Frame] = [ContentManager.Channel.FrameSet.Frame(self, elem) for elem in json.get('frames', [])]
self.framesCounter: int = json.get('framesCounter', 0)
self.height: Optional[int] = json.get('height')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.timeslotsCount: int = json.get('timeslotsCount', 0)
self.width: Optional[int] = json.get('width')
def json(self, **kwargs: bool):
"""
The json function is a helper function that converts the object into a json string.
It is used to convert the object into a json string for sending it over the network.
The kwargs are optional arguments that can be passed in to customize how objects are converted.
:param self: Used to reference the object itself.
:param **kwargs:bool: Used to specify which attributes of the object should be included in the json output.
:return: the data in the form of a dictionary.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['channel']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Frame:
"""
Description of Frame
Args:
frameset (ContentManager.Channel.FrameSet):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, frameset: ContentManager.Channel.FrameSet, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param frameset:ContentManager.Channel.FrameSet: Used to pass the frameset to the __init__ function.
:param json:Optional[Dict[str: Used to tell the function that it can be passed a dictionary, or None.
:param Any]]: Used to specify that the type of json is unknown.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.frameset = frameset
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into the Frame class.
:param self: Used to access the frame object itself, and is not used in this function.
:param json:Dict[str: Used to unpack the json parameter.
:param Any]: Used to force the type of the parameter to be a Dict[str, Any].
:return: a Frame object.
:doc-author: Trelent
"""
alternate_playlist_id = get_id(json.get('alternatePlaylist'))
self.alternatePlaylist: Optional[ContentManager.Playlist] = self.frameset.channel.cm.playlists.get(alternate_playlist_id)
self.alternateType: Optional[str] = json.get('alternateType')
self.autoscale: Optional[str] = json.get('autoscale')
self.campaignTarget: Optional[bool] = json.get('campaignTarget')
self.color: Optional[str] = json.get('color')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.eventTriggersCount: int = json.get('eventTriggersCount', 0)
self.eventtriggers: List[ContentManager.Channel.FrameSet.Frame.EventTrigger] = []
self.height: Optional[int] = json.get('height')
self.hidden: Optional[bool] = json.get('hidden')
self.id: Optional[int] = json.get('id')
self.left: Optional[int] = json.get('left')
self.name: Optional[str] = json.get('name')
self.timeTriggersCount: int = json.get('timeTriggersCount', 0)
self.timeslots: List[ContentManager.Channel.FrameSet.Frame.Timeslot] = []
self.timeslotsCount: int = json.get('timeslotsCount', 0)
self.timetriggers: List[ContentManager.Channel.FrameSet.Frame.TimeTrigger] = []
self.top: Optional[int] = json.get('top')
self.width: Optional[int] = json.get('width')
self.zOrder: Optional[int] = json.get('zOrder')
def json(self, **kwargs: bool):
"""
The json function converts a Frame object into a JSON-serializable dictionary.
The boolean **kwargs are forwarded to the json() calls of nested objects.
:param self: Used to access the Frame instance.
:param **kwargs: bool: Serialization flags forwarded to nested objects.
:return: a cleaned dictionary of the data in a frame.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['frameset']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Timeslot:
"""
Description of Timeslot
Args:
frame (ContentManager.Channel.FrameSet.Frame):
json (Dict[str,Any]):
"""
def __init__(self, frame: ContentManager.Channel.FrameSet.Frame, json: Dict[str, Any]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to refer to the instance of the class.
:param frame:ContentManager.Channel.FrameSet.Frame: Used to pass the frame to the.
:param json:Dict[str: Used to pass the json data from the server to this function.
:param Any]: Used to make the code more flexible.
:return: None.
:doc-author: Trelent
"""
self.frame = frame
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into the attributes of this
Timeslot instance, resolving referenced playlists through the ContentManager.
:param self: Used to refer to the Timeslot instance.
:param json: Dict[str, Any]: The JSON dictionary describing the timeslot.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
alternate_playlist_id = get_id(json.get('alternatePlaylist'))
playlist_id = get_id(json.get('playlist'))
self.alternatePlaylist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(alternate_playlist_id)
self.alternateType: Optional[str] = json.get('alternateType')
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.color: Optional[str] = json.get('color')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.description: Optional[str] = json.get('description')
self.endDate: Optional[str] = json.get('endDate')
self.endTime: Optional[str] = json.get('endTime')
self.id: Optional[int] = json.get('id')
self.locked: Optional[bool] = json.get('locked')
self.monthPeriod: Optional[str] = json.get('monthPeriod')
self.name: Optional[str] = json.get('name')
self.playFullScreen: Optional[bool] = json.get('playFullScreen')
self.playlist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(playlist_id)
self.priorityClass: Optional[str] = json.get('priorityClass')
self.recurrencePattern: Optional[str] = json.get('recurrencePattern')
self.sortOrder: Optional[str] = json.get('sortOrder')
self.startDate: Optional[str] = json.get('startDate')
self.startTime: Optional[str] = json.get('startTime')
self.weekdays: Optional[List[str]] = json.get('weekdays')
def json(self, **kwargs: bool):
"""
The json function converts the Timeslot into a JSON-serializable dictionary.
The boolean kwargs are forwarded to the json() calls of nested objects and
determine whether certain attributes are included in the output.
:param self: Used to access the Timeslot instance.
:param **kwargs: bool: Serialization flags forwarded to nested objects.
:return: a cleaned dictionary of the data in the timeslot.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['frame']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class EventTrigger:
"""
Description of EventTrigger
Args:
frame (ContentManager.Channel.FrameSet.Frame):
json (Dict[str,Any]):
"""
def __init__(self, frame: ContentManager.Channel.FrameSet.Frame, json: Dict[str, Any]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to distinguish between the class and instance.
:param frame:ContentManager.Channel.FrameSet.Frame: Used to .
:param json:Dict[str: Used to pass the json dictionary to the __init__ function.
:param Any]: Used to make the function more flexible.
:return: None.
:doc-author: Trelent
"""
self.frame = frame
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into the attributes of this
EventTrigger instance, resolving the referenced playlist through the ContentManager.
:param self: Used to refer to the EventTrigger instance.
:param json: Dict[str, Any]: The JSON dictionary describing the event trigger.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
playlist_id = get_id(json.get('playlist'))
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.id: Optional[int] = json.get('id')
self.itemsToPick: Optional[int] = json.get('itemsToPick')
self.playFullScreen: Optional[bool] = json.get('playFullScreen')
self.playlist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(playlist_id)
self.repeatTriggerResponse: Optional[str] = json.get('repeatTriggerResponse')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.variable: ContentManager.Channel.FrameSet.Frame.EventTrigger.Variable = ContentManager.Channel.FrameSet.Frame.EventTrigger.Variable(self, json.get('variable'))
def json(self, **kwargs: bool):
"""
The json function is a custom function that converts the object into a JSON string.
It is used to convert the data from an object into a JSON format for easy storage and transmission.
The json function takes in optional arguments, which are boolean values that determine whether or not to include certain fields in the resulting JSON string.
:param self: Used to access the object that is being called.
:param **kwargs:bool: Used to determine if the function should return a json string or a python dictionary.
:return: a dictionary of the object's attributes.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['frame']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Variable:
"""
Description of Variable
Attributes:
eventtrigger (type):
Args:
eventtrigger (ContentManager.Channel.FrameSet.Frame.EventTrigger):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, eventtrigger: ContentManager.Channel.FrameSet.Frame.EventTrigger, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the object that is being created.
:param eventtrigger:ContentManager.Channel.FrameSet.Frame.EventTrigger: Used to specify the event that triggers this frame.
:param json:Optional[Dict[str: Used to specify that the json parameter is optional.
:param Any]]: Used to specify that the type of json can be anything.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.eventtrigger = eventtrigger
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into an instance of the class.
It is used to create new instances of this class from JSON objects that are received from the server.
:param self: Used to access the class instance.
:param json: Dict[str, Any]: The JSON dictionary describing the variable.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
media_id = get_id(json.get('controlScript'))
self.controlScript: Optional[ContentManager.Media] = self.eventtrigger.frame.frameset.channel.cm.media.get(media_id)
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.sharedName: Optional[str] = json.get('sharedName')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional keyword arguments and returns a dictionary of the object's properties.
:param self: Used to access the instance variables of the class.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary with the following keys:.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['eventtrigger']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class TimeTrigger:
"""
Description of TimeTrigger
Attributes:
frame (type):
Args:
frame (ContentManager.Channel.FrameSet.Frame):
json (Dict[str,Any]):
"""
def __init__(self, frame: ContentManager.Channel.FrameSet.Frame, json: Dict[str, Any]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param frame:ContentManager.Channel.FrameSet.Frame: Used to determine which frame the.
:param json:Dict[str: Used to create a new instance of the FrameSet class.
:param Any]: Used to allow the json parameter to be of any type.
:return: None.
:doc-author: Trelent
"""
self.frame = frame
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into the attributes of this
TimeTrigger instance, resolving the referenced playlist through the ContentManager.
:param self: Used to access the TimeTrigger instance.
:param json: Dict[str, Any]: The JSON dictionary describing the time trigger.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
playlist_id = get_id(json.get('playlist'))
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.days: Optional[List[str]] = json.get('days')
self.endDate: Optional[str] = json.get('endDate')
self.id: Optional[int] = json.get('id')
self.itemsToPick: Optional[int] = json.get('itemsToPick')
self.name: Optional[str] = json.get('name')
self.playFullScreen: Optional[bool] = json.get('playFullScreen')
self.playlist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(playlist_id)
self.recurrencePattern: Optional[str] = json.get('recurrencePattern')
self.repeatEndTime: Optional[str] = json.get('repeatEndTime')
self.repeatStartTime: Optional[str] = json.get('repeatStartTime')
self.repeatTriggerResponse: Optional[str] = json.get('repeatTriggerResponse')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.startDate: Optional[str] = json.get('startDate')
self.time: Optional[str] = json.get('time')
def json(self, **kwargs: bool):
"""
The json function is a custom function that converts the object into a JSON string.
It is used to convert the data from an object into a JSON format for easy storage and transmission.
The json function takes in optional arguments, which are boolean values that determine whether or not to include certain fields in the resulting JSON string.
:param self: Used to access the attributes of the object.
:param **kwargs:bool: Used to determine whether to use the json.
:return: a dictionary of the object's attributes.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['frame']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Variable:
"""
Description of Variable
Attributes:
channel (type):
Args:
channel (ContentManager.Channel):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, channel: ContentManager.Channel, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param channel:ContentManager.Channel: Used to pass the channel object to the class.
:param json:Optional[Dict[str: Used to check if the json parameter is None.
:param Any]]: Used to specify that the json parameter can be None.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.channel = channel
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into an instance of the class.
It is used to create new instances of the class and populate them with data from
a JSON object. It takes one argument, json, which is a dictionary representing
the attributes of an instance.
:param self: Used to access the instance of this class.
:param json:Dict[str: Used to pass the json data to the unpack_json function.
:param Any]: Used to accept an arbitrary number of arguments.
:return: a list of the following items:.
:doc-author: Trelent
"""
media_id = get_id(json.get('controlScript'))
self.controlScript: Optional[ContentManager.Media] = self.channel.cm.media.get(media_id)
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.sharedName: Optional[str] = json.get('sharedName')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
"""
The json function is a helper function that converts the object into a json string.
It is used to convert the object into a json string for easy storage and transmission.
The function takes in an optional keyword argument, **kwargs, which can be used to specify
which attributes of the class should be included in the resulting JSON string.
:param self: Used to reference the object itself.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary of the object's attributes.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['channel']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MetadataValue:
"""
Description of MetadataValue
Attributes:
channel (type):
Args:
channel (ContentManager.Channel):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, channel: ContentManager.Channel, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to distinguish the instance of the class from other instances.
:param channel:ContentManager.Channel: Used to get the channel object.
:param json:Optional[Dict[str: Used to indicate that the json parameter is optional.
:param Any]]: Used to specify that the json parameter can be of any type.
:return: None.
:doc-author: Trelent
"""
self.channel = channel
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object from the ContentManager API into
the attributes of this MetadataValue instance, resolving the referenced player
metadata through the ContentManager.
:param self: Used to refer to the MetadataValue instance.
:param json: Dict[str, Any]: The JSON dictionary describing the metadata value.
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
player_metadata_id = get_id(json.get('playerMetadata'))
self.id: Optional[int] = json.get('id')
self.playerMetadata: Optional[ContentManager.PlayerMetadata] = self.channel.cm.player_metadatas.get(player_metadata_id)
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
"""
The json function is a helper function that returns the object as a json string.
It is used to make sure that all objects are serializable.
:param self: Used to reference the object itself.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary of the object's data.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['channel']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ChannelList(MutableSequence[Channel]):
"""_summary_
Args:
MutableSequence (_type_): _description_
"""
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Channel]] = None) -> None:
"""
The __init__ function is called when a class is instantiated.
It can take arguments, just like any other function, but its main purpose is to set up the instance with the attributes it needs.
The first argument of every __init__ function must be "self", which represents the instance of the object itself.
:param self: Used to access the instance's attributes.
:param cm:ContentManager: Used to access the data.
:param init_list: Optional[List[ContentManager.Channel]]: An existing list of channels to wrap; if None, the data is fetched from the API.
:return: None.
:doc-author: Trelent
"""
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Channel] = []  # backing list; __get_data appends to it
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
"""
The __get_data function retrieves the channels from the Content Manager and
appends them to the backing list, fetching each frame's timeslots, event
triggers and time triggers with additional requests.
:param self: Used to access the ContentManager instance.
:return: None; self.__data is populated in place.
:doc-author: Trelent
"""
response: Dict[str, Any] = self.cm.request('get', '/channels')
for elem in response.get('list', []):
item = ContentManager.Channel(self.cm, elem)
for i, frame in enumerate(item.frameset.frames):
timeslots_response = self.cm.request('get', f'/channels/{item.id}/frames/{frame.id}/timeslots', debug_key="channel_timeslots")
eventtriggers_response = self.cm.request('get', f'/channels/{item.id}/frames/{frame.id}/eventtriggers', debug_key="channel_eventtriggers")
timetriggers_response = self.cm.request('get', f'/channels/{item.id}/frames/{frame.id}/timetriggers', debug_key="channel_timetriggers")
item.frameset.frames[i].timeslots = [ContentManager.Channel.FrameSet.Frame.Timeslot(frame, elem) for elem in timeslots_response.get('timeslots', [])]
item.frameset.frames[i].eventtriggers = [ContentManager.Channel.FrameSet.Frame.EventTrigger(frame, elem) for elem in eventtriggers_response.get('eventTriggers', [])]
item.frameset.frames[i].timetriggers = [ContentManager.Channel.FrameSet.Frame.TimeTrigger(frame, elem) for elem in timetriggers_response.get('timeTriggers', [])]
self.__data.append(item)
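# Note (editorial): __get_data issues three extra requests per frame (timeslots,
# eventtriggers, timetriggers), so hydrating a large channel list can be slow.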
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Channel]:
"""
The get function returns a Channel object with the given id or name.
If no channel is found, None is returned.
:param self: Used to access the instance of the class.
:param search:Union[int: Used to determine what type of search to use.
:param str: Used to define the name of the function.
:param None]: Used to indicate that the function should return None if no data is found.
:return: the channel with the given id or name.
:doc-author: Trelent
"""
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Channel with {search} not found')
return None
def __len__(self) -> int:
"""
The __len__ function returns the number of items in the collection.
:param self: Used to distinguish between the instance and class methods.
:return: the number of items in the list.
:doc-author: Trelent
"""
return len(self.__data)
def __iter__(self):
"""
The __iter__ function is a special function that allows you to use the object in a for loop.
For example, if you have an array of objects and want to iterate through them, you can write:
for obj in my_array_of_objects:
print(obj)
:param self: Used to access the instance variables of the class.
:return: an iterator object.
:doc-author: Trelent
"""
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
"""
The __getitem__ function allows you to use the object as if it were a list.
For example, if you have an instance of a class called `my_object`, and that
instance has an attribute called `my_list`, then the following two lines are
equivalent:
my_object[0]
my_list[0]
:param self: Used to access the instance variables of the class.
:param i:Union[slice: Used to specify the type of i.
:param int]: Used to index into the data.
:return: an instance of the class.
:doc-author: Trelent
"""
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
"""
The __delitem__ function removes an item from the list.
:param self: Used to reference the object that is being operated on.
:param i:int: Used to specify the index of the item to be deleted.
:return: None.
:doc-author: Trelent
"""
del self.__data[i]
def __setitem__(self, i: int, value):
"""
The __setitem__ function is used to set a value in the list.
It takes two arguments: the index of the value to change and the new value to store there.
:param self: Used to refer to the instance of the class.
:param i: int: Index of the item to be set.
:param value: The value to store at index i.
:return: None.
:doc-author: Trelent
"""
self.__data[i] = value
def insert(self, i: int, value) -> None:
"""
The insert function inserts a value into the list at the given index,
delegating to list.insert on the underlying data.
:param self: Used to reference the instance of the class.
:param i: int: Index at which to insert the value.
:param value: Value to insert at index i.
:return: None.
:doc-author: Trelent
"""
self.__data.insert(i, value)
def append(self, value: ContentManager.Channel) -> None:
"""
The append function adds a value to the end of the list.
:param self: Used to refer to the instance of the object that is calling the function.
:param value: ContentManager.Channel: The channel to append.
:return: None.
:doc-author: Trelent
"""
self.__data.append(value)
def usage_info(self):
if len(self.__data) == 0:
self.__get_data()
used_channels: List[int] = []
used_playlists: List[int] = []
used_media: List[int] = []
for channel in self.__data:
if channel.id is None:
logging.error("Expected channel to have id")
continue
used, temp_playlist, temp_media = channel.usage_info()
if used:
used_channels.append(channel.id)
used_playlists.extend(temp_playlist)
used_media.extend(temp_media)
used_channels = list(set(used_channels))
used_playlists = list(set(used_playlists))
used_media = list(set(used_media))
return used_channels, used_playlists, used_media
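# Usage sketch (hypothetical; assumes the ContentManager exposes this list as
# `cm.channels`, analogous to cm.playlists and cm.categories): aggregate usage across
# all channels, e.g. to find unreferenced media before a cleanup:
#     used_channels, used_playlists, used_media = cm.channels.usage_info()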
""" DISTRIBUTIONSERVER """
class DistributionServer:
"""
Description of DistributionServer
Attributes:
cm (type):
Args:
cm (ContentManager):
json (Dict[str,Any]):
"""
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
"""
The __init__ function is the constructor for a class. It is called when an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the attributes of the class.
:param cm:ContentManager: Used to access the content manager.
:param json:Dict[str: Used to pass in the json object.
:param Any]: Used to specify that the parameter can be of any type.
:return: None.
:doc-author: Trelent
"""
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a dictionary of JSON data and unpacks it into an object.
The function is intended to be used as a means to convert the JSON response from the Content Manager REST API into an object that can be used by Python.
:param self: Used to access the parent object.
:param json:Dict[str: Used to pass the json object returned from the server.
:param Any]: Used to force additional parameters to be passed.
:return: None; attributes are set on the DistributionServer instance.
:doc-author: Trelent
"""
self.auditSettings: ContentManager.DistributionServer.AuditSettings = ContentManager.DistributionServer.AuditSettings(self, json.get('auditSettings'))
self.broadcastServer: ContentManager.DistributionServer.BroadcastServer = ContentManager.DistributionServer.BroadcastServer(self, json.get('broadcastServer'))
self.description: Optional[str] = json.get('description')
self.driver: Optional[str] = json.get('driver')
self.driverOptions: List[ContentManager.DistributionServer.DriverOptions] = [ContentManager.DistributionServer.DriverOptions(self, elem) for elem in json.get('driverOptions', [])]
self.iadeaServer: ContentManager.DistributionServer.IadeaServer = ContentManager.DistributionServer.IadeaServer(self, json.get('iadeaServer'))
self.id: Optional[int] = json.get('id')
self.monitoringSettings: ContentManager.DistributionServer.MonitoringSettings = ContentManager.DistributionServer.MonitoringSettings(self, json.get('monitoringSettings'))
self.name: Optional[str] = json.get('name')
self.omnicastServer: ContentManager.DistributionServer.OmnicastServer = ContentManager.DistributionServer.OmnicastServer(self, json.get('omnicastServer'))
self.schedules: List[ContentManager.DistributionServer.Schedule] = [ContentManager.DistributionServer.Schedule(self, elem) for elem in json.get('schedules', [])]
self.snapshotSettings: ContentManager.DistributionServer.SnapshotSettings = ContentManager.DistributionServer.SnapshotSettings(self, json.get('snapshotSettings'))
self.synchronization: Optional[str] = json.get('synchronization')
self.uuid: Optional[str] = json.get('uuid')
# self.distributions Do not see added value to add this
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in kwargs which are flags that determine what data is included in the json.
The default is to include all data, but the following flag is supported:
player_update - if set, only the 'driver', 'id', 'name' and 'snapshotSettings' fields are returned.
:param self: Used to refer to the object itself.
:param **kwargs:bool: Used to determine whether or not to include the player's data in the json.
:return: a dictionary of the object's data.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "player_update" and use:
data = {k:v for k,v in data.items() if k in ['driver', 'id', 'name', 'snapshotSettings']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
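# Illustrative example (hypothetical `server` instance): with player_update=True the
# payload is reduced to the fields a player-update call needs ('driver', 'id',
# 'name', 'snapshotSettings'):
#     payload = server.json(player_update=True)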
class AuditSettings:
"""
Description of AuditSettings
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to reference the object itself.
:param server:ContentManager.DistributionServer: Used to pass the server object to the.
:param json:Optional[Dict[str: Used to tell the function that it can be called with or without a dictionary.
:param Any]]: Used to make the function call more flexible.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a JSON object as input and returns an instance of the class.
The function is used to deserialize a JSON object into an instance of the class.
:param self: Used to access the class's attributes.
:param json:Dict[str: Used to specify the type of the parameter.
:param Any]: Used to accept any data type.
:return: a dictionary with the key 'enabled' and the value of that key is a boolean.
:doc-author: Trelent
"""
self.enabled: Optional[bool] = json.get('enabled')
self.uploadFrequency: Optional[str] = json.get('uploadFrequency')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that will remove all attributes from the object that are not necessary for it's representation.
:param self: Used to distinguish between the class and instance methods.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary with the data of the object.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class BroadcastServer:
"""
Description of BroadcastServer
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the current instance of the class.
:param server:ContentManager.DistributionServer: Used to pass the.
:param json:Optional[Dict[str: Used to indicate that the json parameter is optional.
:param Any]]: Used to allow the function to be called with a different type of parameter.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a dictionary of JSON data and unpacks it into an object.
The function is used to take the JSON response from the API call and create an object that can be used by other functions in this module.
:param self: Used to refer to the instance of the class.
:param json:Dict[str: Used to store the json data from the.
:param Any]: Used to force the return type to be a Dict[str, Any].
:return: None; attributes are set on the instance.
:doc-author: Trelent
"""
self.delivery: Optional[str] = json.get('delivery')
self.lastStatus: Optional[str] = json.get('lastStatus')
self.logLevel: Optional[int] = json.get('logLevel')
self.macAddress: Optional[str] = json.get('macAddress')
self.password: Optional[str] = json.get('password')
self.planRevision: Optional[int] = json.get('planRevision')
self.playerCacheSize: Optional[int] = json.get('playerCacheSize')
self.pollingInterval: Optional[int] = json.get('pollingInterval')
self.serverUrl: Optional[str] = json.get('serverUrl')
self.username: Optional[str] = json.get('username')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that will remove all attributes from the object that are not necessary for it's representation.
:param self: Used to reference the instance of the class.
:param **kwargs:bool: Used to determine if the json function should return a string or not.
:return: a dictionary containing the data from the object.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class DriverOptions:
"""
Description of DriverOptions
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param server:ContentManager.DistributionServer: Used to pass the server instance to the.
:param json:Optional[Dict[str: Used to specify that the json parameter is optional.
:param Any]]: Used to make the function definition more flexible.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a JSON object as input and returns an instance of the class.
:param self: Used to refer to the instance of the class that is being used.
:param json:Dict[str: Used to unpack the json dictionary.
:param Any]: Used to accept any data type.
:return: a dictionary with the following keys:.
:doc-author: Trelent
"""
self.id: Optional[int] = json.get('id')
self.key: Optional[str] = json.get('key')
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that will remove all attributes from the object that are not necessary for it's representation.
:param self: Used to distinguish between different instances of a class.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary with the data of the object.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class IadeaServer:
"""
Description of IadeaServer
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an instance of a class is created.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to reference the current instance of the class.
:param server:ContentManager.DistributionServer: Used to pass the server to the class.
:param json:Optional[Dict[str: Used to specify that the json parameter is optional.
:param Any]]: Used to make the code more readable.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a dictionary of JSON data and unpacks it into an object with the same attributes as the class.
The function is used to create objects from JSON data returned by API calls.
:param self: Used to access the class's attributes.
:param json:Dict[str: Used to pass in the json dictionary that is returned from the server.
:param Any]: Used to force the return type to be a Dict[str, Any] instead of the default Dict[str, Any].
:return: what?.
:doc-author: Trelent
"""
self.children: List[ContentManager.DistributionServer.IadeaServer] = [ContentManager.DistributionServer.IadeaServer(self.server, elem) for elem in json.get('children', [])]
self.heartbeatErrorRetryRate: Optional[int] = json.get('heartbeatErrorRetryRate')
self.logLevel: Optional[int] = json.get('logLevel')
self.macAddress: Optional[str] = json.get('macAddress')
self.parent: Optional[int] = get_id(json.get('parent'))
self.planErrorRepollingRate: Optional[int] = json.get('planErrorRepollingRate')
self.planPollingRate: Optional[int] = json.get('planPollingRate')
self.planRevision: Optional[int] = json.get('planRevision')
self.planStatusErrorRetryRate: Optional[int] = json.get('planStatusErrorRetryRate')
self.playerHeartbeatRate: Optional[int] = json.get('playerHeartbeatRate')
self.scheduleExpansionDays: Optional[int] = json.get('scheduleExpansionDays')
self.scheduleRefreshTime: Optional[str] = json.get('scheduleRefreshTime')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that can be used to specify which variables should be included in the json output and/or exclude certain variables from being included.
:param self: Used to distinguish between the class and instance methods.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary of the object's data.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MonitoringSettings:
"""
Description of MonitoringSettings
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance's attributes.
:param server:ContentManager.DistributionServer: Used to .
:param json:Optional[Dict[str: Used to tell the function that it can accept a dictionary as an argument.
:param Any]]: Used to make the function call compatible with both Python 2 and 3.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a dictionary of JSON data and unpacks it into an object with the same attributes as the class.
This is useful for when you need to create an instance of a class from JSON data that has been sent back by the API.
:param self: Used to refer to the instance of the class.
:param json:Dict[str: Used to pass the json dictionary to the unpack_json function.
:param Any]: Used to support multiple Python versions.
:return: the following:.
:doc-author: Trelent
"""
self.diskSpaceReserve: Optional[int] = json.get('diskSpaceReserve')
self.enabled: Optional[bool] = json.get('enabled')
self.heartbeatRate: Optional[int] = json.get('heartbeatRate')
self.overdueRate: Optional[int] = json.get('overdueRate')
self.planStatusInterval: Optional[int] = json.get('planStatusInterval')
self.purgeLogsAfter: Optional[int] = json.get('purgeLogsAfter')
self.uploadLogs: Optional[bool] = json.get('uploadLogs')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that will remove all attributes from the object that are not necessary for it's representation.
:param self: Used to reference the object that the method is being called on.
:param **kwargs:bool: Used to determine whether or not to include the.
:return: a dictionary of the object's data.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class OmnicastServer:
"""
Description of OmnicastServer
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param server:ContentManager.DistributionServer: Used to access the server's.
:param json:Optional[Dict[str: Used to specify that the json parameter is optional.
:param Any]]: Used to make the code more readable.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function accepts a dictionary containing the JSON data from the API call and returns an object of type User.
:param self: Used to refer to the instance of the class.
:param json:Dict[str: Used to specify that the function is expecting a dictionary of strings as its parameter.
:param Any]: Used to allow the function to accept any type of data.
:return: a tuple of two values.
:doc-author: Trelent
"""
self.url: Optional[str] = json.get('url')
self.username: Optional[str] = json.get('username')
self.password: Optional[str] = json.get('password')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that will remove all attributes from the object that are not necessary for it's representation.
:param self: Used to reference the object itself.
:param **kwargs:bool: Used to determine if the json function should.
:return: a dictionary of the data in the object.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Schedule:
"""
Description of Schedule
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to access the instance variables of the class.
:param server:ContentManager.DistributionServer: Used to pass the server to the constructor.
:param json:Optional[Dict[str: Used to indicate that the json parameter is optional.
:param Any]]: Used to make the function more flexible.
:return: None.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function unpacks a JSON object into an instance of the class.
It is used to create a new instance of the class and populate it with data from
a JSON object. It is also used to update an existing instance when passed a
JSON object that represents that instance.
:param self: Used to access the server object.
:param json:Dict[str: Used to pass the json dict from the get_json function.
:param Any]: Used to accept any data type.
:return: what?.
:doc-author: Trelent
"""
player_group_id = get_id(json.get('playerGroup'))
self.dayOfWeek: Optional[str] = json.get('dayOfWeek')
self.hours: Optional[str] = json.get('hours')
self.id: Optional[int] = json.get('id')
self.minutes: Optional[str] = json.get('minutes')
self.playerGroup: Optional[ContentManager.PlayerGroup] = self.server.cm.playergroups.get(player_group_id)
self.seconds: Optional[str] = json.get('seconds')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
"""
The json function is used to convert a class into a JSON object.
It is called by the json method of the class. It takes in kwargs which are boolean values that determine if an attribute should be included in the JSON object or not.
:param self: Used to reference the object itself.
:param **kwargs:bool: Used to determine if the json function should return a string or dictionary.
:return: a dictionary with the following keys:.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
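# Illustrative sketch (assumption, not from the original source): unpack_json() resolves the
# 'playerGroup' reference through cm.playergroups, so a Schedule built from raw JSON holds a
# PlayerGroup object instead of the nested id dict. Values below are made up.
#
#   raw = {'id': 12, 'type': 'daily', 'hours': '2', 'minutes': '30', 'playerGroup': {'id': 7}}
#   sched = ContentManager.DistributionServer.Schedule(dist_server, raw)
#   sched.playerGroup   # -> the PlayerGroup with id 7, or None if that group is unknown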
class SnapshotSettings:
"""
Description of SnapshotSettings
Attributes:
server (type):
Args:
server (ContentManager.DistributionServer):
json (Optional[Dict[str,Any]]):
"""
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
"""
The __init__ function is the constructor for a class. It is called whenever an object of that class is instantiated.
The __init__ function can take arguments, but self is always the first one.
:param self: Used to refer to the instance of the class.
:param server:ContentManager.DistributionServer: Used to pass the server object to the class.
:param json:Optional[Dict[str: Used to indicate that the json parameter is optional.
:param Any]]: Used to allow the use of Dict[str, Any] as a parameter.
:return: :.
:doc-author: Trelent
"""
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
"""
The unpack_json function takes a JSON object as input and returns the following:
- connectionValid: A boolean indicating whether or not the connection to the server is valid.
- enabled: A boolean indicating whether or not automatic snapshots are enabled for this database.
- intervalNumSnapshots: An integer representing how many snapshots will be taken during each interval. If set to 0, then no snapshots will be taken during that interval (but one may still be taken on-demand). If set to 1, then one snapshot will be taken at the end of that interval if a snapshot has been started during that time period; otherwise no snapshot will be created for that time period but any in progress at the end of it's duration will remain unchanged. Any other value greater than 1 indicates how many snapshots should exist after each interval is completed.
:param self: Used to refer to the instance of the class.
:param json:Dict[str: Used to pass the json dictionary to the unpack_json function.
:param Any]: Used to support multiple types.
:return: a dictionary with the following keys:.
:doc-author: Trelent
"""
self.connectionValid: Optional[bool] = json.get('connectionValid')
self.enabled: Optional[bool] = json.get('enabled')
self.interval: Optional[int] = json.get('interval')
self.intervalNumSnapshots: Optional[int] = json.get('intervalNumSnapshots')
self.intervalProfile: Optional[str] = json.get('intervalProfile')
self.onDemandProfile: Optional[str] = json.get('onDemandProfile')
self.onEventProfile: Optional[str] = json.get('onEventProfile')
def json(self, **kwargs: bool):
"""
The json function is used to convert the object into a json format.
It takes in optional arguments that will filter out specific data from the object.
The only required argument is **kwargs which must include player_update=True
:param self: Used to access the current instance of the object.
:param **kwargs:bool: Used to determine whether or not the json function should return a player_update.
:return: a dictionary of all the data in the object.
:doc-author: Trelent
"""
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
if name == "player_update" and use:
data = {k:v for k,v in data.items() if k in ['connectionValid', 'enabled', 'interval', 'intervalNumSnapshots', 'intervalProfile', 'onDemandProfile', 'onEventProfile']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
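# Illustrative sketch (assumption, not from the original source): passing player_update=True to
# json() trims the output to the snapshot fields listed above, which is the shape used when these
# settings are embedded in a player update payload.
#
#   snapshot_settings.json(player_update=True)
#   # -> only connectionValid / enabled / interval / intervalNumSnapshots / intervalProfile /
#   #    onDemandProfile / onEventProfile remain (and clean_data drops None values)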
class DistributionServerList(MutableSequence[DistributionServer]):
"""_summary_
Args:
MutableSequence (_type_): _description_
"""
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.DistributionServer]] = None) -> None:
"""
The __init__ function is called when a class is instantiated. It can take arguments, some of which are defaulted to values that have been specified in the class definition.
The __init__ function is where you should initialize any instance variables that your object needs for its operation.
:param self: Used to access the attributes and methods of the class.
:param cm:ContentManager: Used to access the ContentManager object.
:param init_list:Optional[List[ContentManager.DistributionServer]]=None: Used to initialize the list of servers.
:return: None.
:doc-author: Trelent
"""
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
"""
The __get_data function retrieves the data from the Content Manager API and returns it as a dictionary.
:param self: Used to refer to the instance of the class.
:return: a list of dictionaries.
:doc-author: Trelent
"""
response: Dict[str, Any] = self.cm.request('get', '/distributions')
for elem in response.get('list', []):
item = ContentManager.DistributionServer(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.DistributionServer]:
"""
The get function returns a DistributionServer object if the search parameter is an integer,
or a list of DistributionServer objects if the search parameter is a string. If no match is found,
None will be returned.
:param self: Used to access the attributes and methods of the class.
:param search:Union[int: Used to determine whether the search is an integer or a string.
:param str: Used to define the name of the parameter.
:param None]: Used to indicate that the function should return None if no matching object is found.
:return: a distribution server object.
:doc-author: Trelent
"""
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'DistributionServer with {search} not found')
return None
def __len__(self) -> int:
"""
The __len__ function returns the number of items in the collection.
:param self: Used to distinguish the instance of the class from other instances.
:return: the number of items in the list.
:doc-author: Trelent
"""
return len(self.__data)
def __iter__(self):
"""
The __iter__ function is what makes an object iterable.
This function is called when you use a for loop, or when you call iter(object) to get an iterator from that object.
The __iter__ function should return a new iterator object that can iterate over all the objects in the container.
:param self: Used to access the instance variables of the class.
:return: an iterator object.
:doc-author: Trelent
"""
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
"""
The __getitem__ function allows you to use the object as if it were a list.
For example, if you have an instance of a class called `my_object`, and that
instance has an attribute called `my_list`, then the following two lines are
equivalent:
my_object[0]
my_list[0]
:param self: Used to access the instance variables of the class.
:param i:Union[slice: Used to specify the type of i.
:param int]: Used to index into the data array.
:return: a new object, so it must be a subclass of the.
:doc-author: Trelent
"""
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
"""
The __delitem__ function removes an item from the list.
:param self: Used to reference the instance of the class.
:param i:int: Used to specify the index of the item to be deleted.
:return: None.
:doc-author: Trelent
"""
del self.__data[i]
def __setitem__(self, i: int, value):
"""
The __setitem__ function is used to set a value in the list.
It takes two arguments, the first is an index of a value to change and the second is what you want to change it to.
:param self: Used to refer to the instance of the class.
:param i:int: Used to indicate the index of the item to be set.
:param value: Used to set the value of the item at index i.
:return: None.
:doc-author: Trelent
"""
self.__data[i] = value
def insert(self, i: int, value) -> None:
"""
The insert function inserts a value into the list at the given index.
The first argument is the index of where to insert, and second argument is
the value to be inserted. The function returns nothing.
:param self: Used to reference the object itself.
:param i:int: Used to specify the index of the value to be inserted.
:param value: Used to insert the value at index i.
:return: the value of the node that is inserted.
:doc-author: Trelent
"""
self.__data.insert(i, value)
def append(self, value: ContentManager.DistributionServer) -> None:
"""
The append function adds a value to the end of the list.
:param self: Used to reference the object itself.
:param value:ContentManager.DistributionServer: Used to .
:return: the list.
:doc-author: Trelent
"""
self.__data.append(value)
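# Illustrative usage sketch (assumption, not from the original source): DistributionServerList
# lazily loads from '/distributions' on first access and supports lookup by id or by name.
#
#   servers = cm.distributionservers         # as used elsewhere in this module
#   servers.get(4)                            # -> DistributionServer with id 4, or None
#   servers.get('Main transmission server')   # -> first server whose name matches (name is made up)
#   [s.name for s in servers]                 # iteration triggers a load if the list is empty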
""" EXMODULE """
class ExModule:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.description: Optional[str] = json.get('description')
self.name: Optional[str] = json.get('name')
self.total: Optional[int] = json.get('total')
self.used: Optional[int] = json.get('used')
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ExModuleList(MutableSequence[ExModule]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.ExModule]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.ExModule] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
response: Dict[str, Any] = self.cm.request('get', '/players/modules')
for elem in response.get('list', []):
item = ContentManager.ExModule(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.ExModule]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
logging.warning('Int not possible for exModule')
return None
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
logging.warning('Int not possible for exModule')
return None
else:
if elem.name == search:
return elem
logging.warning(f'ExModule with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.ExModule) -> None:
self.__data.append(value)
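# Illustrative note (assumption, not from the original source): ExModule entries carry no usable
# integer id, so ExModuleList.get() only supports name lookups and logs a warning for int searches.
#
#   cm.ex_modules.get('lineup')   # -> ExModule named 'lineup', or None ('lineup' is a made-up name)
#   cm.ex_modules.get(3)          # -> None, after logging "Int not possible for exModule"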
""" LICENSE """
class License:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
# self.featureLicenses TODO
# self.playerLicenses TODO
self.advantageCoverageUntil: Optional[str] = json.get('advantageCoverageUntil')
self.basicDesignerSeats: Optional[int] = json.get('basicDesignerSeats')
self.campaignSeats: Optional[int] = json.get('campaignSeats')
self.campaignTargets: Optional[int] = json.get('campaignTargets')
self.countSubNetworks: Optional[int] = json.get('countSubNetworks')
self.dongleId: Optional[str] = json.get("dongleId")
self.exModules: List[ContentManager.ExModule] = [elem for elem in [self.cm.ex_modules.get(get_name(elem)) for elem in json.get('exModules', [])] if not elem is None]
self.hasAdManager: Optional[bool] = json.get('hasAdManager')
self.isBetaDongle: Optional[bool] = json.get("isBetaDongle")
self.isSoftDongle: Optional[bool] = json.get('isSoftDongle')
self.isTrial: Optional[bool] = json.get('isTrial')
self.isUsageUnlimited: Optional[bool] = json.get('isUsageUnlimited')
self.name : Optional[str] = json.get('name')
self.playerCals: Optional[int] = json.get('playerCals')
self.playerCalsUnlimited: Optional[bool] = json.get('playerCalsUnlimited')
self.playerClientAccessLicenses: Optional[str] = json.get('playerClientAccessLicenses')
self.premiumDesignerSeats: Optional[int] = json.get('premiumDesignerSeats')
self.productId: Optional[str] = json.get("productId")
self.professionalDesignerSeats: Optional[int] = json.get('professionalDesignerSeats')
self.scalaMaintenanceExpired: Optional[bool] = json.get('scalaMaintenanceExpired')
self.scalaOutOfMaintenance: Optional[bool] = json.get('scalaOutOfMaintenance')
self.softDongleLicenseTo: Optional[str] = json.get('softDongleLicenseTo')
self.standardDesignerSeats: Optional[int] = json.get('standardDesignerSeats')
self.trialDaysLeft: Optional[int] = json.get('trialDaysLeft')
self.usageUntil: Optional[str] = json.get('usageUntil')
self.usedCount: Optional[int] = json.get('usedCount')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class LicenseList(MutableSequence[License]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.License]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.License] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
if self.cm.network is None:
raise Exception('Need current network')
response: Dict[str, Any] = self.cm.request('get', f'/license/networks/{self.cm.network.id}')
if response.get('list') is None:
item = ContentManager.License(self.cm, response)
self.__data.append(item)
else:
for elem in response.get('list', []):
item = ContentManager.License(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.License]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
logging.warning("Int search not possible for license")
return None
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
logging.warning("Int search not possible for license")
return None
else:
if elem.name == search:
return elem
logging.warning(f'License with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.License) -> None:
self.__data.append(value)
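# Illustrative note (assumption, not from the original source): LicenseList loads from
# '/license/networks/<current network id>', so cm.network must already be resolved or
# __get_data() raises. Responses without a 'list' wrapper are treated as a single license.
#
#   licenses = ContentManager.LicenseList(cm)   # or however the ContentManager exposes it
#   licenses.get('Scala Enterprise')            # name lookup only (name here is made up);
#                                               # int searches log a warning and return None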
""" MEDIA """
class Media:
def __init__(self, cm: ContentManager, data: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
created_user_id = get_id(json.get('createdBy'))
modified_user_id = get_id(json.get('modifiedBy'))
template_id = get_id(json.get('template'))
uploaded_user_id = get_id(json.get('uploadedBy'))
self.approval: ContentManager.Media.Approval = ContentManager.Media.Approval(self, json.get('approval'))
self.approvalDetail: ContentManager.Media.ApprovalDetail = ContentManager.Media.ApprovalDetail(self, json.get('approvalDetail'))
self.approvalStatus: Optional[str] = json.get('approvalStatus')
self.archived: Optional[bool] = json.get('archived')
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.backgroundColor: Optional[str] = json.get('backgroundColor')
self.broadcastPriority: Optional[str] = json.get('broadcastPriority')
self.campaignMedia: Optional[bool] = json.get('campaignMedia')
self.categories: List[ContentManager.Category] = get_list(json.get('categories'), self.cm.categories)
self.createdBy: Optional[ContentManager.User] = self.cm.users.get(created_user_id)
self.createdDate: Optional[str] = json.get('createdDate')
self.description: Optional[str] = json.get('description')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.duration: Optional[int] = json.get('duration')
self.endValidDate: Optional[str] = json.get('endValidDate')
self.fields: List[ContentManager.Media.Field] = [ContentManager.Media.Field(self, elem) for elem in json.get('fields', [])]
self.generatingThumbnail: Optional[bool] = json.get('generatingThumbnail')
self.hasSnapshot: Optional[bool] = json.get('hasSnapshot')
self.hasUnapprovedElements: Optional[bool] = json.get('hasUnapprovedElements')
self.height: Optional[int] = json.get('height')
self.id: Optional[int] = json.get('id')
self.input: Optional[str] = json.get('input')
self.lastModified: Optional[str] = json.get('lastModified')
self.length: Optional[int] = json.get('length')
self.mediaItemFiles: List[ContentManager.Media.ItemFile] = [ContentManager.Media.ItemFile(self, elem) for elem in json.get('mediaItemFiles', [])]
self.mediaType: Optional[str] = json.get('mediaType')
self.messagesCount: int = json.get('messagesCount', 0)
self.modifiedBy: Optional[ContentManager.User] = self.cm.users.get(modified_user_id)
self.name: Optional[str] = json.get('name')
self.neverArchive: Optional[bool] = json.get('neverArchive')
self.originalCreatedDate: Optional[str] = json.get("originalCreatedDate")
self.pages: Optional[int] = json.get('pages')
self.path: Optional[str] = json.get('path')
self.playFullscreen: Optional[bool] = json.get('playFullscreen')
self.playlistsCount: int = json.get('playlistsCount', 0)
self.prettifyDuration: Optional[str] = json.get('prettifyDuration')
self.prettifyLength: Optional[str] = json.get('prettifyLength')
self.prettifyType: Optional[str] = json.get('prettifyType')
self.readOnly: Optional[bool] = json.get('readOnly')
self.revision: Optional[int] = json.get('revision')
self.saveAndApprove: Optional[bool] = json.get('saveAndApprove')
self.snapshotInQueue: Optional[bool] = json.get('snapshotInQueue')
self.startValidDate: Optional[str] = json.get('startValidDate')
self.status: Optional[str] = json.get('status')
self.template: Optional[ContentManager.Template] = self.cm.templates.get(template_id)
self.templatesCount: int = json.get('templatesCount', 0)
self.thumbnailDownloadPaths: ContentManager.Media.ThumbnailDownloadPaths = ContentManager.Media.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.uploadType: Optional[str] = json.get('uploadType')
self.uploadedBy: Optional[ContentManager.User] = self.cm.users.get(uploaded_user_id)
self.uri: Optional[str] = json.get('uri')
self.validDateStatus: Optional[str] = json.get('validDateStatus')
self.volume: Optional[int] = json.get('volume')
self.webDavPath: Optional[str] = json.get('webDavPath')
self.width: Optional[int] = json.get('width')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
def usage_info(self):
used = False
expired = False
if self.validDateStatus == "EXPIRED":
expired = True
if self.messagesCount > 0 or self.templatesCount > 0:
used = True
return used, expired
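# Illustrative sketch (assumption, not from the original source): usage_info() reduces a media
# item to two booleans derived from validDateStatus and the message/template counters.
#
#   used, expired = media_item.usage_info()
#   # used    -> True when messagesCount > 0 or templatesCount > 0
#   # expired -> True when validDateStatus == "EXPIRED"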
class ThumbnailDownloadPaths:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ApprovalDetail:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.approvalStatus: Optional[str] = json.get('approvalStatus')
user_id = get_id(json.get('user'))
self.user: Optional[ContentManager.User] = self.media.cm.users.get(user_id)
to_approve_id = get_id(json.get('toApprove'))
self.toApprove: Optional[ContentManager.User] = self.media.cm.users.get(to_approve_id)
by_approve_id = get_id(json.get('approvedBy'))
self.approvedBy: Optional[ContentManager.User] = self.media.cm.users.get(by_approve_id)
self.messageText: Optional[str] = json.get('messageText')
self.lastModified: Optional[str] = json.get('lastModified')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ItemFile:
def __init__(self, media: ContentManager.Media, json: Dict[str, Any]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.filename: Optional[str] = json.get('filename')
self.size: Optional[int] = json.get('size')
self.prettifySize: Optional[str] = json.get('prettifySize')
self.uploadDate: Optional[str] = json.get('uploadDate')
self.version: Optional[int] = json.get('version')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.originalFilename: Optional[str] = json.get('originalFilename')
self.status: Optional[str] = json.get('status')
self.uploadedBy: Optional[str] = json.get('uploadedBy')
self.md5: Optional[str] = json.get('md5')
self.thumbnailDownloadPaths: Optional[ContentManager.Media.ItemFile.ThumbnailDownloadPaths] = ContentManager.Media.ItemFile.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, itemfile: ContentManager.Media.ItemFile, json: Optional[Dict[str, Any]]) -> None:
self.itemfile = itemfile
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['itemfile']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Approval:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.action: Optional[str] = json.get('action')
self.userId: Optional[int] = json.get('integer')
self.messageText: Optional[str] = json.get('messageText')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Field:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.displayName: Optional[str] = json.get('displayName')
self.value: Optional[str] = json.get('value')
self.templateId: Optional[int] = json.get('templateId')
self.required: Optional[bool] = json.get('required')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MediaList(MutableSequence[Media]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Media]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.Media] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
response: Dict[str, Any] = self.cm.request('get', '/media')
for elem in response.get('list', []):
item = ContentManager.Media(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Media]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Media with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Media) -> None:
self.__data.append(value)
def usage_info(self):
logging.info("Usage can only be determined for media that is referenced by a message or template and has not expired. To determine usage of all media in Content Manager, use the channel usage info instead.")
if len(self.__data) == 0:
self.__get_data()
expired_media: List[int] = []
used_media: List[int] = []
for media in self.__data:
if media.id is None:
logging.error("Expected media item to have an id")
continue
used, expired = media.usage_info()
if used and not expired:
used_media.append(media.id)
if expired:
expired_media.append(media.id)
expired_media = list(set(expired_media))
used_media = list(set(used_media))
return expired_media, used_media
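# Illustrative usage sketch (assumption, not from the original source): MediaList.usage_info()
# aggregates the per-item flags from Media.usage_info() into two de-duplicated id lists.
#
#   media_list = ContentManager.MediaList(cm)   # or however the ContentManager exposes it
#   expired_ids, used_ids = media_list.usage_info()
#   # expired_ids -> ids of media whose valid date range has passed
#   # used_ids    -> ids of media referenced by a message or template and not expired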
""" NETWORK """
class Network:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.active: Optional[bool] = json.get('active')
self.approvalMedia: Optional[bool] = json.get('approvalMedia')
self.approvalMessage: Optional[bool] = json.get('approvalMessage')
self.autoThumbnailGeneration: Optional[bool] = json.get('autoThumbnailGeneration')
self.automaticPlaylistDurationCalculation: Optional[bool] = json.get('automaticPlaylistDurationCalculation')
self.firstDay: Optional[str] = json.get('firstDay')
self.id: Optional[int] = json.get('id')
self.licenseState: Optional[str] = json.get('licenseState')
self.maxDatabaseAge: Optional[int] = json.get('maxDatabaseAge')
self.maxDownloadThreads: Optional[int] = json.get('maxDownloadThreads')
self.name: Optional[str] = json.get('name')
self.newsFeed: Optional[bool] = json.get('newsFeed')
self.newsFeedUrl: Optional[str] = json.get('newsFeedUrl')
self.passwordCheckCharTypes: Optional[bool] = json.get('passwordCheckCharTypes')
self.passwordMinimumLength: Optional[int] = json.get('passwordMinimumLength')
self.passwordUseLowercase: Optional[bool] = json.get('passwordUseLowercase')
self.passwordUseNonAlphanumeric: Optional[bool] = json.get('passwordUseNonAlphanumeric')
self.passwordUseNumbers: Optional[bool] = json.get('passwordUseNumbers')
self.passwordUseUppercase: Optional[bool] = json.get('passwordUseUppercase')
self.playbackAuditParser: Optional[bool] = json.get('playbackAuditParser')
self.purgeDaysPlanGenHistory: Optional[int] = json.get('purgeDaysPlanGenHistory')
self.senderEmailAddress: Optional[str] = json.get('snederEmailAddress')
self.sessionTimeout: Optional[int] = json.get('sessionTimeout')
self.showMessageFieldsInMultiplePages: Optional[bool] = json.get('showMessageFieldsInMultiplePages')
self.smtpAuthentication: Optional[bool] = json.get('smtpAuthentication')
self.smtpEnabled: Optional[bool] = json.get('smtpEnabled')
self.smtpPort: Optional[int] = json.get('smtpPort')
self.smtpServerAddress: Optional[str] = json.get('smtpServerAddress')
self.smtpSsl: Optional[bool] = json.get('smtpSsl')
self.smtpUsername: Optional[str] = json.get('smtpUsername')
self.userPasswordExpiresIn: Optional[int] = json.get('userPasswordExpiresIn')
self.userPasswordExpiresInMinutes: Optional[bool] = json.get('userPasswordExpiresInMinutes')
self.viewReport: Optional[bool] = json.get('viewReport')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class NetworkList(MutableSequence[Network]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Network]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.Network] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
response: Dict[str, Any] = self.cm.request('get', '/networks')
for elem in response.get('list', []):
item = ContentManager.Network(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Network]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Network with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Network) -> None:
self.__data.append(value)
""" PLAYER """
class PlayerGroup:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.numberOfPlayers: Optional[int] = json.get('numberOfPlayers')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'player_update' and use:
data = {k:v for k,v in data.items() if k in ['id']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerGroupList(MutableSequence[PlayerGroup]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.PlayerGroup]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.PlayerGroup] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
response: Dict[str, Any] = self.cm.request('get', '/playergroup')
for elem in response.get('list', []):
item = ContentManager.PlayerGroup(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.PlayerGroup]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'PlayerGroup with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.PlayerGroup) -> None:
old_ids = [elem.id for elem in self.__data]
if value.id not in old_ids:
self.__data.append(value)
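# Illustrative note (assumption, not from the original source): unlike the other list wrappers,
# PlayerGroupList.append() de-duplicates on id, so appending a group that is already present is a no-op.
#
#   groups = cm.playergroups
#   count = len(groups)
#   groups.append(groups[0])     # id already present
#   assert len(groups) == count  # nothing was added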
class PlayerHealth:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.alerted: Optional[bool] = json.get("alerted")
self.cleared: Optional[bool] = json.get("cleared")
self.clearedDate: Optional[str] = json.get("clearedDate")
self.descriptionDebug: Optional[List[str]] = json.get("descriptionDebug")
self.descriptionDetails: Optional[List[str]] = json.get("descriptionDetails")
self.descriptionTech: Optional[List[str]] = json.get('descriptionTech')
self.descriptionUser: Optional[List[str]] = json.get("descriptionUser")
self.errorNumber: Optional[str] = json.get("errorNumber")
self.first: Optional[str] = json.get("first")
self.id: Optional[int] = json.get("id")
self.last: Optional[str] = json.get("last")
self.message: Optional[str] = json.get("message")
self.playerCount: int = json.get("playerCount", 0)
self.problemMessage: Optional[str] = json.get("problemMessage")
self.problemNumber: Optional[int] = json.get("problemNumber")
self.reported: int = json.get("reported", 0)
self.reportedPlayers: List[ContentManager.PlayerHealth.ReportedPlayer] = [ContentManager.PlayerHealth.ReportedPlayer(self.cm, elem) for elem in json.get("reportedPlayers", [])]
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ReportedPlayer:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get("id")
self.first: Optional[str] = json.get("first")
self.last: Optional[str] = json.get("last")
self.playerLogFile: Optional[str] = json.get("playerLogFile")
self.reported: int = json.get("reported", 0)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerHealthList(MutableSequence[PlayerHealth]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.PlayerHealth]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.PlayerHealth] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
response: Dict[str, Any] = self.cm.request('get', '/playerhealth')
for elem in response.get('list', []):
item = ContentManager.PlayerHealth(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.PlayerHealth]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.problemMessage == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.problemMessage == search:
return elem
logging.warning(f'PlayerHealth with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.PlayerHealth) -> None:
self.__data.append(value)
class PlayerMetadata:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.datatype: Optional[str] = json.get('datatype')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.order: Optional[int] = json.get('order')
self.predefinedValues: List[ContentManager.PlayerMetadata.PredefinedValue] = [ContentManager.PlayerMetadata.PredefinedValue(self, elem) for elem in json.get('predefinedValues', [])]
self.valueType: Optional[str] = json.get('valueType')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
@staticmethod
def create(cm: ContentManager, name: str, datatype: str, valuetype: str):
name = name.replace('Player.', '')
data = {
'name': name,
'datatype': datatype,
'valueType': valuetype
}
cm.request('post', '/playerMetadata', data=json.dumps(data))
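# Illustrative usage sketch (assumption, not from the original source): create() strips a leading
# 'Player.' prefix and POSTs the definition to '/playerMetadata'. The datatype/valueType strings
# below are placeholders, not values confirmed by this module.
#
#   ContentManager.PlayerMetadata.create(cm, 'Player.Location', 'STRING', 'ANY')
#   # -> POST /playerMetadata with {'name': 'Location', 'datatype': 'STRING', 'valueType': 'ANY'}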
class PredefinedValue:
def __init__(self, metadata: ContentManager.PlayerMetadata, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.metadata = metadata
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
self.id: Optional[int] = json.get('id')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.value: Optional[str] = json.get('value')
self.variableId: Optional[int] = json.get('variableId')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['metadata']
for name, use in kwargs.items():
# no kwarg-specific filtering for this class; flags are accepted for interface consistency
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerMetadataList(MutableSequence[PlayerMetadata]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.PlayerMetadata]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
self.__data: List[ContentManager.PlayerMetadata] = []  # (re)initialize so the first load works and refreshes do not duplicate entries
response: Dict[str, Any] = self.cm.request('get', '/playerMetadata')
for elem in response.get('list', []):
item = ContentManager.PlayerMetadata(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.PlayerMetadata]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'PlayerMetadata with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.PlayerMetadata) -> None:
self.__data.append(value)
class Player:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
distribution_server_id = get_id(json.get('distributionServer'))
owner_workgroup_id = get_id(json.get('ownerWorkgroup'))
# site_id = get_id(json.get('site'))
self.active: Optional[str] = json.get('active')
self.bandwidthThrottlingWindows: List[ContentManager.Player.BandwidthThrottlingWindow] = [ContentManager.Player.BandwidthThrottlingWindow(self, elem) for elem in json.get('bandwidthThrottlingWindows', [])]
self.customId: Optional[str] = json.get('customId')
self.description: Optional[str] = json.get('description')
self.distributionServer: Optional[ContentManager.DistributionServer] = self.cm.distributionservers.get(distribution_server_id)
self.downloadThreads: Optional[int] = json.get('downloadThreads')
self.enabled: Optional[bool] = json.get('enabled')
self.exModules: List[ContentManager.ExModule] = get_list(json.get('exModules'), self.cm.ex_modules)
self.featureLicenseType: ContentManager.Player.FeatureLicense = ContentManager.Player.FeatureLicense(self, json.get('featureLicenseType'))
self.id: Optional[int] = json.get('id')
self.intervalSnapshotEnabled: Optional[bool] = json.get('intervalSnapshotEnabled')
self.lastModified: Optional[str] = json.get('lastModified')
self.limitDefaultBandwidth: Optional[int] = json.get('limitDefaultBandwidth')
self.logLevel: Optional[str] = json.get('logLevel')
self.mac: Optional[str] = json.get('mac')
self.metadataValue: List[ContentManager.Player.MetadataValue] = [ContentManager.Player.MetadataValue(self, elem) for elem in json.get('metadataValue', [])]
self.name: Optional[str] = json.get('name')
self.numberOfDisplays: Optional[int] = json.get('numberOfDisplays')
self.ownerWorkgroup: Optional[ContentManager.Workgroup] = self.cm.workgroups.get(owner_workgroup_id)
self.pairingKey: Optional[str] = json.get('pairingKey')
self.planDeliveryMethod: Optional[str] = json.get('planDeliveryMethod')
self.planDeliveryPassword: Optional[str] = json.get('planDeliveryPassword')
self.planDeliveryUsername: Optional[str] = json.get('planDeliveryUsername')
self.playerDisplays: List[ContentManager.Player.Display] = [ContentManager.Player.Display(self, elem) for elem in json.get('playerDisplays', [])]
self.playerId: Optional[str] = json.get('playerId')
self.playerUrlOrHostName: Optional[str] = json.get('playerUrlOrHostName')
self.playergroups: List[ContentManager.PlayerGroup] = get_list(json.get('playergroups'), self.cm.playergroups)
self.pollingInterval: Optional[int] = json.get('pollingInterval')
self.pollingUnit: Optional[str] = json.get('pollingUnit')
self.previewPlayer: Optional[bool] = json.get('previewPlayer') #DEPRECATED
self.readOnly: Optional[bool] = json.get('readOnly')
self.requestLogs: Optional[bool] = json.get('requestLogs')
self.sharedWorkgroups: List[ContentManager.Workgroup] = get_list(json.get('sharedWorkgroups'), self.cm.workgroups)
# self.site: Optional[ContentManager.Site] = self.cm.sites.get(site_id)
self.timezoneOffset: Optional[str] = json.get('timezoneOffset')
self.type: Optional[str] = json.get('type')
self.unusedFilesCache: Optional[int] = json.get('unusedFilesCache')
self.usedPairingKey: Optional[str] = json.get('usedPairingKey')
self.uuid: Optional[str] = json.get('uuid')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def unpack_json_state(self, json: Dict[str, Any]):
self.host: Optional[str] = json.get('host')
self.ip: Optional[str] = json.get('ip')
self.lastBooted: Optional[str] = json.get('lastBooted')
self.lastBootedTimestamp: Optional[str] = json.get('lastBootedTimestamp')
self.lastReported: Optional[str] = json.get('lastReported')
self.lastReportedTimestamp: Optional[str] = json.get('lastReportedTimestamp')
self.planState: Optional[str] = json.get('planState')
self.releaseString: Optional[str] = json.get('releaseString')
self.state: Optional[str] = json.get('state')
def generate_uuid(self):
params = {
'ids': self.id
}
response = self.cm.request('post', '/storage', data=json.dumps(params))
return response.get('value')
def update_metadata(self, name: str, value: Any):
if not name.startswith('Player.'):
name = f"Player.{name}"
metadata = self.cm.player_metadatas.get(name)
if metadata is None:
raise Exception(f'No metadata found with name {name}')
if self.metadataValue is None:
self.metadataValue = [ContentManager.Player.MetadataValue(self, {'value': value, 'playerMetadata': metadata.json()})]
else:
exists = False
for i, v in enumerate(self.metadataValue):
if v.playerMetadata is None:
continue
if v.playerMetadata.name == name:
exists = True
if not value is None:
self.metadataValue[i].value = value
if not exists:
self.metadataValue.append(ContentManager.Player.MetadataValue(self, {'value': value, 'playerMetadata': metadata.json()}))
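        # A hedged usage sketch: set one metadata value on a player and persist it.
        # The player name "Lobby Screen 01" and the field "Location" are illustrative,
        # not part of any guaranteed schema.
        #
        #   player = cm.players.get("Lobby Screen 01")
        #   if player is not None:
        #       player.update_metadata("Location", "Lobby")  # stored as "Player.Location"
        #       player.save()                                 # PUT /players/{id}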
def save(self):
self.cm.request('put', f'/players/{self.id}', data=json.dumps(self.json(update=True)), debug_key='update_player')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "update" and use:
data = {k:v for k,v in data.items() if k in ['active', 'availableTargets', 'distributionServer', 'downloadThreads', 'enabled', 'id', 'lastModified', 'logLevel', 'mac', 'metadataValue', 'name', 'numberOfDisplays', 'overrideStatus', 'planDeliveryMethod', 'playerDisplays', 'pollingInterval', 'pollingUnit', 'previewPlayer', 'readOnly', 'requestLogs', 'timezoneOffset', 'type', 'unusedFilesCache', 'uuid', 'workgroups', 'ownerWorkgroup', 'bandwidthThrottlingWindows', 'limitDefaultBandwidth', 'playergroups', 'description']}
for key in list(kwargs.keys()):
if not "_" in key:
kwargs[f"player_{key}"] = kwargs.pop(key)
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
own_workgroup_id = data.get('ownerWorkgroup', {}).get("id")
if own_workgroup_id is None:
return data
else:
data.pop('ownerWorkgroup', None)
for i, elem in enumerate(data.get('workgroups', [])):
if elem.get('id') == own_workgroup_id:
data['workgroups'][i]['owner'] = True
return data
class Display:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.player = player
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
channel_id = get_id(json.get('channel'))
self.channel: Optional[ContentManager.Channel] = self.player.cm.channels.get(channel_id)
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.screenCounter: Optional[int] = json.get('screenCounter')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class BandwidthThrottlingWindow:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.player = player
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
self.day: Optional[List[str]] = json.get('day')
self.endTime: Optional[str] = json.get('endTime')
self.id: Optional[int] = json.get('id')
self.limit: Optional[int] = json.get('limit')
self.order: Optional[int] = json.get('order')
self.startTime: Optional[str] = json.get('startTime')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class FeatureLicense:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.player = player
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
self.alternateScheduleOptionsSupport: Optional[bool] = json.get('alternateScheduleOptionsSupport')
self.customMonitorConfigs: Optional[bool] = json.get('customMonitorConfigs')
self.deviceManagement: Optional[bool] = json.get('deviceManagement')
self.html5Support: Optional[bool] = json.get('html5Support')
self.imageSupport: Optional[bool] = json.get('imageSupport')
self.maxChannel: Optional[int] = json.get('maxChannel')
self.maxOutputs: Optional[int] = json.get('maxOutputs')
self.maxPixel: Optional[int] = json.get('maxPixel')
self.maxZone: Optional[int] = json.get('maxZone')
self.playerPlaybackAuditLogsSupport: Optional[bool] = json.get('playerPlaybackAuditLogsSupport')
self.scalaIntegrationAccess: Optional[bool] = json.get('scalaIntegrationAccess')
self.scalaScriptSupport: Optional[bool] = json.get('scalaScriptSupport')
self.statusMonitoring: Optional[str] = json.get('statusMonitoring')
self.total: Optional[int] = json.get('total')
self.triggersSupport: Optional[bool] = json.get('triggersSupport')
self.type: Optional[str] = json.get('type')
self.used: Optional[int] = json.get('used')
self.videoSupport: Optional[bool] = json.get('videoSupport')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MetadataValue:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
self.player = player
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
player_metadata_id = get_id(json.get('playerMetadata'))
self.id: Optional[int] = json.get('id')
self.playerMetadata: Optional[ContentManager.PlayerMetadata] = self.player.cm.player_metadatas.get(player_metadata_id)
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerList(MutableSequence[Player]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Player]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Initialise the backing list before __get_data() tries to append to it.
            self.__data: List[ContentManager.Player] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/players')
for elem in response.get('list', []):
item = ContentManager.Player(self.cm, elem)
item.unpack_json_state(self.cm.request('get', f'/players/{item.id}/state', debug_key="player_state"))
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Player]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Player with {search} not found')
return None
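        # Hedged example: players are fetched lazily from /players on first use and can
        # be looked up either by numeric id or by name (the values below are illustrative).
        #
        #   by_id = cm.players.get(42)
        #   by_name = cm.players.get("Lobby Screen 01")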
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Player) -> None:
self.__data.append(value)
""" PLAYLIST """
class Playlist:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.media: List[ContentManager.Media] = []
self.subplaylists: List[int] = []
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
created_by_id = get_id(json.get('createdBy'))
modified_by_id = get_id(json.get('modifiedBy'))
self.asSubPlaylistsCount: int = json.get('asSubPlaylistsCount', 0)
self.campaignChannelsCount: Optional[int] = json.get('campaignChannelsCount')
self.campaignMessagesCount: Optional[int] = json.get('campaignMessagesCount')
self.campaignPlaylist: Optional[bool] = json.get('campaignPlaylist')
self.categories: List[ContentManager.Category] = get_list(json.get('categories'), self.cm.categories)
self.channelsCount: int = json.get('channelsCount', 0)
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.createdBy: Optional[ContentManager.User] = self.cm.users.get(created_by_id)
self.createdByName: Optional[str] = json.get('createdByName')
self.createdDate: Optional[str] = json.get('createdDate')
self.description: Optional[str] = json.get('description')
self.durationCalculationCompleted: Optional[bool] = json.get('durationCalculationCompleted')
self.enableSmartPlaylist: Optional[bool] = json.get('enableSmartPlaylist')
self.extSourceDuration: Optional[int] = json.get('extSourceDuration')
self.healthy: Optional[bool] = json.get('healthy')
self.htmlDuration: Optional[int] = json.get('htmlDuration')
self.id: Optional[int] = json.get('id')
self.imageDuration: Optional[int] = json.get('imageDuration')
self.itemCount: int = json.get('itemCount', 0)
self.lastModified: Optional[str] = json.get('lastModified')
self.maxDuration: Optional[int] = json.get('maxDuration')
self.messagesCount: int = json.get('messagesCount', 0)
self.minDuration: Optional[int] = json.get('minDuration')
self.modifiedBy: Optional[ContentManager.User] = self.cm.users.get(modified_by_id)
self.modifiedByName: Optional[str] = json.get('modifiedByName')
self.name: Optional[str] = json.get('name')
self.pickPolicy: Optional[str] = json.get('pickPolicy')
self.playlistItems: List[ContentManager.Playlist.PlaylistItem] = [ContentManager.Playlist.PlaylistItem(self, elem) for elem in json.get('playlistItems', [])]
self.playlistType: Optional[str] = json.get('playlistType')
self.prettifyDuration: Optional[str] = json.get('prettifyDuration')
self.priority: Optional[str] = json.get('priority')
self.problemsCount: Optional[int] = json.get('problemsCount')
self.readOnly: Optional[bool] = json.get('readOnly')
self.shuffleNoRepeatType: Optional[str] = json.get('shuffleNoRepeatType')
self.shuffleNoRepeatWithin: Optional[int] = json.get('shuffleNoRepeatWithin')
self.thumbnailDownloadPath: Optional[str] = json.get('thumbnailDownloadPath')
self.thumbnailDownloadPaths: ContentManager.Playlist.ThumbnailDownloadPaths = ContentManager.Playlist.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.transitionDuration: Optional[int] = json.get('transitionDuration')
self.warningsCount: Optional[int] = json.get('warningsCount')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
        def process(self, data: Union[int, ContentManager.Playlist], playlist_path: Optional[List[int]] = None):
            # Use None instead of a mutable default so the visited-path list is not shared between calls.
            playlist_path = playlist_path if playlist_path is not None else []
            if isinstance(data, int):
                playlist: Optional[ContentManager.Playlist] = self.cm.playlists.get(data)
                id = data
            else:
                playlist = data
                id = data.id
            if id is None:
                raise Exception("ID cannot be None")
            if id in playlist_path:
                raise Exception(f"Playlist loop detected {playlist_path}")
playlist_path.append(id)
if not playlist is None:
if playlist.playlistItems is None:
playlist_path.pop()
return
for playlistItem in playlist.playlistItems:
if not playlistItem.media is None:
if not playlistItem.media in self.media:
self.media.append(playlistItem.media)
if not playlistItem.subplaylist is None:
if not playlistItem.subplaylist in self.subplaylists:
self.subplaylists.append(playlistItem.subplaylist)
self.process(playlistItem.subplaylist, playlist_path)
playlist_path.pop()
def get_media(self) -> List[ContentManager.Media]:
if self.id is None:
raise Exception("Object needs to have ID")
self.process(self.id)
return self.media
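        # Hedged example: resolve every media item referenced by this playlist, including
        # items reached through nested sub-playlists (process() walks them recursively and
        # raises on playlist loops). The playlist name is illustrative.
        #
        #   playlist = cm.playlists.get("Week 12 rotation")
        #   if playlist is not None:
        #       media_names = [m.name for m in playlist.get_media()]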
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
def usage_info(self):
used = False
used_media: List[int] = []
media = self.get_media()
for elem in media:
if not elem.id is None and not elem.id in used_media:
used_media.append(elem.id)
if self.messagesCount > 0:
used = True
return used, self.subplaylists, used_media
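        # Hedged note on the return shape: usage_info() yields a (used, subplaylist_ids,
        # media_ids) tuple, where "used" is True only when the playlist is referenced by
        # at least one message.
        #
        #   used, subplaylist_ids, media_ids = playlist.usage_info()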
class PlaylistItem:
def __init__(self, playlist: ContentManager.Playlist, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.playlist = playlist
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
media_id = get_id(json.get('media'))
sub_playlist_id = get_id(json.get('subplaylist'))
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.auditItem: Optional[bool] = json.get('auditItem')
self.conditions: List[ContentManager.Playlist.PlaylistItem.Condition] = [ContentManager.Playlist.PlaylistItem.Condition(self, elem) for elem in json.get('conditions', [])]
self.disabled: Optional[bool] = json.get('disabled')
self.duration: Optional[int] = json.get('duration')
self.durationHoursSeconds: Optional[str] = json.get('durationHoursSeconds')
self.endValidDate: Optional[str] = json.get('endValidDate')
self.id: Optional[int] = json.get('id')
self.inPoint: Optional[int] = json.get('inPoint')
self.media: Optional[ContentManager.Media] = self.playlist.cm.media.get(media_id)
self.meetAllConditions: Optional[bool] = json.get('meetAllConditions')
self.options: List[ContentManager.Playlist.PlaylistItem.Option] = [ContentManager.Playlist.PlaylistItem.Option(self, elem) for elem in json.get('options', [])]
self.outPoint: Optional[int] = json.get('outPoint')
self.playFullscreen: Optional[bool] = json.get('playFullscreen')
self.playlistItemType: Optional[str] = json.get('playlistItemType')
self.prettifyInPoint: Optional[str] = json.get('prettifyInPoint')
self.prettifyOutPoint: Optional[str] = json.get('prettifyOutPoint')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.startValidDate: Optional[str] = json.get('startValidDate')
self.status: Optional[List[str]] = json.get('status')
self.subPlaylistPickPolicy: Optional[int] = json.get('subPlaylistPickPolicy')
self.subplaylist: Optional[int] = sub_playlist_id
self.timeSchedules: List[ContentManager.Playlist.PlaylistItem.Schedule] = [ContentManager.Playlist.PlaylistItem.Schedule(self, elem) for elem in json.get('timeSchedules', [])]
self.useValidRange: Optional[bool] = json.get('useValidRange')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlist']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Schedule:
def __init__(self, playlistitem: ContentManager.Playlist.PlaylistItem, json: Dict[str, Any]) -> None:
self.playlistitem = playlistitem
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.days: Optional[List[str]] = json.get('days')
self.endTime: Optional[str] = json.get('endTime')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.startTime: Optional[str] = json.get('startTime')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlistitem']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Option:
def __init__(self, playlistitem: ContentManager.Playlist.PlaylistItem, json: Dict[str, Any]) -> None:
self.playlistitem = playlistitem
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.key: Optional[str] = json.get('key')
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlistitem']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Condition:
def __init__(self, playlistitem: ContentManager.Playlist.PlaylistItem, json: Dict[str, Any]) -> None:
self.playlistitem = playlistitem
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.type: Optional[str] = json.get('type')
self.comparator: Optional[str] = json.get('comparator')
self.value: Optional[str] = json.get('value')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.metadata: Optional[int] = get_id(json.get('metadata'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlistitem']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, playlist: ContentManager.Playlist, json: Optional[Dict[str, Any]]) -> None:
self.playlist = playlist
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlist']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlaylistList(MutableSequence[Playlist]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Playlist]] = None) -> None:
            super().__init__()
            self.cm = cm
            self.__data: List[ContentManager.Playlist] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/playlists/all')
for elem in response.get('list', []):
item = ContentManager.Playlist(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Playlist]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
            for elem in self.__data:
                if isinstance(search, int):
                    if elem.id == search:
                        return elem
                else:
                    if elem.name == search:
                        return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Playlist with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Playlist) -> None:
self.__data.append(value)
def usage_info(self):
logging.info("Usage of the playlists can only be determined if the playlist is used within a message. When you want to determine usage of all playlists within CM then use the channel usage info")
if len(self.__data) == 0:
self.__get_data()
used_playlists: List[int] = []
used_media: List[int] = []
for playlist in self.__data:
if playlist.id is None:
logging.error("Expected channel to have id")
continue
used, temp_playlist, temp_media = playlist.usage_info()
if used:
used_playlists.append(playlist.id)
used_playlists.extend(temp_playlist)
used_media.extend(temp_media)
used_playlists = list(set(used_playlists))
used_media = list(set(used_media))
return used_playlists, used_media
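        # Hedged example: collect the ids of playlists and media that are in use across
        # all playlists known to this Content Manager instance.
        #
        #   used_playlist_ids, used_media_ids = cm.playlists.usage_info()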
""" RESOURCE """
class Resource:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.implicitResources: Optional[List[str]] = json.get('implicitResources')
self.name: Optional[str] = json.get('name')
self.parentId: Optional[int] = json.get('parentId')
self.sortOrder: Optional[int] = json.get('sortOrder')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'role_update' and use:
data = {k:v for k,v in data.items() if k in ['id', 'name']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ResourceList(MutableSequence[Resource]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Resource]] = None) -> None:
            super().__init__()
            self.cm = cm
            self.__data: List[ContentManager.Resource] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/roles/resources')
for elem in response.get('resources', []):
item = ContentManager.Resource(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Resource]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
            logging.warning(f'Resource with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Resource) -> None:
self.__data.append(value)
""" ROLE """
class Role:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.availableSeats: Optional[int] = json.get('availableSeats')
self.id: Optional[int] = json.get('id')
self.licenseRequirement: Optional[str] = json.get('licenseRequirement')
self.name: Optional[str] = json.get('name')
self.resources: List[ContentManager.Resource] = get_list(json.get('resources'), self.cm.resources)
self.system: Optional[bool] = json.get('system')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "update" and use:
data = {k:v for k,v in data.items() if k in ['name', 'system', 'resources']}
for key in list(kwargs.keys()):
if not "_" in key:
kwargs[f"role_{key}"] = kwargs.pop(key)
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
@staticmethod
def create(cm: ContentManager, name: str, resources: List[Tuple[str, int]]):
data = {
'name': name,
'resources': [{'name': elem[0], 'id': elem[1]} for elem in resources],
'system': False
}
response = cm.request('post', '/roles', data=json.dumps(data))
response_id = response.get('id')
if response_id is None:
raise Exception("Something went wrong when creating role")
cm.request('put', f'/roles/{response_id}', data=json.dumps(data))
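        # Hedged example: create a role from (resource name, resource id) pairs. The role
        # and resource names are illustrative; resources can be looked up via cm.resources.
        #
        #   res = cm.resources.get("Content")
        #   if res is not None and res.name is not None and res.id is not None:
        #       ContentManager.Role.create(cm, "Editors", [(res.name, res.id)])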
def save(self):
self.cm.request('put', f'/roles/{self.id}', data=json.dumps(self.json(update=True)), debug_key='update_role')
class RoleList(MutableSequence[Role]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Role]] = None) -> None:
            super().__init__()
            self.cm = cm
            self.__data: List[ContentManager.Role] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/roles')
for elem in response.get('list', []):
item = ContentManager.Role(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Role]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Role with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Role) -> None:
self.__data.append(value)
""" TEMPLATE """
class Template:
def __init__(self, cm: ContentManager, data: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
modified_user_id = get_id(json.get('modifiedBy'))
uploaded_user_id = get_id(json.get('uploadedBy'))
self.approvalDetail: ContentManager.Template.ApprovalDetail = ContentManager.Template.ApprovalDetail(self, json.get('approvalDetail'))
self.approvalStatus: Optional[str] = json.get('approvalStatus')
self.archived: Optional[bool] = json.get('archived')
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.campaignMedia: Optional[bool] = json.get('campaignMedia')
self.categories: List[ContentManager.Category] = get_list(json.get('categories'), self.cm.categories)
self.createdDate: Optional[str] = json.get('createdDate')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.generatingThumbnail: Optional[bool] = json.get('generatingThumbnail')
self.globalTemplateFields: List[ContentManager.Template.Field] = [ContentManager.Template.Field(self, elem) for elem in json.get('globalTemplateFields', [])]
self.height: Optional[int] = json.get('height')
self.id: Optional[int] = json.get('id')
self.lastModified: Optional[str] = json.get('lastModified')
self.length: Optional[int] = json.get('length')
self.mediaId: Optional[int] = json.get('mediaId')
self.mediaItemFiles: List[ContentManager.Template.ItemFile] = [ContentManager.Template.ItemFile(self, elem) for elem in json.get('mediaItemFiles', [])]
self.mediaType: Optional[str] = json.get('mediaType')
self.messagesCount: int = json.get('messagesCount', 0)
self.modifiedBy: Optional[ContentManager.User] = self.cm.users.get(modified_user_id)
self.name: Optional[str] = json.get('name')
self.neverArchive: Optional[bool] = json.get('neverArchive')
self.numberOfFields: int = json.get('numberOfFields', 0)
self.numberOfFiles: int = json.get('numberOfFiles', 0)
self.originalCreatedDate: Optional[str] = json.get('originalCreatedDate')
self.path: Optional[str] = json.get('path')
self.playFullscreen: Optional[bool] = json.get('playFullscreen')
self.playlistsCount: int = json.get('playlistsCount', 0)
self.prettifyDuration: Optional[str] = json.get('prettifyDuration')
self.prettifyLength: Optional[str] = json.get('prettifyLength')
self.prettifyType: Optional[str] = json.get('prettifyType')
self.readOnly: Optional[bool] = json.get('readOnly')
self.revision: Optional[int] = json.get('revision')
self.startValidDate: Optional[str] = json.get('startValidDate')
self.status: Optional[str] = json.get('status')
self.templateFields: List[ContentManager.Template.Field] = [ContentManager.Template.Field(self, elem) for elem in json.get('templateFields', [])]
self.templateVersion: Optional[int] = json.get('templateVersion')
self.templatesCount: int = json.get('templatesCount', 0)
self.thumbnailDownloadPaths: ContentManager.Template.ThumbnailDownloadPaths = ContentManager.Template.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.uploadType: Optional[str] = json.get('uploadType')
self.uploadedBy: Optional[ContentManager.User] = self.cm.users.get(uploaded_user_id)
self.validDateStatus: Optional[str] = json.get('validDateStatus')
self.webDavPath: Optional[str] = json.get('webDavPath')
self.width: Optional[int] = json.get('width')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ApprovalDetail:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.approvalStatus: Optional[str] = json.get('approvalStatus')
user_id = get_id(json.get('user'))
self.user: Optional[ContentManager.User] = self.template.cm.users.get(user_id)
to_approve_id = get_id(json.get('toApprove'))
self.toApprove: Optional[ContentManager.User] = self.template.cm.users.get(to_approve_id)
by_approve_id = get_id(json.get('approvedBy'))
self.approvedBy: Optional[ContentManager.User] = self.template.cm.users.get(by_approve_id)
self.messageText: Optional[str] = json.get('messageText')
self.lastModified: Optional[str] = json.get('lastModified')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ItemFile:
            def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.filename: Optional[str] = json.get('filename')
self.size: Optional[int] = json.get('size')
self.prettifySize: Optional[str] = json.get('prettifySize')
self.uploadDate: Optional[str] = json.get('uploadDate')
self.version: Optional[int] = json.get('version')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.originalFilename: Optional[str] = json.get('originalFilename')
self.status: Optional[str] = json.get('status')
self.uploadedBy: Optional[str] = json.get('uploadedBy')
self.md5: Optional[str] = json.get('md5')
self.thumbnailDownloadPaths: ContentManager.Template.ItemFile.ThumbnailDownloadPaths = ContentManager.Template.ItemFile.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, itemfile: ContentManager.Template.ItemFile, json: Optional[Dict[str, Any]]) -> None:
self.itemfile = itemfile
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['itemfile']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Field:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.displayName: Optional[str] = json.get('displayName')
self.value: Optional[str] = json.get('value')
self.required: Optional[bool] = json.get('required')
self.type: Optional[str] = json.get('type')
self.editable: Optional[bool] = json.get('editable')
self.maxCharacters: Optional[int] = json.get('maxCharacters')
self.maxLines: Optional[int] = json.get('maxLines')
self.useDefault: Optional[bool] = json.get('useDefault')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Page:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.editable: Optional[bool] = json.get('editable')
self.thumbnailPageEnabled: Optional[bool] = json.get('thumbnailPageEnabled')
self.order: Optional[int] = json.get('order')
self.thumbnailPageNo: Optional[int] = json.get('thumbnailPageNo')
self.thumbnailDownloadPaths: ContentManager.Template.Page.ThumbnailDownloadPaths = ContentManager.Template.Page.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.templateFields: List[ContentManager.Template.Page.Field] = [ContentManager.Template.Page.Field(self, elem) for elem in json.get('templateFields', [])]
self.idents: List[ContentManager.Template.Page.Ident] = [ContentManager.Template.Page.Ident(self, elem) for elem in json.get('idents', [])]
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, page: ContentManager.Template.Page, json: Optional[Dict[str, Any]]) -> None:
self.page = page
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['page']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Field:
def __init__(self, page: ContentManager.Template.Page, json: Optional[Dict[str, Any]]) -> None:
self.page = page
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.displayName: Optional[str] = json.get('displayName')
self.value: Optional[str] = json.get('value')
self.required: Optional[bool] = json.get('required')
self.type: Optional[str] = json.get('type')
self.editable: Optional[bool] = json.get('editable')
self.maxCharacters: Optional[int] = json.get('maxCharacters')
self.maxLines: Optional[int] = json.get('maxLines')
self.useDefault: Optional[bool] = json.get('useDefault')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['page']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Ident:
def __init__(self, page: ContentManager.Template.Page, json: Optional[Dict[str, Any]]) -> None:
self.page = page
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.label: Optional[str] = json.get('label')
self.languageCode: Optional[str] = json.get('languageCode')
self.description: Optional[str] = json.get('description')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['page']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class TemplateList(MutableSequence[Template]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Template]] = None) -> None:
            super().__init__()
            self.cm = cm
            self.__data: List[ContentManager.Template] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/templates')
for elem in response.get('list', []):
item = ContentManager.Template(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Template]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.warning(f'Template with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Template) -> None:
self.__data.append(value)
""" USER """
class User:
def __init__(self, cm: ContentManager, data: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
            # The original code read the misspelled key 'auhtenticationMethod'; accept both spellings.
            self.authenticationMethod: Optional[str] = json.get('authenticationMethod', json.get('auhtenticationMethod'))
self.canChangePassword: Optional[bool] = json.get('canChangePassword')
self.dateFormat: Optional[str] = json.get('dateFormat')
self.emailaddress: Optional[str] = json.get('emailaddress')
self.enabled: Optional[bool] = json.get('enabled')
self.firstname: Optional[str] = json.get('firstname')
self.forcePasswordChange: Optional[bool] = json.get('forcePasswordChange')
self.id: Optional[int] = json.get('id')
self.isAutoMediaApprover: Optional[bool] = json.get('isAutoMediaApprover')
self.isAutoMessageApprover: Optional[bool] = json.get('isAutoMessageApprover')
self.isSuperAdministrator: Optional[bool] = json.get('isSuperAdministrator')
self.isWebserviceUser: Optional[bool] = json.get('isWebserviceUser')
self.language: Optional[str] = json.get('language')
self.languageCode: Optional[str] = json.get('languageCode')
self.lastLogin: Optional[str] = json.get('lastLogin')
self.lastname: Optional[str] = json.get('lastname')
self.name: Optional[str] = json.get('name')
self.oldPassword: Optional[str] = json.get('oldPassword')
self.password: Optional[str] = json.get('password')
self.passwordLastChanged: Optional[str] = json.get('passwordLastChanged')
self.receiveApprovalEmails: Optional[bool] = json.get('receiveApprovalEmails')
self.receiveEmailAlerts: Optional[bool] = json.get('receiveEmailAlerts')
self.roles: List[ContentManager.Role] = get_list(json.get('roles'), self.cm.roles)
self.theme: Optional[str] = json.get('theme')
self.timeFormat: Optional[str] = json.get('timeFormat')
self.userAccountWorkgroups: List[ContentManager.Workgroup] = get_list(json.get('userAccountWorkgroups'), self.cm.workgroups)
self.username: Optional[str] = json.get('username')
self.workgroup: Optional[int] = get_id(json.get('workgroup')) #DEPRECATED
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
@staticmethod
def create(cm: ContentManager, method: str, email: str, firstname: str, lastname: str, role: ContentManager.Role, username: str):
if cm.network is None:
raise Exception("Need network to create user")
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for _ in range(10))
data = {
"authenticationMethod": method,
"emailaddress": email,
"firstname": firstname,
"lastname": lastname,
"roles": [{
"id": role.id
}],
"username": username,
"receiveEmailAlerts": False,
"enabled": True,
"forcePasswordChange": True,
"password": password,
"confirmPassword": password
}
cm.request("post", f"/users/usernetworks/{cm.network.id}", data=json.dumps(data), debug_key="create_user")
return password
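        # Hedged example: create a user on the current network and keep the generated
        # one-time password (forcePasswordChange is set, so it must be changed at first
        # login). The "LOCAL" authentication method and the other values are illustrative.
        #
        #   role = cm.roles.get("Editors")
        #   if role is not None:
        #       temp_password = ContentManager.User.create(
        #           cm, "LOCAL", "jane@example.com", "Jane", "Doe", role, "jane.doe")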
class UserList(MutableSequence[User]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.User]] = None) -> None:
            super().__init__()
            self.cm = cm
            self.__data: List[ContentManager.User] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/users')
for elem in response.get('list', []):
item = ContentManager.User(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.User]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.username == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.username == search:
return elem
logging.warning(f'User with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.User) -> None:
self.__data.append(value)
""" WORKGROUP """
class Workgroup:
def __init__(self, cm: ContentManager, data: Optional[Dict[str, Any]]) -> None:
self.cm = cm
data = data if not data is None else {}
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
self.children: List[ContentManager.Workgroup] = [ContentManager.Workgroup(self.cm, elem) for elem in json.get('children', [])]
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.owner: Optional[bool] = json.get('owner')
self.parentId: Optional[int] = json.get('parentId')
self.parentName: Optional[str] = json.get('parentName')
self.userCount: Optional[int] = json.get('userCount')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'player_update' and use:
data = {k:v for k,v in data.items() if k in ['id']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class WorkgroupList(MutableSequence[Workgroup]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Workgroup]] = None) -> None:
            super().__init__()
            self.cm = cm
            self.__data: List[ContentManager.Workgroup] = []
            if init_list is None:
                self.__get_data()
            else:
                self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/workgroups')
for elem in response.get('list', []):
item = ContentManager.Workgroup(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Workgroup]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
temp = search_children(search, elem.children, "id", "name", "children")
if not temp is None:
return temp
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
temp = search_children(search, elem.children, "id", "name", "children")
if not temp is None:
return temp
            logging.warning(f'Workgroup with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Workgroup) -> None:
self.__data.append(value)
| /scala_wrapper-0.0.6-py3-none-any.whl/scala_wrapper/content_manager/__init__.py | 0.849144 | 0.297746 | __init__.py | pypi |
from __future__ import annotations
import json
import logging
import os
import shutil
from datetime import datetime
from typing import Any, Dict, List, MutableSequence, Optional, Union, overload
import ipinfo
import requests
from requests.models import Response
from scala_wrapper.utils import typedef
def get_id(value: Optional[Dict[str, Any]]) -> Optional[int]:
if not value is None:
return value.get('id')
return None
def get_name(value: Optional[Dict[str, Any]]) -> Optional[str]:
if not value is None:
return value.get('name')
return None
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.CategoryList) -> List[ContentManager.Category]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.DistributionServerList) -> List[ContentManager.DistributionServer]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.ExModuleList) -> List[ContentManager.ExModule]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.MediaList) -> List[ContentManager.Media]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.ResourceList) -> List[ContentManager.Resource]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.PlayerGroupList) -> List[ContentManager.PlayerGroup]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.PlayerList) -> List[ContentManager.Player]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.RoleList) -> List[ContentManager.Role]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.UserList) -> List[ContentManager.User]:
...
@overload
def get_list(value: Optional[List[Union[Dict[str, Any], int]]], data: ContentManager.WorkgroupList) -> List[ContentManager.Workgroup]:
...
def get_list(
value: Optional[List[Union[Dict[str, Any], int]]],
data: Union[
ContentManager.CategoryList,
ContentManager.DistributionServerList,
ContentManager.ExModuleList,
ContentManager.MediaList,
ContentManager.PlayerGroupList,
ContentManager.PlayerList,
ContentManager.ResourceList,
ContentManager.RoleList,
ContentManager.UserList,
ContentManager.WorkgroupList,
]
):
temp: List[Any] = []
if not value is None:
for item in value:
if isinstance(item, int):
d = data.get(item)
if d is None:
continue
temp.append(d)
else:
item_id = get_id(item)
if item_id is None:
item_id = get_name(item)
if item_id is None:
continue
d = data.get(item_id)
if d is None:
continue
temp.append(d)
return temp
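# Hedged example of how get_list resolves references: the API may return either full
# objects or bare ids, and both end up as wrapper instances; entries that cannot be
# resolved are skipped. The ids and names below are illustrative.
#
#   workgroups = get_list([{'id': 3, 'name': 'Marketing'}, 7], cm.workgroups)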
def search_children(search: Union[int, str], children: List[Any], int_attr: str, str_attr: str, child_attr: str) -> Optional[Any]:
for elem in children:
if isinstance(search, int):
if getattr(elem, int_attr) == search:
return elem
else:
if getattr(elem, str_attr) == search:
return elem
temp = search_children(search, getattr(elem, child_attr), int_attr, str_attr, child_attr)
if not temp is None:
return temp
return None
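# Hedged example: depth-first search through nested children, matching on the "id"
# attribute for int searches and on "name" for str searches. The workgroup and its
# name are illustrative.
#
#   match = search_children("Lobby Displays", workgroup.children, "id", "name", "children")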
@overload
def clean_data(data: Dict[Any, Any]) -> Optional[Dict[Any, Any]]:
...
@overload
def clean_data(data: List[Any]) -> Optional[List[Any]]:
...
def clean_data(data: Union[Dict[Any, Any], List[Any]]):
if isinstance(data, dict):
for key, value in data.copy().items():
if value is None:
del data[key]
if isinstance(value, list) or isinstance(value, dict):
c_data = clean_data(value)
if not c_data is None:
data[key] = c_data
else:
del data[key]
if len(data) > 0:
return data
else:
return None
    else:
        # Build a cleaned copy instead of mutating the list while iterating over it,
        # which would otherwise skip elements after a removal.
        cleaned: List[Any] = []
        for elem in data:
            if elem is None:
                continue
            if isinstance(elem, (list, dict)):
                c_data = clean_data(elem)
                if c_data is not None:
                    cleaned.append(c_data)
            else:
                cleaned.append(elem)
        data[:] = cleaned
if len(data) > 0:
return data
else:
return None
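# Hedged example: clean_data() recursively drops None values and returns None when
# nothing is left, which keeps request payloads minimal.
#
#   clean_data({'name': 'Player 1', 'description': None, 'tags': [None]})
#   # -> {'name': 'Player 1'}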
class ContentManager:
def __init__(self, username: str, password: str, cm_url: str, client: Optional[str] = None, short: Optional[str] = None, client_id: Optional[int] = None, ip_handler: Optional[str] = None) -> None:
self.client = client
self.short = short
self.client_id = client_id
self.cm_url = cm_url
self.username = username
self.password = password
self.air_id = None
self.version = None
        self.ip_handler = ipinfo.getHandler(ip_handler) if ip_handler is not None else None
self.last_load = datetime.now()
self.approvalstatuses = self.ApprovalStatusList(self, [])
self.categories = self.CategoryList(self, [])
self.channels = self.ChannelList(self, [])
self.distributionservers = self.DistributionServerList(self, [])
self.ex_modules = self.ExModuleList(self, [])
self.licenses = self.LicenseList(self, [])
self.media = self.MediaList(self, [])
self.networks = self.NetworkList(self, [])
self.playergroups = self.PlayerGroupList(self, [])
self.playerhealths = self.PlayerHealthList(self, [])
self.player_metadatas = self.PlayerMetadataList(self, [])
self.players = self.PlayerList(self, [])
self.playlists = self.PlaylistList(self, [])
self.resources = self.ResourceList(self, [])
self.roles = self.RoleList(self, [])
self.templates = self.TemplateList(self, [])
self.users = self.UserList(self, [])
self.workgroups = self.WorkgroupList(self, [])
self.login()
self.get_version()
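    # Illustrative usage (assumed credentials and URL, not executed):
    #   cm = ContentManager("api-user", "secret", "https://cm.example.com/ContentManager/api/rest")
    # The constructor builds empty *List containers, authenticates via login() and fetches
    # the product version; the individual lists lazily load their data from the REST API
    # the first time they are read.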
""" BASIC FUNCTIONALITY """
def request(self, method: str, path: str, params: Optional[Dict[Any, Any]] = None, headers: Optional[Dict[Any, Any]] = None, data: str = '', debug_key: Optional[str] = None) -> Dict[Any, Any]:
        params = params if params is not None else {}
        headers = headers if headers is not None else {}
headers.update(self.header)
logging.info(f"{method} - {path}")
if method.lower() == "delete":
self.__delete(path, headers)
return {"success": True}
response_end = None
offset = 0
new = True
while True:
try:
while new:
if method.lower() == "get":
                        params['offset'] = offset
                        params['limit'] = params.get('limit') if params.get('limit') is not None else 1000
response: Response = requests.request(method, f'{self.cm_url}{path}', params=params, headers=headers, data=data)
if not response.ok:
logging.warning(f"Something went wrong when requesting {path} via {method}")
if response.status_code == 401:
                            logging.warning('Login token expired; requesting a new one and retrying')
self.login()
headers.update(self.header)
logging.error(f"ERROR on {path} - code {response.status_code}")
logging.info(response.text)
continue
response_json: Union[List[Any], Dict[str, Any]] = response.json()
if isinstance(response_json, list):
response_json = {'list': response_json, 'count': 0}
if response_json.get('count', 0) < offset + params.get('limit', float('inf')):
new = False
else:
offset += params.get('limit', 0)
if response_end is None:
response_end = response_json
else:
response_end['list'].extend(response_json['list'])
if response_end is None:
raise Exception('No response')
debug_path = "cm_responses.json"
debug_path_old = "cm_responses_old.json"
if os.path.isfile(debug_path):
with open(debug_path, "r") as f:
                        try:
                            data_ = json.load(f)
                            shutil.copyfile(debug_path, debug_path_old)
                        except ValueError:
                            data_ = {}
else:
data_ = {}
            key = debug_key if debug_key is not None else f'{method} - {path}'
            if key not in data_:
                data_[key] = {}
with open(debug_path, "w") as f:
if isinstance(response_end, list):
data_[key] = typedef.process_type(response_end, data_[key], False)
json.dump(data_, f)
else:
data_[key] = typedef.type_def_dict(response_end, data_[key], False)
json.dump(data_, f)
return response_end
except requests.exceptions.ConnectionError as e:
logging.error(e)
continue
except requests.exceptions.ReadTimeout as e:
logging.error(e)
continue
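    # Note on request(): GET calls are paginated client-side by bumping 'offset' until the
    # reported 'count' is exhausted, and the merged result is returned as one response dict.
    # A type sketch of each response is also written to cm_responses.json (the previous file
    # is kept as cm_responses_old.json) via the typedef helpers.
    # Illustrative call (comment only, not executed):
    #   cm.request('get', '/media', params={'limit': 100})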
def __delete(self, path: str, headers: Optional[Dict[Any, Any]] = None):
headers = headers if not headers is None else {}
headers.update(self.header)
while True:
try:
requests.delete(f'{self.cm_url}{path}', headers=headers)
return
except requests.exceptions.ConnectionError as e:
logging.error(e)
continue
except requests.exceptions.ReadTimeout as e:
logging.error(e)
continue
def login(self):
payload: Dict[str, Union[str, bool]] = {
'username': self.username,
'password': self.password,
'rememberMe': True
}
payload_str = json.dumps(payload)
headers: Dict[str, str] = {
'Content-type': 'application/json'
}
response = None
while True:
try:
response = requests.post(f'{self.cm_url}/auth/login', data=payload_str, headers=headers)
except requests.exceptions.ConnectionError as e:
logging.error(e)
continue
break
if response is None:
raise Exception('Login Failed')
else:
response_json: Dict[str, Any] = response.json()
self.api_token = response_json.get('apiToken')
if self.api_token is None:
raise Exception('No ApiToken found')
else:
self.header = {
'ApiToken': self.api_token,
'Content-Type': 'application/json'
}
                self.user = ContentManager.User(self, response_json.get('user', {}))
                self.users.append(self.user)
self.network = self.networks.get(get_id(response_json.get('network')))
if self.network is None:
raise Exception('No Network id found')
self.token = response_json.get('token')
self.api_license_token = response_json.get('apiLicenseToken')
server_time = response_json.get('serverTime')
                if server_time is not None:
self.time_dif_gmt = datetime.strptime(server_time.get('datetime', ''), '%Y-%m-%d %H:%M:%S') - datetime.strptime(server_time.get('gtmDatetime'), '%Y-%m-%d %H:%M:%S GMT')
""" SETTERS OBJECT """
def set_airtable_id(self, air_id: Optional[str]):
self.air_id = air_id
""" GETTERS ONLINE """
def get_version(self):
response = self.request('get', '/misc/productinfo')
self.version: Optional[str] = response.get('version')
""" APPROVALSTATUS """
class ApprovalStatus:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
            self.description: Optional[str] = json.get('description')
self.status: Optional[str] = json.get('status')
self.prettifyStatus: Optional[str] = json.get('prettifyStatus')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ApprovalStatusList(MutableSequence[ApprovalStatus]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.ApprovalStatus]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Create the backing list first so __get_data() has something to append to.
            self.__data: List[ContentManager.ApprovalStatus] = init_list if init_list is not None else []
            if init_list is None:
                self.__get_data()
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/approvalStatus')
for elem in response.get('list', []):
item = ContentManager.ApprovalStatus(self.cm, elem)
self.__data.append(item)
        def get(self, search: Union[int, str, None]) -> Optional[ContentManager.ApprovalStatus]:
            if search is None:
                return None
            if isinstance(search, int):
                logging.warning("Searching an ApprovalStatus by int is not supported; use its status string")
                return None
            if len(self.__data) == 0:
                self.__get_data()
            for elem in self.__data:
                if elem.status == search:
                    return elem
            self.__get_data()
            for elem in self.__data:
                if elem.status == search:
                    return elem
            logging.info(f'ApprovalStatus with {search} not found')
            return None
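        # All *List.get() helpers follow the same pattern: search the cached data, refresh
        # from the API once, search again, and finally return None with an informational
        # log entry when nothing matches.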
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.ApprovalStatus) -> None:
self.__data.append(value)
""" CATEGORY """
class Category:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.children: List[ContentManager.Category] = [ContentManager.Category(self.cm, elem) for elem in json.get('children', [])]
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.parentId: Optional[int] = json.get('parentId')
def unpack_usage_json(self, json: Dict[str, Any]):
self.messagesCount: Optional[int] = json.get('messagesCount')
self.mediaCount: Optional[int] = json.get('mediaCount')
self.templatesCount: Optional[int] = json.get("templatesCount")
self.playlistsCount: Optional[int] = json.get('playlistsCount')
self.remotePublishLocationsCount: Optional[int] = json.get('remotePublishLocationsCount')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "link" and use:
data.pop('parentId', None)
data.pop('description', None)
data['children'] = [elem.json(link=True) for elem in self.children]
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
@staticmethod
def create(cm: ContentManager, name: str, parentId: Optional[int] = None, children: Optional[Union[List[ContentManager.Category], List[int]]] = None, description: Optional[str] = None):
            children = children if children is not None else []
            parentId = parentId if parentId is not None else 0
            if len(children) > 0 and isinstance(children[0], int):
                children_list = [cm.categories.get(elem) for elem in children if isinstance(elem, int)]
                children_list = [elem for elem in children_list if elem is not None]
            else:
                children_list = children
if not all(isinstance(elem, ContentManager.Category) for elem in children_list):
raise Exception("Expected all children to be of type category")
data = {
"name": name,
"parentId": parentId,
"description": description,
"children": [elem.json(link=True) for elem in children_list if isinstance(elem, ContentManager.Category)]
}
response = cm.request('post', '/categories', data=json.dumps(data))
cm.categories.append(ContentManager.Category(cm, response))
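        # Illustrative usage (assumed names, comment only, not executed):
        #   ContentManager.Category.create(cm, "Lobby", parentId=0, description="Lobby screens")
        # posts to /categories and appends the newly created Category to cm.categories.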
        def delete(self):
            # Route through request(): a direct self.cm.__delete(...) call from this nested class would be name-mangled to _Category__delete and fail.
            self.cm.request('delete', f'/categories/{self.id}')
def usage(self):
response = self.cm.request('get', '/categories/usage', params={'ids': self.id})
self.unpack_usage_json(response)
return response
class CategoryList(MutableSequence[Category]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Category]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Create the backing list first so __get_data() has something to append to.
            self.__data: List[ContentManager.Category] = init_list if init_list is not None else []
            if init_list is None:
                self.__get_data()
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/categories')
for elem in response.get('list', []):
item = ContentManager.Category(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Category]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Category with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Category) -> None:
self.__data.append(value)
""" CHANNEL """
class Channel:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
non_scheduled_playlist_id = get_id(json.get('nonScheduledPlaylist'))
self.alternateSupport: Optional[bool] = json.get('alternateSupport')
self.audioControlledByAdManager: Optional[bool] = json.get('audioControlledByAdManager')
self.campaignChannel: Optional[bool] = json.get('campaignChannel')
self.campaignClone: Optional[bool] = json.get('campaignClone')
self.description: Optional[str] = json.get('description')
self.frameset: ContentManager.Channel.FrameSet = ContentManager.Channel.FrameSet(self, json.get('frameset'))
self.id: Optional[int] = json.get('id')
self.lastModified: Optional[str] = json.get('lastModified')
self.maxFrameAllowed: Optional[int] = json.get('maxFrameAllowed')
self.maxPixelAllowed: Optional[int] = json.get('maxPixelAllowed')
self.muteAudioFromVisual: Optional[bool] = json.get("muteAudioFromVisual")
self.name: Optional[str] = json.get('name')
self.nonScheduledPlaylist: Optional[ContentManager.Playlist] = self.cm.playlists.get(non_scheduled_playlist_id)
self.playDedicatedAudioTrack: Optional[bool] = json.get('playDedicatedAudioTrack')
self.playerCount: int = json.get('playerCount', 0)
self.playerMetadataValues: List[ContentManager.Channel.MetadataValue] = [ContentManager.Channel.MetadataValue(self, elem) for elem in json.get('playerMetadataValues', [])]
self.readOnly: Optional[bool] = json.get('readOnly')
self.triggerSupport: Optional[bool] = json.get('triggerSupport')
self.type: Optional[str] = json.get('type')
self.variables: List[ContentManager.Channel.Variable] = [ContentManager.Channel.Variable(self, elem) for elem in json.get('variables', [])]
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
        def get_playlists(self) -> List[ContentManager.Playlist]:
            temp: List[ContentManager.Playlist] = []
            for frame in self.frameset.frames:
                for timeslot in frame.timeslots:
                    if timeslot.playlist is not None and timeslot.playlist not in temp:
                        temp.append(timeslot.playlist)
                if frame.eventtriggers is not None:
                    for eventtrigger in frame.eventtriggers:
                        if eventtrigger.playlist is not None and eventtrigger.playlist not in temp:
                            temp.append(eventtrigger.playlist)
            return temp
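        # get_playlists() aggregates every playlist referenced by the channel's timeslots and
        # event triggers, without duplicates, e.g. (made-up channel name, not executed):
        #   playlists = cm.channels.get("Main Channel").get_playlists()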
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'player_update' and use:
data = {k:v for k,v in data.items() if k in ['campaignChannel', 'campaignClone', 'id', 'name']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class FrameSet:
def __init__(self, channel: ContentManager.Channel, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.channel = channel
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.campaignFrameset: Optional[bool] = json.get('campaignFrameset')
self.eventTriggersCount: int = json.get('eventTriggersCount', 0)
self.frames: List[ContentManager.Channel.FrameSet.Frame] = [ContentManager.Channel.FrameSet.Frame(self, elem) for elem in json.get('frames', [])]
self.framesCounter: int = json.get('framesCounter', 0)
self.height: Optional[int] = json.get('height')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.timeslotsCount: int = json.get('timeslotsCount', 0)
self.width: Optional[int] = json.get('width')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['channel']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Frame:
def __init__(self, frameset: ContentManager.Channel.FrameSet, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.frameset = frameset
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
alternate_playlist_id = get_id(json.get('alternatePlaylist'))
self.alternatePlaylist: Optional[ContentManager.Playlist] = self.frameset.channel.cm.playlists.get(alternate_playlist_id)
self.alternateType: Optional[str] = json.get('alternateType')
self.autoscale: Optional[str] = json.get('autoscale')
self.campaignTarget: Optional[bool] = json.get('campaignTarget')
self.color: Optional[str] = json.get('color')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.eventTriggersCount: int = json.get('eventTriggersCount', 0)
self.eventtriggers: List[ContentManager.Channel.FrameSet.Frame.EventTrigger] = []
self.height: Optional[int] = json.get('height')
self.hidden: Optional[bool] = json.get('hidden')
self.id: Optional[int] = json.get('id')
self.left: Optional[int] = json.get('left')
self.name: Optional[str] = json.get('name')
self.timeTriggersCount: int = json.get('timeTriggersCount', 0)
self.timeslots: List[ContentManager.Channel.FrameSet.Frame.Timeslot] = []
self.timeslotsCount: int = json.get('timeslotsCount', 0)
self.timetriggers: List[ContentManager.Channel.FrameSet.Frame.TimeTrigger] = []
self.top: Optional[int] = json.get('top')
self.width: Optional[int] = json.get('width')
self.zOrder: Optional[int] = json.get('zOrder')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['frameset']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Timeslot:
def __init__(self, frame: ContentManager.Channel.FrameSet.Frame, json: Dict[str, Any]) -> None:
self.frame = frame
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
alternate_playlist_id = get_id(json.get('alternatePlaylist'))
playlist_id = get_id(json.get('playlist'))
self.alternatePlaylist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(alternate_playlist_id)
self.alternateType: Optional[str] = json.get('alternateType')
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.color: Optional[str] = json.get('color')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.description: Optional[str] = json.get('description')
self.endDate: Optional[str] = json.get('endDate')
self.endTime: Optional[str] = json.get('endTime')
self.id: Optional[int] = json.get('id')
self.locked: Optional[bool] = json.get('locked')
self.monthPeriod: Optional[str] = json.get('monthPeriod')
self.name: Optional[str] = json.get('name')
self.playFullScreen: Optional[bool] = json.get('playFullScreen')
self.playlist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(playlist_id)
self.priorityClass: Optional[str] = json.get('priorityClass')
self.recurrencePattern: Optional[str] = json.get('recurrencePattern')
self.sortOrder: Optional[str] = json.get('sortOrder')
self.startDate: Optional[str] = json.get('startDate')
self.startTime: Optional[str] = json.get('startTime')
self.weekdays: Optional[List[str]] = json.get('weekdays')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['frame']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class EventTrigger:
def __init__(self, frame: ContentManager.Channel.FrameSet.Frame, json: Dict[str, Any]) -> None:
self.frame = frame
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
playlist_id = get_id(json.get('playlist'))
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.id: Optional[int] = json.get('id')
self.itemsToPick: Optional[int] = json.get('itemsToPick')
self.playFullScreen: Optional[bool] = json.get('playFullScreen')
self.playlist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(playlist_id)
self.repeatTriggerResponse: Optional[str] = json.get('repeatTriggerResponse')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.variable: ContentManager.Channel.FrameSet.Frame.EventTrigger.Variable = ContentManager.Channel.FrameSet.Frame.EventTrigger.Variable(self, json.get('variable'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['frame']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Variable:
def __init__(self, eventtrigger: ContentManager.Channel.FrameSet.Frame.EventTrigger, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.eventtrigger = eventtrigger
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
media_id = get_id(json.get('controlScript'))
self.controlScript: Optional[ContentManager.Media] = self.eventtrigger.frame.frameset.channel.cm.media.get(media_id)
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.sharedName: Optional[str] = json.get('sharedName')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['eventtrigger']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class TimeTrigger:
def __init__(self, frame: ContentManager.Channel.FrameSet.Frame, json: Dict[str, Any]) -> None:
self.frame = frame
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
playlist_id = get_id(json.get('playlist'))
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.days: Optional[List[str]] = json.get('days')
self.endDate: Optional[str] = json.get('endDate')
self.id: Optional[int] = json.get('id')
self.itemsToPick: Optional[int] = json.get('itemsToPick')
self.name: Optional[str] = json.get('name')
self.playFullScreen: Optional[bool] = json.get('playFullScreen')
self.playlist: Optional[ContentManager.Playlist] = self.frame.frameset.channel.cm.playlists.get(playlist_id)
self.recurrencePattern: Optional[str] = json.get('recurrencePattern')
self.repeatEndTime: Optional[str] = json.get('repeatEndTime')
self.repeatStartTime: Optional[str] = json.get('repeatStartTime')
self.repeatTriggerResponse: Optional[str] = json.get('repeatTriggerResponse')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.startDate: Optional[str] = json.get('startDate')
self.time: Optional[str] = json.get('time')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['frame']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Variable:
def __init__(self, channel: ContentManager.Channel, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.channel = channel
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
media_id = get_id(json.get('controlScript'))
self.controlScript: Optional[ContentManager.Media] = self.channel.cm.media.get(media_id)
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.sharedName: Optional[str] = json.get('sharedName')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['channel']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MetadataValue:
def __init__(self, channel: ContentManager.Channel, json: Optional[Dict[str, Any]]) -> None:
self.channel = channel
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
player_metadata_id = get_id(json.get('playerMetadata'))
self.id: Optional[int] = json.get('id')
self.playerMetadata: Optional[ContentManager.PlayerMetadata] = self.channel.cm.player_metadatas.get(player_metadata_id)
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['channel']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ChannelList(MutableSequence[Channel]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Channel]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Create the backing list first so __get_data() has something to append to.
            self.__data: List[ContentManager.Channel] = init_list if init_list is not None else []
            if init_list is None:
                self.__get_data()
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/channels')
for elem in response.get('list', []):
item = ContentManager.Channel(self.cm, elem)
for i, frame in enumerate(item.frameset.frames):
timeslots_response = self.cm.request('get', f'/channels/{item.id}/frames/{frame.id}/timeslots', debug_key="channel_timeslots")
eventtriggers_response = self.cm.request('get', f'/channels/{item.id}/frames/{frame.id}/eventtriggers', debug_key="channel_eventtriggers")
timetriggers_response = self.cm.request('get', f'/channels/{item.id}/frames/{frame.id}/timetriggers', debug_key="channel_timetriggers")
item.frameset.frames[i].timeslots = [ContentManager.Channel.FrameSet.Frame.Timeslot(frame, elem) for elem in timeslots_response.get('timeslots', [])]
item.frameset.frames[i].eventtriggers = [ContentManager.Channel.FrameSet.Frame.EventTrigger(frame, elem) for elem in eventtriggers_response.get('eventTriggers', [])]
item.frameset.frames[i].timetriggers = [ContentManager.Channel.FrameSet.Frame.TimeTrigger(frame, elem) for elem in timetriggers_response.get('timeTriggers', [])]
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Channel]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Channel with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Channel) -> None:
self.__data.append(value)
""" DISTRIBUTIONSERVER """
class DistributionServer:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.auditSettings: ContentManager.DistributionServer.AuditSettings = ContentManager.DistributionServer.AuditSettings(self, json.get('auditSettings'))
self.broadcastServer: ContentManager.DistributionServer.BroadcastServer = ContentManager.DistributionServer.BroadcastServer(self, json.get('broadcastServer'))
self.description: Optional[str] = json.get('description')
self.driver: Optional[str] = json.get('driver')
self.driverOptions: List[ContentManager.DistributionServer.DriverOptions] = [ContentManager.DistributionServer.DriverOptions(self, elem) for elem in json.get('driverOptions', [])]
self.iadeaServer: ContentManager.DistributionServer.IadeaServer = ContentManager.DistributionServer.IadeaServer(self, json.get('iadeaServer'))
self.id: Optional[int] = json.get('id')
self.monitoringSettings: ContentManager.DistributionServer.MonitoringSettings = ContentManager.DistributionServer.MonitoringSettings(self, json.get('monitoringSettings'))
self.name: Optional[str] = json.get('name')
self.omnicastServer: ContentManager.DistributionServer.OmnicastServer = ContentManager.DistributionServer.OmnicastServer(self, json.get('omnicastServer'))
self.schedules: List[ContentManager.DistributionServer.Schedule] = [ContentManager.DistributionServer.Schedule(self, elem) for elem in json.get('schedules', [])]
self.snapshotSettings: ContentManager.DistributionServer.SnapshotSettings = ContentManager.DistributionServer.SnapshotSettings(self, json.get('snapshotSettings'))
self.synchronization: Optional[str] = json.get('synchronization')
self.uuid: Optional[str] = json.get('uuid')
# self.distributions Do not see added value to add this
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "player_update" and use:
data = {k:v for k,v in data.items() if k in ['driver', 'id', 'name', 'snapshotSettings']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class AuditSettings:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.enabled: Optional[bool] = json.get('enabled')
self.uploadFrequency: Optional[str] = json.get('uploadFrequency')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class BroadcastServer:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.delivery: Optional[str] = json.get('delivery')
self.lastStatus: Optional[str] = json.get('lastStatus')
self.logLevel: Optional[int] = json.get('logLevel')
self.macAddress: Optional[str] = json.get('macAddress')
self.password: Optional[str] = json.get('password')
self.planRevision: Optional[int] = json.get('planRevision')
self.playerCacheSize: Optional[int] = json.get('playerCacheSize')
self.pollingInterval: Optional[int] = json.get('pollingInterval')
self.serverUrl: Optional[str] = json.get('serverUrl')
self.username: Optional[str] = json.get('username')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class DriverOptions:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.key: Optional[str] = json.get('key')
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class IadeaServer:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.children: List[ContentManager.DistributionServer.IadeaServer] = [ContentManager.DistributionServer.IadeaServer(self.server, elem) for elem in json.get('children', [])]
self.heartbeatErrorRetryRate: Optional[int] = json.get('heartbeatErrorRetryRate')
self.logLevel: Optional[int] = json.get('logLevel')
self.macAddress: Optional[str] = json.get('macAddress')
self.parent: Optional[int] = get_id(json.get('parent'))
self.planErrorRepollingRate: Optional[int] = json.get('planErrorRepollingRate')
self.planPollingRate: Optional[int] = json.get('planPollingRate')
self.planRevision: Optional[int] = json.get('planRevision')
self.planStatusErrorRetryRate: Optional[int] = json.get('planStatusErrorRetryRate')
self.playerHeartbeatRate: Optional[int] = json.get('playerHeartbeatRate')
self.scheduleExpansionDays: Optional[int] = json.get('scheduleExpansionDays')
self.scheduleRefreshTime: Optional[str] = json.get('scheduleRefreshTime')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MonitoringSettings:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.diskSpaceReserve: Optional[int] = json.get('diskSpaceReserve')
self.enabled: Optional[bool] = json.get('enabled')
self.heartbeatRate: Optional[int] = json.get('heartbeatRate')
self.overdueRate: Optional[int] = json.get('overdueRate')
self.planStatusInterval: Optional[int] = json.get('planStatusInterval')
self.purgeLogsAfter: Optional[int] = json.get('purgeLogsAfter')
self.uploadLogs: Optional[bool] = json.get('uploadLogs')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class OmnicastServer:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.url: Optional[str] = json.get('url')
self.username: Optional[str] = json.get('username')
self.password: Optional[str] = json.get('password')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Schedule:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
player_group_id = get_id(json.get('playerGroup'))
self.dayOfWeek: Optional[str] = json.get('dayOfWeek')
self.hours: Optional[str] = json.get('hours')
self.id: Optional[int] = json.get('id')
self.minutes: Optional[str] = json.get('minutes')
self.playerGroup: Optional[ContentManager.PlayerGroup] = self.server.cm.playergroups.get(player_group_id)
self.seconds: Optional[str] = json.get('seconds')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class SnapshotSettings:
def __init__(self, server: ContentManager.DistributionServer, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.server = server
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.connectionValid: Optional[bool] = json.get('connectionValid')
self.enabled: Optional[bool] = json.get('enabled')
self.interval: Optional[int] = json.get('interval')
self.intervalNumSnapshots: Optional[int] = json.get('intervalNumSnapshots')
self.intervalProfile: Optional[str] = json.get('intervalProfile')
self.onDemandProfile: Optional[str] = json.get('onDemandProfile')
self.onEventProfile: Optional[str] = json.get('onEventProfile')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['server']
for name, use in kwargs.items():
if name == "player_update" and use:
data = {k:v for k,v in data.items() if k in ['connectionValid', 'enabled', 'interval', 'intervalNumSnapshots', 'intervalProfile', 'onDemandProfile', 'onEventProfile']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class DistributionServerList(MutableSequence[DistributionServer]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.DistributionServer]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Create the backing list first so __get_data() has something to append to.
            self.__data: List[ContentManager.DistributionServer] = init_list if init_list is not None else []
            if init_list is None:
                self.__get_data()
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/distributions')
for elem in response.get('list', []):
item = ContentManager.DistributionServer(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.DistributionServer]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'DistributionServer with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.DistributionServer) -> None:
self.__data.append(value)
""" EXMODULE """
class ExModule:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.description: Optional[str] = json.get('description')
self.name: Optional[str] = json.get('name')
self.total: Optional[int] = json.get('total')
self.used: Optional[int] = json.get('used')
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ExModuleList(MutableSequence[ExModule]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.ExModule]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Create the backing list first so __get_data() has something to append to.
            self.__data: List[ContentManager.ExModule] = init_list if init_list is not None else []
            if init_list is None:
                self.__get_data()
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/players/modules')
for elem in response.get('list', []):
item = ContentManager.ExModule(self.cm, elem)
self.__data.append(item)
        def get(self, search: Union[int, str, None]) -> Optional[ContentManager.ExModule]:
            if search is None:
                return None
            if isinstance(search, int):
                logging.warning('Searching an ExModule by int is not supported; use its name')
                return None
            if len(self.__data) == 0:
                self.__get_data()
            for elem in self.__data:
                if elem.name == search:
                    return elem
            self.__get_data()
            for elem in self.__data:
                if elem.name == search:
                    return elem
            logging.info(f'ExModule with {search} not found')
            return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.ExModule) -> None:
self.__data.append(value)
""" LICENSE """
class License:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
# self.featureLicenses TODO
# self.playerLicenses TODO
self.advantageCoverageUntil: Optional[str] = json.get('advantageCoverageUntil')
self.basicDesignerSeats: Optional[int] = json.get('basicDesignerSeats')
self.campaignSeats: Optional[int] = json.get('campaignSeats')
self.campaignTargets: Optional[int] = json.get('campaignTargets')
self.countSubNetworks: Optional[int] = json.get('countSubNetworks')
self.dongleId: Optional[str] = json.get("dongleId")
            self.exModules: List[ContentManager.ExModule] = [m for m in (self.cm.ex_modules.get(get_name(e)) for e in json.get('exModules', [])) if m is not None]
self.hasAdManager: Optional[bool] = json.get('hasAdManager')
self.isBetaDongle: Optional[bool] = json.get("isBetaDongle")
self.isSoftDongle: Optional[bool] = json.get('isSoftDongle')
self.isTrial: Optional[bool] = json.get('isTrial')
self.isUsageUnlimited: Optional[bool] = json.get('isUsageUnlimited')
self.name : Optional[str] = json.get('name')
self.playerCals: Optional[int] = json.get('playerCals')
self.playerCalsUnlimited: Optional[bool] = json.get('playerCalsUnlimited')
self.playerClientAccessLicenses: Optional[str] = json.get('playerClientAccessLicenses')
self.premiumDesignerSeats: Optional[int] = json.get('premiumDesignerSeats')
self.productId: Optional[str] = json.get("productId")
self.professionalDesignerSeats: Optional[int] = json.get('professionalDesignerSeats')
self.scalaMaintenanceExpired: Optional[bool] = json.get('scalaMaintenanceExpired')
self.scalaOutOfMaintenance: Optional[bool] = json.get('scalaOutOfMaintenance')
self.softDongleLicenseTo: Optional[str] = json.get('softDongleLicenseTo')
self.standardDesignerSeats: Optional[int] = json.get('standardDesignerSeats')
            self.trialDaysLeft: Optional[int] = json.get('trialDaysLeft')
self.usageUntil: Optional[str] = json.get('usageUntil')
self.usedCount: Optional[int] = json.get('usedCount')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class LicenseList(MutableSequence[License]):
        def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.License]] = None) -> None:
            super().__init__()
            self.cm = cm
            # Create the backing list first so __get_data() has something to append to.
            self.__data: List[ContentManager.License] = init_list if init_list is not None else []
            if init_list is None:
                self.__get_data()
def __get_data(self):
if self.cm.network is None:
raise Exception('Need current network')
response: Dict[str, Any] = self.cm.request('get', f'/license/networks/{self.cm.network.id}')
if response.get('list') is None:
item = ContentManager.License(self.cm, response)
self.__data.append(item)
else:
for elem in response.get('list', []):
item = ContentManager.License(self.cm, elem)
self.__data.append(item)
        def get(self, search: Union[int, str, None]) -> Optional[ContentManager.License]:
            if search is None:
                return None
            if isinstance(search, int):
                logging.warning("Searching a License by int is not supported; use its name")
                return None
            if len(self.__data) == 0:
                self.__get_data()
            for elem in self.__data:
                if elem.name == search:
                    return elem
            self.__get_data()
            for elem in self.__data:
                if elem.name == search:
                    return elem
            logging.info(f'License with {search} not found')
            return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.License) -> None:
self.__data.append(value)
""" MEDIA """
class Media:
def __init__(self, cm: ContentManager, data: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
created_user_id = get_id(json.get('createdBy'))
modified_user_id = get_id(json.get('modifiedBy'))
template_id = get_id(json.get('template'))
uploaded_user_id = get_id(json.get('uploadedBy'))
self.approval: ContentManager.Media.Approval = ContentManager.Media.Approval(self, json.get('approval'))
self.approvalDetail: ContentManager.Media.ApprovalDetail = ContentManager.Media.ApprovalDetail(self, json.get('approvalDetail'))
self.approvalStatus: Optional[str] = json.get('approvalStatus')
self.archived: Optional[bool] = json.get('archived')
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.backgroundColor: Optional[str] = json.get('backgroundColor')
self.broadcastPriority: Optional[str] = json.get('broadcastPriority')
self.campaignMedia: Optional[bool] = json.get('campaignMedia')
self.categories: List[ContentManager.Category] = get_list(json.get('categories'), self.cm.categories)
self.createdBy: Optional[ContentManager.User] = self.cm.users.get(created_user_id)
self.createdDate: Optional[str] = json.get('createdDate')
self.description: Optional[str] = json.get('description')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.duration: Optional[int] = json.get('duration')
self.endValidDate: Optional[str] = json.get('endValidDate')
self.fields: List[ContentManager.Media.Field] = [ContentManager.Media.Field(self, elem) for elem in json.get('fields', [])]
self.generatingThumbnail: Optional[bool] = json.get('generatingThumbnail')
self.hasSnapshot: Optional[bool] = json.get('hasSnapshot')
self.hasUnapprovedElements: Optional[bool] = json.get('hasUnapprovedElements')
self.height: Optional[int] = json.get('height')
self.id: Optional[int] = json.get('id')
self.input: Optional[str] = json.get('input')
self.lastModified: Optional[str] = json.get('lastModified')
self.length: Optional[int] = json.get('length')
self.mediaItemFiles: List[ContentManager.Media.ItemFile] = [ContentManager.Media.ItemFile(self, elem) for elem in json.get('mediaItemFiles', [])]
self.mediaType: Optional[str] = json.get('mediaType')
self.messagesCount: int = json.get('messagesCount', 0)
self.modifiedBy: Optional[ContentManager.User] = self.cm.users.get(modified_user_id)
self.name: Optional[str] = json.get('name')
self.neverArchive: Optional[bool] = json.get('neverArchive')
self.originalCreatedDate: Optional[str] = json.get("originalCreatedDate")
self.pages: Optional[int] = json.get('pages')
self.path: Optional[str] = json.get('path')
self.playFullscreen: Optional[bool] = json.get('playFullscreen')
self.playlistsCount: int = json.get('playlistsCount', 0)
self.prettifyDuration: Optional[str] = json.get('prettifyDuration')
self.prettifyLength: Optional[str] = json.get('prettifyLength')
self.prettifyType: Optional[str] = json.get('prettifyType')
self.readOnly: Optional[bool] = json.get('readOnly')
self.revision: Optional[int] = json.get('revision')
self.saveAndApprove: Optional[bool] = json.get('saveAndApprove')
self.snapshotInQueue: Optional[bool] = json.get('snapshotInQueue')
self.startValidDate: Optional[str] = json.get('startValidDate')
self.status: Optional[str] = json.get('status')
self.template: Optional[ContentManager.Template] = self.cm.templates.get(template_id)
self.templatesCount: int = json.get('templatesCount', 0)
self.thumbnailDownloadPaths: ContentManager.Media.ThumbnailDownloadPaths = ContentManager.Media.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.uploadType: Optional[str] = json.get('uploadType')
self.uploadedBy: Optional[ContentManager.User] = self.cm.users.get(uploaded_user_id)
self.uri: Optional[str] = json.get('uri')
self.validDateStatus: Optional[str] = json.get('validDateStatus')
self.volume: Optional[int] = json.get('volume')
self.webDavPath: Optional[str] = json.get('webDavPath')
self.width: Optional[int] = json.get('width')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ApprovalDetail:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.approvalStatus: Optional[str] = json.get('approvalStatus')
user_id = get_id(json.get('user'))
self.user: Optional[ContentManager.User] = self.media.cm.users.get(user_id)
to_approve_id = get_id(json.get('toApprove'))
self.toApprove: Optional[ContentManager.User] = self.media.cm.users.get(to_approve_id)
by_approve_id = get_id(json.get('approvedBy'))
self.approvedBy: Optional[ContentManager.User] = self.media.cm.users.get(by_approve_id)
self.messageText: Optional[str] = json.get('messageText')
self.lastModified: Optional[str] = json.get('lastModified')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ItemFile:
def __init__(self, media: ContentManager.Media, json: Dict[str, Any]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.filename: Optional[str] = json.get('filename')
self.size: Optional[int] = json.get('size')
self.prettifySize: Optional[str] = json.get('prettifySize')
self.uploadDate: Optional[str] = json.get('uploadDate')
self.version: Optional[int] = json.get('version')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.originalFilename: Optional[str] = json.get('originalFilename')
self.status: Optional[str] = json.get('status')
self.uploadedBy: Optional[str] = json.get('uploadedBy')
self.md5: Optional[str] = json.get('md5')
self.thumbnailDownloadPaths: Optional[ContentManager.Media.ItemFile.ThumbnailDownloadPaths] = ContentManager.Media.ItemFile.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, itemfile: ContentManager.Media.ItemFile, json: Optional[Dict[str, Any]]) -> None:
self.itemfile = itemfile
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['itemfile']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Approval:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.action: Optional[str] = json.get('action')
self.userId: Optional[int] = json.get('userId')
self.messageText: Optional[str] = json.get('messageText')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Field:
def __init__(self, media: ContentManager.Media, json: Optional[Dict[str, Any]]) -> None:
self.media = media
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.displayName: Optional[str] = json.get('displayName')
self.value: Optional[str] = json.get('value')
self.templateId: Optional[int] = json.get('templateId')
self.required: Optional[bool] = json.get('required')
self.type: Optional[str] = json.get('type')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MediaList(MutableSequence[Media]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Media]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Media] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/media')
for elem in response.get('list', []):
item = ContentManager.Media(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Media]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Media with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Media) -> None:
self.__data.append(value)
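# Usage sketch (hypothetical names, assuming an authenticated ContentManager instance `cm`):
#   clip = cm.media.get('lobby_intro.mp4')   # a string looks up by name, an int looks up by id
#   if clip is not None:
#       print(clip.id, clip.name)
#       payload = clip.json()                # plain dict, ready for json.dumps(...)
# The list is fetched lazily from GET /media on first use and queried a second time after a
# refresh if the first lookup misses.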
""" NETWORK """
class Network:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.active: Optional[bool] = json.get('active')
self.approvalMedia: Optional[bool] = json.get('approvalMedia')
self.approvalMessage: Optional[bool] = json.get('approvalMessage')
self.autoThumbnailGeneration: Optional[bool] = json.get('autoThumbnailGeneration')
self.automaticPlaylistDurationCalculation: Optional[bool] = json.get('automaticPlaylistDurationCalculation')
self.firstDay: Optional[str] = json.get('firstDay')
self.id: Optional[int] = json.get('id')
self.licenseState: Optional[str] = json.get('licenseState')
self.maxDatabaseAge: Optional[int] = json.get('maxDatabaseAge')
self.maxDownloadThreads: Optional[int] = json.get('maxDownloadThreads')
self.name: Optional[str] = json.get('name')
self.newsFeed: Optional[bool] = json.get('newsFeed')
self.newsFeedUrl: Optional[str] = json.get('newsFeedUrl')
self.passwordCheckCharTypes: Optional[bool] = json.get('passwordCheckCharTypes')
self.passwordMinimumLength: Optional[int] = json.get('passwordMinimumLength')
self.passwordUseLowercase: Optional[bool] = json.get('passwordUseLowercase')
self.passwordUseNonAlphanumeric: Optional[bool] = json.get('passwordUseNonAlphanumeric')
self.passwordUseNumbers: Optional[bool] = json.get('passwordUseNumbers')
self.passwordUseUppercase: Optional[bool] = json.get('passwordUseUppercase')
self.playbackAuditParser: Optional[bool] = json.get('playbackAuditParser')
self.purgeDaysPlanGenHistory: Optional[int] = json.get('purgeDaysPlanGenHistory')
self.senderEmailAddress: Optional[str] = json.get('senderEmailAddress')
self.sessionTimeout: Optional[int] = json.get('sessionTimeout')
self.showMessageFieldsInMultiplePages: Optional[bool] = json.get('showMessageFieldsInMultiplePages')
self.smtpAuthentication: Optional[bool] = json.get('smtpAuthentication')
self.smtpEnabled: Optional[bool] = json.get('smtpEnabled')
self.smtpPort: Optional[int] = json.get('smtpPort')
self.smtpServerAddress: Optional[str] = json.get('smtpServerAddress')
self.smtpSsl: Optional[bool] = json.get('smtpSsl')
self.smtpUsername: Optional[str] = json.get('smtpUsername')
self.userPasswordExpiresIn: Optional[int] = json.get('userPasswordExpiresIn')
self.userPasswordExpiresInMinutes: Optional[bool] = json.get('userPasswordExpiresInMinutes')
self.viewReport: Optional[bool] = json.get('viewReport')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class NetworkList(MutableSequence[Network]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Network]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Network] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/networks')
for elem in response.get('list', []):
item = ContentManager.Network(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Network]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Network with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Network) -> None:
self.__data.append(value)
""" PLAYER """
class PlayerGroup:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.numberOfPlayers: Optional[int] = json.get('numberOfPlayers')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'player_update' and use:
data = {k:v for k,v in data.items() if k in ['id']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
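# json(player_update=True) reduces a PlayerGroup to just its id so it can be embedded in a
# player update payload without sending the whole group, e.g. (sketch, hypothetical name):
#   group = cm.playergroups.get('Lobby')
#   body = group.json(player_update=True)    # -> {'id': <group id>}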
class PlayerGroupList(MutableSequence[PlayerGroup]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.PlayerGroup]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.PlayerGroup] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/playergroup')
for elem in response.get('list', []):
item = ContentManager.PlayerGroup(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.PlayerGroup]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'PlayerGroup with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.PlayerGroup) -> None:
old_ids = [elem.id for elem in self.__data]
if not value.id in old_ids:
self.__data.append(value)
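# Unlike the other *List containers, append() here skips groups whose id is already present,
# so adding a group that was fetched twice does not create duplicates.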
class PlayerHealth:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.alerted: Optional[bool] = json.get("alerted")
self.cleared: Optional[bool] = json.get("cleared")
self.clearedDate: Optional[str] = json.get("clearedDate")
self.descriptionDebug: Optional[List[str]] = json.get("descriptionDebug")
self.descriptionDetails: Optional[List[str]] = json.get("descriptionDetails")
self.descriptionTech: Optional[List[str]] = json.get('descriptionTech')
self.descriptionUser: Optional[List[str]] = json.get("descriptionUser")
self.errorNumber: Optional[str] = json.get("errorNumber")
self.first: Optional[str] = json.get("first")
self.id: Optional[int] = json.get("id")
self.last: Optional[str] = json.get("last")
self.message: Optional[str] = json.get("message")
self.playerCount: int = json.get("playerCount", 0)
self.problemMessage: Optional[str] = json.get("problemMessage")
self.problemNumber: Optional[int] = json.get("problemNumber")
self.reported: int = json.get("reported", 0)
self.reportedPlayers: List[ContentManager.PlayerHealth.ReportedPlayer] = [ContentManager.PlayerHealth.ReportedPlayer(self.cm, elem) for elem in json.get("reportedPlayers", [])]
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ReportedPlayer:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get("id")
self.first: Optional[str] = json.get("first")
self.last: Optional[str] = json.get("last")
self.playerLogFile: Optional[str] = json.get("playerLogFile")
self.reported: int = json.get("reported", 0)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerHealthList(MutableSequence[PlayerHealth]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.PlayerHealth]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.PlayerHealth] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/playerhealth')
for elem in response.get('list', []):
item = ContentManager.PlayerHealth(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.PlayerHealth]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.problemMessage == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.problemMessage == search:
return elem
logging.info(f'PlayerHealth with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.PlayerHealth) -> None:
self.__data.append(value)
class PlayerMetadata:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.datatype: Optional[str] = json.get('datatype')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.order: Optional[int] = json.get('order')
self.predefinedValues: List[ContentManager.PlayerMetadata.PredefinedValue] = [ContentManager.PlayerMetadata.PredefinedValue(self, elem) for elem in json.get('predefinedValues', [])]
self.valueType: Optional[str] = json.get('valueType')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
@staticmethod
def create(cm: ContentManager, name: str, datatype: str, valuetype: str):
name = name.replace('Player.', '')
data = {
'name': name,
'datatype': datatype,
'valueType': valuetype
}
cm.request('post', '/playerMetadata', data=json.dumps(data))
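# Usage sketch (illustrative values; a leading 'Player.' prefix is stripped before posting):
#   ContentManager.PlayerMetadata.create(cm, 'Player.Location', 'STRING', 'ANY')
# The datatype/valueType strings must match whatever the Content Manager API accepts; the
# values shown here are assumptions, not documented constants.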
class PredefinedValue:
def __init__(self, metadata: ContentManager.PlayerMetadata, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.metadata = metadata
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
self.id: Optional[int] = json.get('id')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.value: Optional[str] = json.get('value')
self.variableId: Optional[int] = json.get('variableId')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['metadata']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerMetadataList(MutableSequence[PlayerMetadata]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.PlayerMetadata]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.PlayerMetadata] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/playerMetadata')
for elem in response.get('list', []):
item = ContentManager.PlayerMetadata(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.PlayerMetadata]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'PlayerMetadata with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.PlayerMetadata) -> None:
self.__data.append(value)
class Player:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
distribution_server_id = get_id(json.get('distributionServer'))
owner_workgroup_id = get_id(json.get('ownerWorkgroup'))
# site_id = get_id(json.get('site'))
self.active: Optional[str] = json.get('active')
self.bandwidthThrottlingWindows: List[ContentManager.Player.BandwidthThrottlingWindow] = [ContentManager.Player.BandwidthThrottlingWindow(self, elem) for elem in json.get('bandwidthThrottlingWindows', [])]
self.customId: Optional[str] = json.get('customId')
self.description: Optional[str] = json.get('description')
self.distributionServer: Optional[ContentManager.DistributionServer] = self.cm.distributionservers.get(distribution_server_id)
self.downloadThreads: Optional[int] = json.get('downloadThreads')
self.enabled: Optional[bool] = json.get('enabled')
self.exModules: List[ContentManager.ExModule] = get_list(json.get('exModules'), self.cm.ex_modules)
self.featureLicenseType: ContentManager.Player.FeatureLicense = ContentManager.Player.FeatureLicense(self, json.get('featureLicenseType'))
self.id: Optional[int] = json.get('id')
self.intervalSnapshotEnabled: Optional[bool] = json.get('intervalSnapshotEnabled')
self.lastModified: Optional[str] = json.get('lastModified')
self.limitDefaultBandwidth: Optional[int] = json.get('limitDefaultBandwidth')
self.logLevel: Optional[str] = json.get('logLevel')
self.mac: Optional[str] = json.get('mac')
self.metadataValue: List[ContentManager.Player.MetadataValue] = [ContentManager.Player.MetadataValue(self, elem) for elem in json.get('metadataValue', [])]
self.name: Optional[str] = json.get('name')
self.numberOfDisplays: Optional[int] = json.get('numberOfDisplays')
self.ownerWorkgroup: Optional[ContentManager.Workgroup] = self.cm.workgroups.get(owner_workgroup_id)
self.pairingKey: Optional[str] = json.get('pairingKey')
self.planDeliveryMethod: Optional[str] = json.get('planDeliveryMethod')
self.planDeliveryPassword: Optional[str] = json.get('planDeliveryPassword')
self.planDeliveryUsername: Optional[str] = json.get('planDeliveryUsername')
self.playerDisplays: List[ContentManager.Player.Display] = [ContentManager.Player.Display(self, elem) for elem in json.get('playerDisplays', [])]
self.playerId: Optional[str] = json.get('playerId')
self.playerUrlOrHostName: Optional[str] = json.get('playerUrlOrHostName')
self.playergroups: List[ContentManager.PlayerGroup] = get_list(json.get('playergroups'), self.cm.playergroups)
self.pollingInterval: Optional[int] = json.get('pollingInterval')
self.pollingUnit: Optional[str] = json.get('pollingUnit')
self.previewPlayer: Optional[bool] = json.get('previewPlayer') #DEPRECATED
self.readOnly: Optional[bool] = json.get('readOnly')
self.requestLogs: Optional[bool] = json.get('requestLogs')
self.sharedWorkgroups: List[ContentManager.Workgroup] = get_list(json.get('sharedWorkgroups'), self.cm.workgroups)
# self.site: Optional[ContentManager.Site] = self.cm.sites.get(site_id)
self.timezoneOffset: Optional[str] = json.get('timezoneOffset')
self.type: Optional[str] = json.get('type')
self.unusedFilesCache: Optional[int] = json.get('unusedFilesCache')
self.usedPairingKey: Optional[str] = json.get('usedPairingKey')
self.uuid: Optional[str] = json.get('uuid')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def unpack_json_state(self, json: Dict[str, Any]):
self.host: Optional[str] = json.get('host')
self.ip: Optional[str] = json.get('ip')
self.lastBooted: Optional[str] = json.get('lastBooted')
self.lastBootedTimestamp: Optional[str] = json.get('lastBootedTimestamp')
self.lastReported: Optional[str] = json.get('lastReported')
self.lastReportedTimestamp: Optional[str] = json.get('lastReportedTimestamp')
self.planState: Optional[str] = json.get('planState')
self.releaseString: Optional[str] = json.get('releaseString')
self.state: Optional[str] = json.get('state')
def generate_uuid(self):
params = {
'ids': self.id
}
response = self.cm.request('post', '/storage', data=json.dumps(params))
return response.get('value')
def update_metadata(self, name: str, value: Any):
if not name.startswith('Player.'):
name = f"Player.{name}"
metadata = self.cm.player_metadatas.get(name)
if metadata is None:
raise Exception(f'No metadata found with name {name}')
if self.metadataValue is None:
self.metadataValue = [ContentManager.Player.MetadataValue(self, {'value': value, 'playerMetadata': metadata.json()})]
else:
exists = False
for i, v in enumerate(self.metadataValue):
if v.playerMetadata is None:
continue
if v.playerMetadata.name == name:
exists = True
if not value is None:
self.metadataValue[i].value = value
if not exists:
self.metadataValue.append(ContentManager.Player.MetadataValue(self, {'value': value, 'playerMetadata': metadata.json()}))
def save(self):
self.cm.request('put', f'/players/{self.id}', data=json.dumps(self.json(update=True)), debug_key='update_player')
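# Usage sketch (hypothetical names; assumes the ContentManager exposes its PlayerList as
# cm.players):
#   player = cm.players.get('Lobby Screen')
#   if player is not None:
#       player.update_metadata('Location', 'HQ')  # 'Player.' prefix is added automatically
#       player.save()                             # PUT /players/{id} with json(update=True)
# update_metadata() requires the metadata definition to exist already (see
# PlayerMetadata.create); otherwise it raises.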
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == "update" and use:
data = {k:v for k,v in data.items() if k in ['active', 'availableTargets', 'distributionServer', 'downloadThreads', 'enabled', 'id', 'lastModified', 'logLevel', 'mac', 'metadataValue', 'name', 'numberOfDisplays', 'overrideStatus', 'planDeliveryMethod', 'playerDisplays', 'pollingInterval', 'pollingUnit', 'previewPlayer', 'readOnly', 'requestLogs', 'timezoneOffset', 'type', 'unusedFilesCache', 'uuid', 'workgroups', 'ownerWorkgroup', 'bandwidthThrottlingWindows', 'limitDefaultBandwidth', 'playergroups', 'description']}
for key in list(kwargs.keys()):
if not "_" in key:
kwargs[f"player_{key}"] = kwargs.pop(key)
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
own_workgroup_id = data.get('ownerWorkgroup', {}).get("id")
if own_workgroup_id is None:
return data
else:
data.pop('ownerWorkgroup', None)
for i, elem in enumerate(data.get('workgroups', [])):
if elem.get('id') == own_workgroup_id:
data['workgroups'][i]['owner'] = True
return data
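# Note on the block above: when an ownerWorkgroup survives clean_data(), it is removed from
# the payload as a separate key and the matching entry in 'workgroups' is flagged with
# {'owner': True} before the dict is returned.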
class Display:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.player = player
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
channel_id = get_id(json.get('channel'))
self.channel: Optional[ContentManager.Channel] = self.player.cm.channels.get(channel_id)
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.screenCounter: Optional[int] = json.get('screenCounter')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class BandwidthThrottlingWindow:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.player = player
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
self.day: Optional[List[str]] = json.get('day')
self.endTime: Optional[str] = json.get('endTime')
self.id: Optional[int] = json.get('id')
self.limit: Optional[int] = json.get('limit')
self.order: Optional[int] = json.get('order')
self.startTime: Optional[str] = json.get('startTime')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class FeatureLicense:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.player = player
self.unpack_json(json)
def unpack_json(self, json:Dict[str,Any]) -> None:
self.alternateScheduleOptionsSupport: Optional[bool] = json.get('alternateScheduleOptionsSupport')
self.customMonitorConfigs: Optional[bool] = json.get('customMonitorConfigs')
self.deviceManagement: Optional[bool] = json.get('deviceManagement')
self.html5Support: Optional[bool] = json.get('html5Support')
self.imageSupport: Optional[bool] = json.get('imageSupport')
self.maxChannel: Optional[int] = json.get('maxChannel')
self.maxOutputs: Optional[int] = json.get('maxOutputs')
self.maxPixel: Optional[int] = json.get('maxPixel')
self.maxZone: Optional[int] = json.get('maxZone')
self.playerPlaybackAuditLogsSupport: Optional[bool] = json.get('playerPlaybackAuditLogsSupport')
self.scalaIntegrationAccess: Optional[bool] = json.get('scalaIntegrationAccess')
self.scalaScriptSupport: Optional[bool] = json.get('scalaScriptSupport')
self.statusMonitoring: Optional[str] = json.get('statusMonitoring')
self.total: Optional[int] = json.get('total')
self.triggersSupport: Optional[bool] = json.get('triggersSupport')
self.type: Optional[str] = json.get('type')
self.used: Optional[int] = json.get('used')
self.videoSupport: Optional[bool] = json.get('videoSupport')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class MetadataValue:
def __init__(self, player: ContentManager.Player, json: Optional[Dict[str, Any]]) -> None:
self.player = player
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
player_metadata_id = get_id(json.get('playerMetadata'))
self.id: Optional[int] = json.get('id')
self.playerMetadata: Optional[ContentManager.PlayerMetadata] = self.player.cm.player_metadatas.get(player_metadata_id)
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['player']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlayerList(MutableSequence[Player]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Player]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Player] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/players')
for elem in response.get('list', []):
item = ContentManager.Player(self.cm, elem)
item.unpack_json_state(self.cm.request('get', f'/players/{item.id}/state', debug_key="player_state"))
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Player]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Player with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Player) -> None:
self.__data.append(value)
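# The list is populated from GET /players, and each player is enriched with
# GET /players/{id}/state, so state fields (host, ip, lastReported, planState, ...) are
# available alongside the configuration. Sketch (assumes cm.players holds this list):
#   for p in cm.players:
#       print(p.name, p.state, p.lastReported)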
""" PLAYLIST """
class Playlist:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.media: List[ContentManager.Media] = []
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
created_by_id = get_id(json.get('createdBy'))
modified_by_id = get_id(json.get('modifiedBy'))
self.asSubPlaylistsCount: int = json.get('asSubPlaylistsCount', 0)
self.campaignChannelsCount: Optional[int] = json.get('campaignChannelsCount')
self.campaignMessagesCount: Optional[int] = json.get('campaignMessagesCount')
self.campaignPlaylist: Optional[bool] = json.get('campaignPlaylist')
self.categories: List[ContentManager.Category] = get_list(json.get('categories'), self.cm.categories)
self.channelsCount: int = json.get('channelsCount', 0)
self.controlledByAdManager: Optional[bool] = json.get('controlledByAdManager')
self.createdBy: Optional[ContentManager.User] = self.cm.users.get(created_by_id)
self.createdByName: Optional[str] = json.get('createdByName')
self.createdDate: Optional[str] = json.get('createdDate')
self.description: Optional[str] = json.get('description')
self.durationCalculationCompleted: Optional[bool] = json.get('durationCalculationCompleted')
self.enableSmartPlaylist: Optional[bool] = json.get('enableSmartPlaylist')
self.extSourceDuration: Optional[int] = json.get('extSourceDuration')
self.healthy: Optional[bool] = json.get('healthy')
self.htmlDuration: Optional[int] = json.get('htmlDuration')
self.id: Optional[int] = json.get('id')
self.imageDuration: Optional[int] = json.get('imageDuration')
self.itemCount: int = json.get('itemCount', 0)
self.lastModified: Optional[str] = json.get('lastModified')
self.maxDuration: Optional[int] = json.get('maxDuration')
self.messagesCount: int = json.get('messagesCount', 0)
self.minDuration: Optional[int] = json.get('minDuration')
self.modifiedBy: Optional[ContentManager.User] = self.cm.users.get(modified_by_id)
self.modifiedByName: Optional[str] = json.get('modifiedByName')
self.name: Optional[str] = json.get('name')
self.pickPolicy: Optional[str] = json.get('pickPolicy')
self.playlistItems: List[ContentManager.Playlist.PlaylistItem] = [ContentManager.Playlist.PlaylistItem(self, elem) for elem in json.get('playlistItems', [])]
self.playlistType: Optional[str] = json.get('playlistType')
self.prettifyDuration: Optional[str] = json.get('prettifyDuration')
self.priority: Optional[str] = json.get('priority')
self.problemsCount: Optional[int] = json.get('problemsCount')
self.readOnly: Optional[bool] = json.get('readOnly')
self.shuffleNoRepeatType: Optional[str] = json.get('shuffleNoRepeatType')
self.shuffleNoRepeatWithin: Optional[int] = json.get('shuffleNoRepeatWithin')
self.thumbnailDownloadPath: Optional[str] = json.get('thumbnailDownloadPath')
self.thumbnailDownloadPaths: ContentManager.Playlist.ThumbnailDownloadPaths = ContentManager.Playlist.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.transitionDuration: Optional[int] = json.get('transitionDuration')
self.warningsCount: Optional[int] = json.get('warningsCount')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def process(self, data: Union[int, ContentManager.Playlist], playlist_path: Optional[List[int]] = None):
# use None instead of a shared mutable default so stale entries from a previous call cannot trigger false loop detection
if playlist_path is None:
playlist_path = []
if isinstance(data, int):
playlist: Optional[ContentManager.Playlist] = self.cm.playlists.get(data)
id = data
else:
playlist = data
id = data.id
if id is None:
raise Exception("ID cannot be None")
if id in playlist_path:
raise Exception(f"Playlistloop detected {playlist_path}")
playlist_path.append(id)
if not playlist is None:
if playlist.playlistItems is None:
playlist_path.pop()
return
for playlistItem in playlist.playlistItems:
if not playlistItem.media is None:
if not playlistItem.media in self.media:
self.media.append(playlistItem.media)
if not playlistItem.subplaylist is None:
self.process(playlistItem.subplaylist, playlist_path)
playlist_path.pop()
def get_media(self) -> List[ContentManager.Media]:
if self.id is None:
raise Exception("Object needs to have ID")
self.process(self.id)
return self.media
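# Usage sketch (hypothetical playlist name):
#   pl = cm.playlists.get('Lobby Loop')
#   if pl is not None:
#       for m in pl.get_media():   # walks nested sub-playlists, raising on a playlist loop
#           print(m.name)
# get_media() accumulates into self.media and checks membership before appending, so calling
# it twice on the same object does not duplicate entries.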
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
del data['media']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlaylistItem:
def __init__(self, playlist: ContentManager.Playlist, json: Optional[Dict[str, Any]]) -> None:
json = json if not json is None else {}
self.playlist = playlist
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
media_id = get_id(json.get('media'))
sub_playlist_id = get_id(json.get('subplaylist'))
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.auditItem: Optional[bool] = json.get('auditItem')
self.conditions: List[ContentManager.Playlist.PlaylistItem.Condition] = [ContentManager.Playlist.PlaylistItem.Condition(self, elem) for elem in json.get('conditions', [])]
self.disabled: Optional[bool] = json.get('disabled')
self.duration: Optional[int] = json.get('duration')
self.durationHoursSeconds: Optional[str] = json.get('durationHoursSeconds')
self.endValidDate: Optional[str] = json.get('endValidDate')
self.id: Optional[int] = json.get('id')
self.inPoint: Optional[int] = json.get('inPoint')
self.media: Optional[ContentManager.Media] = self.playlist.cm.media.get(media_id)
self.meetAllConditions: Optional[bool] = json.get('meetAllConditions')
self.options: List[ContentManager.Playlist.PlaylistItem.Option] = [ContentManager.Playlist.PlaylistItem.Option(self, elem) for elem in json.get('options', [])]
self.outPoint: Optional[int] = json.get('outPoint')
self.playFullscreen: Optional[bool] = json.get('playFullscreen')
self.playlistItemType: Optional[str] = json.get('playlistItemType')
self.prettifyInPoint: Optional[str] = json.get('prettifyInPoint')
self.prettifyOutPoint: Optional[str] = json.get('prettifyOutPoint')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.startValidDate: Optional[str] = json.get('startValidDate')
self.status: Optional[List[str]] = json.get('status')
self.subPlaylistPickPolicy: Optional[int] = json.get('subPlaylistPickPolicy')
self.subplaylist: Optional[int] = sub_playlist_id
self.timeSchedules: List[ContentManager.Playlist.PlaylistItem.Schedule] = [ContentManager.Playlist.PlaylistItem.Schedule(self, elem) for elem in json.get('timeSchedules', [])]
self.useValidRange: Optional[bool] = json.get('useValidRange')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlist']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Schedule:
def __init__(self, playlistitem: ContentManager.Playlist.PlaylistItem, json: Dict[str, Any]) -> None:
self.playlistitem = playlistitem
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.days: Optional[List[str]] = json.get('days')
self.endTime: Optional[str] = json.get('endTime')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.startTime: Optional[str] = json.get('startTime')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlistitem']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Option:
def __init__(self, playlistitem: ContentManager.Playlist.PlaylistItem, json: Dict[str, Any]) -> None:
self.playlistitem = playlistitem
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.key: Optional[str] = json.get('key')
self.value: Optional[str] = json.get('value')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlistitem']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Condition:
def __init__(self, playlistitem: ContentManager.Playlist.PlaylistItem, json: Dict[str, Any]) -> None:
self.playlistitem = playlistitem
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.type: Optional[str] = json.get('type')
self.comparator: Optional[str] = json.get('comparator')
self.value: Optional[str] = json.get('value')
self.sortOrder: Optional[int] = json.get('sortOrder')
self.metadata: Optional[int] = get_id(json.get('metadata'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlistitem']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, playlist: ContentManager.Playlist, json: Optional[Dict[str, Any]]) -> None:
self.playlist = playlist
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['playlist']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class PlaylistList(MutableSequence[Playlist]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Playlist]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Playlist] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/playlists/all')
for elem in response.get('list', []):
item = ContentManager.Playlist(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Playlist]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Playlist with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Playlist) -> None:
self.__data.append(value)
""" RESOURCE """
class Resource:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.implicitResources: Optional[List[str]] = json.get('implicitResources')
self.name: Optional[str] = json.get('name')
self.parentId: Optional[int] = json.get('parentId')
self.sortOrder: Optional[int] = json.get('sortOrder')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ResourceList(MutableSequence[Resource]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Resource]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Resource] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/roles/resources')
for elem in response.get('resources', []):
item = ContentManager.Resource(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Resource]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Resource {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Resource) -> None:
self.__data.append(value)
""" ROLE """
class Role:
def __init__(self, cm: ContentManager, json: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.availableSeats: Optional[int] = json.get('availableSeats')
self.id: Optional[int] = json.get('id')
self.licenseRequirement: Optional[str] = json.get('licenseRequirement')
self.name: Optional[str] = json.get('name')
self.resources: List[ContentManager.Resource] = get_list(json.get('resources'), self.cm.resources)
self.system: Optional[bool] = json.get('system')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
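# Usage sketch (assumes the ContentManager exposes its RoleList as cm.roles; names are
# illustrative):
#   role = cm.roles.get('Administrator')
#   if role is not None and any(r.name == 'player' for r in role.resources):
#       ...   # the role includes the 'player' resource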
class RoleList(MutableSequence[Role]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Role]] = None) -> None:
super().__init__()
self.cm = cm
self.__data: List[ContentManager.Role] = []  # make sure the backing list exists before any lazy fetch
if init_list is None:
self.__get_data()
else:
self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/roles')
for elem in response.get('list', []):
item = ContentManager.Role(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Role]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Role with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Role) -> None:
self.__data.append(value)
""" TEMPLATE """
class Template:
def __init__(self, cm: ContentManager, data: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
modified_user_id = get_id(json.get('modifiedBy'))
uploaded_user_id = get_id(json.get('uploadedBy'))
self.approvalDetail: ContentManager.Template.ApprovalDetail = ContentManager.Template.ApprovalDetail(self, json.get('approvalDetail'))
self.approvalStatus: Optional[str] = json.get('approvalStatus')
self.archived: Optional[bool] = json.get('archived')
self.audioDucking: Optional[bool] = json.get('audioDucking')
self.campaignMedia: Optional[bool] = json.get('campaignMedia')
self.categories: List[ContentManager.Category] = get_list(json.get('categories'), self.cm.categories)
self.createdDate: Optional[str] = json.get('createdDate')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.generatingThumbnail: Optional[bool] = json.get('generatingThumbnail')
self.globalTemplateFields: List[ContentManager.Template.Field] = [ContentManager.Template.Field(self, elem) for elem in json.get('globalTemplateFields', [])]
self.height: Optional[int] = json.get('height')
self.id: Optional[int] = json.get('id')
self.lastModified: Optional[str] = json.get('lastModified')
self.length: Optional[int] = json.get('length')
self.mediaId: Optional[int] = json.get('mediaId')
self.mediaItemFiles: List[ContentManager.Template.ItemFile] = [ContentManager.Template.ItemFile(self, elem) for elem in json.get('mediaItemFiles', [])]
self.mediaType: Optional[str] = json.get('mediaType')
self.messagesCount: int = json.get('messagesCount', 0)
self.modifiedBy: Optional[ContentManager.User] = self.cm.users.get(modified_user_id)
self.name: Optional[str] = json.get('name')
self.neverArchive: Optional[bool] = json.get('neverArchive')
self.numberOfFields: int = json.get('numberOfFields', 0)
self.numberOfFiles: int = json.get('numberOfFiles', 0)
self.originalCreatedDate: Optional[str] = json.get('originalCreatedDate')
self.path: Optional[str] = json.get('path')
self.playFullscreen: Optional[bool] = json.get('playFullscreen')
self.playlistsCount: int = json.get('playlistsCount', 0)
self.prettifyDuration: Optional[str] = json.get('prettifyDuration')
self.prettifyLength: Optional[str] = json.get('prettifyLength')
self.prettifyType: Optional[str] = json.get('prettifyType')
self.readOnly: Optional[bool] = json.get('readOnly')
self.revision: Optional[int] = json.get('revision')
self.startValidDate: Optional[str] = json.get('startValidDate')
self.status: Optional[str] = json.get('status')
self.templateFields: List[ContentManager.Template.Field] = [ContentManager.Template.Field(self, elem) for elem in json.get('templateFields', [])]
self.templateVersion: Optional[int] = json.get('templateVersion')
self.templatesCount: int = json.get('templatesCount', 0)
self.thumbnailDownloadPaths: ContentManager.Template.ThumbnailDownloadPaths = ContentManager.Template.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.uploadType: Optional[str] = json.get('uploadType')
self.uploadedBy: Optional[ContentManager.User] = self.cm.users.get(uploaded_user_id)
self.validDateStatus: Optional[str] = json.get('validDateStatus')
self.webDavPath: Optional[str] = json.get('webDavPath')
self.width: Optional[int] = json.get('width')
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ApprovalDetail:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.approvalStatus: Optional[str] = json.get('approvalStatus')
user_id = get_id(json.get('user'))
self.user: Optional[ContentManager.User] = self.template.cm.users.get(user_id)
to_approve_id = get_id(json.get('toApprove'))
self.toApprove: Optional[ContentManager.User] = self.template.cm.users.get(to_approve_id)
by_approve_id = get_id(json.get('approvedBy'))
self.approvedBy: Optional[ContentManager.User] = self.template.cm.users.get(by_approve_id)
self.messageText: Optional[str] = json.get('messageText')
self.lastModified: Optional[str] = json.get('lastModified')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ItemFile:
def __init__(self, template: ContentManager.Template, json: Dict[str, Any]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.filename: Optional[str] = json.get('filename')
self.size: Optional[int] = json.get('size')
self.prettifySize: Optional[str] = json.get('prettifySize')
self.uploadDate: Optional[str] = json.get('uploadDate')
self.version: Optional[int] = json.get('version')
self.downloadPath: Optional[str] = json.get('downloadPath')
self.originalFilename: Optional[str] = json.get('originalFilename')
self.status: Optional[str] = json.get('status')
self.uploadedBy: Optional[str] = json.get('uploadedBy')
self.md5: Optional[str] = json.get('md5')
self.thumbnailDownloadPaths: ContentManager.Template.ItemFile.ThumbnailDownloadPaths = ContentManager.Template.ItemFile.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class ThumbnailDownloadPaths:
def __init__(self, itemfile: ContentManager.Template.ItemFile, json: Optional[Dict[str, Any]]) -> None:
self.itemfile = itemfile
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['itemfile']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
if data is None:
return data
return data
class Field:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.displayName: Optional[str] = json.get('displayName')
self.value: Optional[str] = json.get('value')
self.required: Optional[bool] = json.get('required')
self.type: Optional[str] = json.get('type')
self.editable: Optional[bool] = json.get('editable')
self.maxCharacters: Optional[int] = json.get('maxCharacters')
self.maxLines: Optional[int] = json.get('maxLines')
self.useDefault: Optional[bool] = json.get('useDefault')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class Page:
def __init__(self, template: ContentManager.Template, json: Optional[Dict[str, Any]]) -> None:
self.template = template
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.editable: Optional[bool] = json.get('editable')
self.thumbnailPageEnabled: Optional[bool] = json.get('thumbnailPageEnabled')
self.order: Optional[int] = json.get('order')
self.thumbnailPageNo: Optional[int] = json.get('thumbnailPageNo')
self.thumbnailDownloadPaths: ContentManager.Template.Page.ThumbnailDownloadPaths = ContentManager.Template.Page.ThumbnailDownloadPaths(self, json.get('thumbnailDownloadPaths'))
self.templateFields: List[ContentManager.Template.Page.Field] = [ContentManager.Template.Page.Field(self, elem) for elem in json.get('templateFields', [])]
self.idents: List[ContentManager.Template.Page.Ident] = [ContentManager.Template.Page.Ident(self, elem) for elem in json.get('idents', [])]
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['template']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class ThumbnailDownloadPaths:
def __init__(self, page: ContentManager.Template.Page, json: Optional[Dict[str, Any]]) -> None:
self.page = page
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.extraSmall: Optional[str] = json.get('extraSmall')
self.small: Optional[str] = json.get('small')
self.mediumSmall: Optional[str] = json.get('mediumSmall')
self.medium: Optional[str] = json.get('medium')
self.large: Optional[str] = json.get('large')
### UNUSED BY FIRST
self.custom: Optional[str] = json.get('custom')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['page']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class Field:
def __init__(self, page: ContentManager.Template.Page, json: Optional[Dict[str, Any]]) -> None:
self.page = page
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.displayName: Optional[str] = json.get('displayName')
self.value: Optional[str] = json.get('value')
self.required: Optional[bool] = json.get('required')
self.type: Optional[str] = json.get('type')
self.editable: Optional[bool] = json.get('editable')
self.maxCharacters: Optional[int] = json.get('maxCharacters')
self.maxLines: Optional[int] = json.get('maxLines')
self.useDefault: Optional[bool] = json.get('useDefault')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['page']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class Ident:
def __init__(self, page: ContentManager.Template.Page, json: Optional[Dict[str, Any]]) -> None:
self.page = page
json = json if not json is None else {}
self.unpack_json(json)
def unpack_json(self, json: Dict[str, Any]):
self.id: Optional[int] = json.get('id')
self.label: Optional[str] = json.get('label')
self.languageCode: Optional[str] = json.get('languageCode')
self.description: Optional[str] = json.get('description')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['page']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class TemplateList(MutableSequence[Template]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Template]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
    self.__data = []
    self.__get_data()
else:
    self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/templates')
for elem in response.get('list', []):
item = ContentManager.Template(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Template]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
logging.info(f'Template with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Template) -> None:
self.__data.append(value)
""" USER """
class User:
def __init__(self, cm: ContentManager, data: Dict[str, Any]) -> None:
self.cm = cm
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
self.authenticationMethod: Optional[str] = json.get('auhtenticationMethod')
self.canChangePassword: Optional[bool] = json.get('canChangePassword')
self.dateFormat: Optional[str] = json.get('dateFormat')
self.emailaddress: Optional[str] = json.get('emailaddress')
self.enabled: Optional[bool] = json.get('enabled')
self.firstname: Optional[str] = json.get('firstname')
self.forcePasswordChange: Optional[bool] = json.get('forcePasswordChange')
self.id: Optional[int] = json.get('id')
self.isAutoMediaApprover: Optional[bool] = json.get('isAutoMediaApprover')
self.isAutoMessageApprover: Optional[bool] = json.get('isAutoMessageApprover')
self.isSuperAdministrator: Optional[bool] = json.get('isSuperAdministrator')
self.isWebserviceUser: Optional[bool] = json.get('isWebserviceUser')
self.language: Optional[str] = json.get('language')
self.languageCode: Optional[str] = json.get('languageCode')
self.lastLogin: Optional[str] = json.get('lastLogin')
self.lastname: Optional[str] = json.get('lastname')
self.name: Optional[str] = json.get('name')
self.oldPassword: Optional[str] = json.get('oldPassword')
self.password: Optional[str] = json.get('password')
self.passwordLastChanged: Optional[str] = json.get('passwordLastChanged')
self.receiveApprovalEmails: Optional[bool] = json.get('receiveApprovalEmails')
self.receiveEmailAlerts: Optional[bool] = json.get('receiveEmailAlerts')
self.roles: List[ContentManager.Role] = get_list(json.get('roles'), self.cm.roles)
self.theme: Optional[str] = json.get('theme')
self.timeFormat: Optional[str] = json.get('timeFormat')
self.userAccountWorkgroups: List[ContentManager.Workgroup] = get_list(json.get('userAccountWorkgroups'), self.cm.workgroups)
self.username: Optional[str] = json.get('username')
self.workgroup: Optional[int] = get_id(json.get('workgroup')) #DEPRECATED
self.workgroups: List[ContentManager.Workgroup] = get_list(json.get('workgroups'), self.cm.workgroups)
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
#testing
pass
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class UserList(MutableSequence[User]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.User]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
    self.__data = []
    self.__get_data()
else:
    self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/users')
for elem in response.get('list', []):
item = ContentManager.User(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.User]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.username == search:
return elem
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.username == search:
return elem
logging.info(f'User with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.User) -> None:
self.__data.append(value)
""" WORKGROUP """
class Workgroup:
def __init__(self, cm: ContentManager, data: Optional[Dict[str, Any]]) -> None:
self.cm = cm
data = data if not data is None else {}
self.unpack_json(data)
def unpack_json(self, json: Dict[str, Any]):
self.children: List[ContentManager.Workgroup] = [ContentManager.Workgroup(self.cm, elem) for elem in json.get('children', [])]
self.description: Optional[str] = json.get('description')
self.id: Optional[int] = json.get('id')
self.name: Optional[str] = json.get('name')
self.owner: Optional[bool] = json.get('owner')
self.parentId: Optional[int] = json.get('parentId')
self.parentName: Optional[str] = json.get('parentName')
self.userCount: Optional[int] = json.get('userCount')
def json(self, **kwargs: bool):
data = vars(self)
data = data.copy()
del data['cm']
for name, use in kwargs.items():
if name == 'player_update' and use:
data = {k:v for k,v in data.items() if k in ['id']}
for k, v in data.items():
try:
if isinstance(data[k], list):
for i, elem in enumerate(data[k]):
try:
data[k][i] = elem.json(**kwargs)
except Exception:
continue
else:
data[k] = v.json(**kwargs)
except Exception:
continue
data = clean_data(data)
return data
class WorkgroupList(MutableSequence[Workgroup]):
def __init__(self, cm: ContentManager, init_list: Optional[List[ContentManager.Workgroup]] = None) -> None:
super().__init__()
self.cm = cm
if init_list is None:
    self.__data = []
    self.__get_data()
else:
    self.__data = init_list
def __get_data(self):
response: Dict[str, Any] = self.cm.request('get', '/workgroups')
for elem in response.get('list', []):
item = ContentManager.Workgroup(self.cm, elem)
self.__data.append(item)
def get(self, search: Union[int, str, None]) -> Optional[ContentManager.Workgroup]:
if search is None:
return None
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
temp = search_children(search, elem.children, "id", "name", "children")
if not temp is None:
return temp
self.__get_data()
for elem in self.__data:
if isinstance(search, int):
if elem.id == search:
return elem
else:
if elem.name == search:
return elem
temp = search_children(search, elem.children, "id", "name", "children")
if not temp is None:
return temp
logging.info(f'Workgroup with {search} not found')
return None
def __len__(self) -> int:
return len(self.__data)
def __iter__(self):
if len(self.__data) == 0:
self.__get_data()
for elem in self.__data:
yield elem
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__class__(self.cm, self.__data[i])
else:
return self.__data[i]
def __delitem__(self, i: int):
del self.__data[i]
def __setitem__(self, i: int, value):
self.__data[i] = value
def insert(self, i: int, value) -> None:
self.__data.insert(i, value)
def append(self, value: ContentManager.Workgroup) -> None:
self.__data.append(value)
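# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming `cm` is an already-constructed ContentManager
# instance with working credentials; the *List helpers lazily fetch from the
# API (e.g. GET /templates) on first access and support lookup by id or name.
def _example_template_lookup(cm):
    templates = ContentManager.TemplateList(cm)
    template = templates.get('My Template')  # 'My Template' is a hypothetical name
    if template is None:
        return None
    return template.json()  # plain-dict form with None values cleaned out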
|
/scala_wrapper-0.0.6-py3-none-any.whl/scala/content_manager/__init__.py
| 0.741206 | 0.171061 |
__init__.py
|
pypi
|
Cuckoo filters
--------------
A Cuckoo filter is a data structure for probabilistic set-membership
queries with a low false positive probability (FPP). As an improvement
over the classic Bloom filter, items can be added to or removed from a
Cuckoo filter at will. A Cuckoo filter also utilizes space more
efficiently.
Cuckoo filters were originally described in:
Fan, B., Andersen, D. G., Kaminsky, M., & Mitzenmacher, M. D. (2014, December).
Cuckoo filter: Practically better than bloom.
In Proceedings of the 10th ACM International on Conference on emerging Networking
Experiments and Technologies (pp. 75-88). ACM.
This package implements the basic Cuckoo filter as well as several
derivations built on top of it.
Installation
------------
The package can be installed from
[PyPI](https://pypi.org/project/scalable-cuckoo-filter)
```
pip install scalable-cuckoo-filter
```
Classic Cuckoo filter
---------------------
```python
import math
from random import randrange
from cuckoo.filter import CuckooFilter
capacity = 1000000
error_rate = 0.000001
# Create a classic Cuckoo filter with a fixed capacity of 1000000
# buckets
cuckoo = CuckooFilter(capacity=capacity, error_rate=error_rate)
bucket_size = 6
# Setting the bucket size is optional; the bigger the bucket,
# the more items a filter can hold, and the longer the fingerprint
# needs to be to stay at the same error rate
cuckoo = CuckooFilter(capacity=capacity, error_rate=error_rate, bucket_size=bucket_size)
# The fingerprint length is computed using the following formula:
fingerprint_size = int(math.ceil(math.log(1.0 / error_rate, 2) + math.log(2 * bucket_size, 2)))
for _ in range(1, 100000):
item = str(randrange(1, 1000000000))
cuckoo.insert(item)
if cuckoo.contains(item):
print('{} has been added'.format(item))
cuckoo.delete(item)
if not cuckoo.contains(item):
print('{} has been removed'.format(item))
```
Bitarray Cuckoo filter
----------------------
A classic Cuckoo filter is implemented using a Python list of Bucket
objects. Each Bucket, in turn, stores a fixed list of fingerprints. The
implementation is easy and straightforward but unnecessarily wasteful
for applications that need to keep hundreds of millions of items.
The bitarray Cuckoo filter is built to deal with such situations. The
whole filter is compressed into a bitarray to minimize memory usage.
For example, a bitarray Cuckoo filter with a capacity of 100,000,000,
a bucket size of 4, and an error rate of 0.000001 requires:
- a 23-bit fingerprint, computed using the above formula.
- 9,200,000,000 bits (roughly 1.1 GB) = capacity * bucket size * fingerprint size.
And it can theoretically store up to 400,000,000 items at full capacity.
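As a quick back-of-the-envelope check (not part of the original package), the
numbers above follow directly from the fingerprint formula:
```python
import math
capacity = 100000000      # number of buckets
bucket_size = 4
error_rate = 0.000001
fingerprint_size = int(math.ceil(math.log(1.0 / error_rate, 2) + math.log(2 * bucket_size, 2)))
total_bits = capacity * bucket_size * fingerprint_size
print(fingerprint_size)   # 23
print(total_bits)         # 9200000000 bits, roughly 1.1 GB
```
The usage below mirrors the classic filter; only the class name changes: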
```python
import math
from random import randrange
from cuckoo.filter import BCuckooFilter
capacity = 1000000
error_rate = 0.000001
# Create a bit array Cuckoo filter with a fixed capacity of 1000000
# buckets
cuckoo = BCuckooFilter(capacity=capacity, error_rate=error_rate)
bucket_size = 6
# Setting the bucket size is optional; the bigger the bucket,
# the more items a filter can hold, and the longer the fingerprint
# needs to be to stay at the same error rate
cuckoo = BCuckooFilter(capacity=capacity, error_rate=error_rate, bucket_size=bucket_size)
# The fingerprint length is computed using the following formula:
fingerprint_size = int(math.ceil(math.log(1.0 / error_rate, 2) + math.log(2 * bucket_size, 2)))
for _ in range(1, 100000):
item = str(randrange(1, 1000000000))
cuckoo.insert(item)
if cuckoo.contains(item):
print('{} has been added'.format(item))
cuckoo.delete(item)
if not cuckoo.contains(item):
print('{} has been removed'.format(item))
```
Scalable Cuckoo filter
----------------------
Having a fixed capacity is a problem when using a Cuckoo filter in
practice. Allocate too much capacity and it goes to waste; allocate too
little and the filter's performance degrades while false positives
increase. Therefore, the scalable Cuckoo filter is introduced as an
attempt to solve the fixed-capacity issue.
Inspired by the Scalable Bloom filter, the scalable Cuckoo filter
utilizes multiple filters to scale the capacity dynamically. When an
existing filter approaches its capacity, a new one, double in size, will
be created. A scalable Cuckoo filter handles all the usual operations
seamlessly and transparently.
Internally, the scalable Cuckoo filter uses bitarray Cuckoo filters for
efficiency, although this can be changed easily.
```python
import math
from random import randrange
from cuckoo.filter import ScalableCuckooFilter
initial_capacity = 1000000
error_rate = 0.000001
# Create a scalable Cuckoo filter with an initial capacity of 1000000
# buckets
cuckoo = ScalableCuckooFilter(initial_capacity=initial_capacity, error_rate=error_rate)
bucket_size = 6
# Setting the bucket size is optional; the bigger the bucket,
# the more items a filter can hold, and the longer the fingerprint
# needs to be to stay at the same error rate
cuckoo = ScalableCuckooFilter(initial_capacity=initial_capacity, error_rate=error_rate, bucket_size=bucket_size)
# The fingerprint length is computed using the following formula:
fingerprint_size = int(math.ceil(math.log(1.0 / error_rate, 2) + math.log(2 * bucket_size, 2)))
for _ in range(1, 100000):
item = str(randrange(1, 1000000000))
cuckoo.insert(item)
if cuckoo.contains(item):
print('{} has been added'.format(item))
cuckoo.delete(item)
if not cuckoo.contains(item):
print('{} has been removed'.format(item))
```
|
/scalable-cuckoo-filter-1.1.tar.gz/scalable-cuckoo-filter-1.1/README.md
| 0.614857 | 0.879147 |
README.md
|
pypi
|
import yaml
from marshmallow import Schema, fields, EXCLUDE, validates_schema
from marshmallow.exceptions import ValidationError
class ExcludeUnknownSchema(Schema):
""" Remove unknown keys from loaded dictionary
"""
class Meta:
""" Exclude unknown properties.
"""
unknown = EXCLUDE
class MetadataSchema(Schema):
""" Schema for a pipeline's metadata object.
"""
queue = fields.String(required=True,
description="Default queue for all pipeline tasks.",
example="default-queue-name")
processorQueue = fields.String(
required=False,
description="Default processor queue for all pipeline tasks.",
example="default-processor-queue-name",
default="celery"
)
maxRetry = fields.Integer(
required=False,
description="A number. Maximum number of retries before giving up. "
"A value of None means task will retry forever. "
"By default, this option is set to 3.",
default=3,
example=3)
maxTtl = fields.Integer(required=False,
description="The soft time limit, in seconds, "
"for this task. When not set the "
"workers default is used. The hard "
"time limit will be derived from this"
"field, by adding 10 seconds.",
default=60,
example=60)
retryBackoff = fields.Integer(
required=False,
description="A number. If this option is set , it is used as a delay"
" factor. For example, if this option is set to 3, the"
" first retry will delay 3 seconds, the second will delay"
" 6 seconds, the third will delay 12 seconds, the fourth"
" will delay 24 seconds, and so on. By default, this"
" option is set to False, and autoretries will not"
" be delayed.",
default=3,
example=3)
retryJitter = fields.Boolean(
required=False,
description="A boolean. Jitter is used to introduce randomness into "
"exponential backoff delays, to prevent all tasks in the "
"queue from being executed simultaneously. If this option "
"is set to True, the delay value calculated by "
"retry_backoff is treated as a maximum, and the actual "
"delay value will be a random number between zero and that "
"maximum. By default, this option is set to True.",
default=False,
example=True)
retryBackoffMax = fields.Integer(
required=False,
description="A boolean. Jitter is used to introduce randomness into "
"exponential backoff delays, to prevent all tasks in the "
"queue from being executed simultaneously. If this option "
"is set to True, the delay value calculated by "
"retry_backoff is treated as a maximum, and the actual "
"delay value will be a random number between zero and "
"that maximum. By default, this option is set to True.",
default=600,
example=600)
class TaskDefinitionsSchema(ExcludeUnknownSchema):
""" Schema for a single task's configuration
"""
handler = fields.String(required=True,
description="Path to the worker task definition",
example="client.workers.my_task")
maxTtl = fields.Integer(required=False,
description="Max TTL for a task in seconds.",
default=60,
example=60)
queue = fields.String(required=False,
description="Non-default queue for this task.",
example="custom-queue-name")
class PipelineConfigSchemaV1(Schema):
""" Overall pipeline configuration schema
"""
metadata = fields.Nested(
MetadataSchema,
required=True,
description="Metadata and configuration information for this pipeline."
)
dagAdjacency = fields.Dict(
keys=fields.String(
required=True,
description=
"Task's node name. *MUST* match key in taskDefinitions dict.",
example="node_a"),
values=fields.List(
fields.String(
required=True,
description=
"Task's node name. *Must* match key in taskDefinitions dict.")
),
required=True,
description="The DAG Adjacency definition.")
taskDefinitions = fields.Dict(
keys=fields.String(
required=True,
description=
"Task's node name. *Must* match related key in dagAdjacency.",
example="node_a"),
values=fields.Nested(
TaskDefinitionsSchema,
required=True,
description="Definition of each task in the pipeline.",
example={
'handler': 'abc.task',
'maxRetry': 1
}),
required=True,
description="Configuration for each node defined in DAG.")
class BasePipelineSchema(ExcludeUnknownSchema):
__schema_version__ = None
name = fields.String(required=True, description="Pipeline name")
description = fields.String(required=False, missing=None,
description="Description of the pipeline.",
example="A valuable pipeline.")
schemaVersion = fields.Integer(required=True)
config = fields.Dict(required=True)
@classmethod
def get_by_version(cls, version):
for subclass in cls.__subclasses__():
if subclass.__schema_version__ == version:
return subclass
return None
@classmethod
def get_latest(cls):
max_version = 0
max_class = None
for subclass in cls.__subclasses__():
if subclass.__schema_version__ > max_version:
max_version = subclass.__schema_version__
max_class = subclass
return max_class
@validates_schema
def validate_pipeline(self, data, **kwargs):
schema_version = data['schemaVersion']
PipelineSchema = BasePipelineSchema.get_by_version(schema_version)
schema = PipelineSchema(exclude=['name', 'description'])
schema.load(data)
class PipelineSchemaV1(BasePipelineSchema):
__schema_version__ = 1
class Meta:
unknown = EXCLUDE
config = fields.Nested(
PipelineConfigSchemaV1,
required=True,
description="Metadata and configuration information for this pipeline."
)
def validate_pipeline(self, data, **kwargs):
# We need to add this function to avoid infinite recursion since
# the BasePipelineSchema class above uses the same method for
# validation
pass
class PipelineConfigValidator(object):
""" Validate a pipeline configuration.
This is stored as a string in the database under `PipelineConfig.config`
in order to keep it easy for custom features to be added over time.
This model represents the required / valid features so we can
programmatically validate when saving, updating, viewing.
"""
def __init__(self, config_dict: dict = None, config_yaml: str = None,
schema_version: int = None):
super().__init__()
# We validate this as a dictionary. Turn into dictionary if provided
# as yaml.
if config_dict is not None:
self.config = config_dict
elif config_yaml is not None:
self.config = yaml.safe_load(config_yaml)
if schema_version is None:
PipelineSchema = BasePipelineSchema.get_latest()
else:
PipelineSchema = BasePipelineSchema.get_by_version(schema_version)
self.is_valid = False
self.validated_config = {}
self.validation_errors = {}
try:
# https://github.com/marshmallow-code/marshmallow/issues/377
# See issue above when migrating to marshmallow 3
pcs = PipelineSchema._declared_fields['config'].schema
self.validated_config = pcs.load(self.config)
self.is_valid = True
except ValidationError as e:
self.validation_errors = e.messages
raise e
except Exception as e:
raise e
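# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of validating a config shaped like PipelineConfigSchemaV1:
# `metadata`, `dagAdjacency` and `taskDefinitions` are the required top-level
# keys; the handler paths below are hypothetical.
def _example_validate_config():
    config = {
        'metadata': {'queue': 'default-queue-name'},
        'dagAdjacency': {'node_a': ['node_b'], 'node_b': []},
        'taskDefinitions': {
            'node_a': {'handler': 'client.workers.task_a'},
            'node_b': {'handler': 'client.workers.task_b'},
        },
    }
    validator = PipelineConfigValidator(config_dict=config, schema_version=1)
    return validator.is_valid, validator.validated_config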
|
/scalable-pypeline-1.2.3.tar.gz/scalable-pypeline-1.2.3/pypeline/pipeline_config_schema.py
| 0.827654 | 0.262074 |
pipeline_config_schema.py
|
pypi
|
import re
import os
import logging
import pkg_resources
import yaml
from yaml.loader import SafeLoader
from marshmallow import Schema, fields, pre_load, EXCLUDE, INCLUDE,\
validates_schema
from marshmallow.validate import OneOf
from marshmallow.exceptions import ValidationError
from pypeline.utils.module_utils import SermosModuleLoader, normalized_pkg_name
from pypeline.constants import SERMOS_YAML_PATH, SERMOS_CLIENT_PKG_NAME
from pypeline.pipeline_config_schema import BasePipelineSchema
from pypeline.schedule_config_schema import BaseScheduleSchema
logger = logging.getLogger(__name__)
class InvalidPackagePath(Exception):
pass
class InvalidSermosConfig(Exception):
pass
class MissingSermosConfig(Exception):
pass
class ExcludeUnknownSchema(Schema):
class Meta:
unknown = EXCLUDE
class NameSchema(Schema):
""" Validated name string field.
"""
name = fields.String(
required=True,
description="Name for service or image. Must include "
"only alphanumeric characters along with `_` and `-`.",
example="my-service-name")
@pre_load
def validate_characters(self, item, **kwargs):
""" Ensure name field conforms to allowed characters
"""
valid_chars = r'^[\w\d\-\_]+$'
if not bool(re.match(valid_chars, item['name'])):
raise ValueError(
f"Invalid name: {item['name']}. Only alphanumeric characters "
"allowed along with `-` and `_`.")
return item
class SermosRegisteredTaskDetailConfigSchema(Schema):
handler = fields.String(
required=True,
description="Full path to the Method handles work / pipeline tasks.",
example="sermos_customer_client.workers.worker_group.useful_worker")
event = fields.Raw(
required=False,
unknown=INCLUDE,
description="Arbitrary user data, passed through `event` arg in task.")
class SermosCeleryWorkerConfigSchema(Schema):
""" Attributes for a celery worker. This worker will run all of the
pipelines and scheduled tasks.
"""
registeredTasks = fields.List(
fields.Nested(SermosRegisteredTaskDetailConfigSchema, required=True),
required=False,
_required=True,
description="List of task handlers to register for to your Sermos app."
)
class SermosServiceConfigSchema(ExcludeUnknownSchema,
SermosCeleryWorkerConfigSchema, NameSchema):
""" Base service config object definition for workers.
"""
pass
class SermosYamlSchema(ExcludeUnknownSchema):
""" The primary `sermos.yaml` file schema. This defines all available
properties in a valid Sermos configuration file.
"""
serviceConfig = fields.List(
fields.Nested(SermosServiceConfigSchema,
required=True,
description="Core service configuration."),
description="List of workers for Sermos to manage.",
required=True)
pipelines = fields.Dict(keys=fields.String(),
values=fields.Nested(BasePipelineSchema),
description="List of pipelines",
required=False)
scheduledTasks = fields.Dict(keys=fields.String(),
values=fields.Nested(BaseScheduleSchema),
description="List of scheduled tasks",
required=False)
def validate_errors(self, schema: Schema, value: dict):
""" Run Marshmallow validate() and raise if any errors
"""
schema = schema()
errors = schema.validate(value)
if len(errors.keys()) > 0:
raise ValidationError(errors)
@validates_schema
def validate_schema(self, data, **kwargs):
""" Additional validation.
Nested fields that are not required are not validated by Marshmallow
by default. Do a single level down of validation for now.
imageConfig can provide *either* an install command for Sermos
to use to build the image for customer *or* a Docker repository
for Sermos to pull.
"""
# Validate nested
key_schema_pairs = (
('serviceConfig', SermosServiceConfigSchema),
)
for k_s in key_schema_pairs:
val = data.get(k_s[0], None)
if val is not None:
if type(val) == list:
for v in val:
self.validate_errors(k_s[1], v)
else:
self.validate_errors(k_s[1], val)
# Validate the services. We list every service schema field as not
# required in order to use them as mixins for a generic service object,
# however, they ARE required, so validate here using the custom
# metadata property `_required`. Default to value of `required`.
for service in data.get('serviceConfig'):
schema = SermosCeleryWorkerConfigSchema
for field in schema().fields:
try:
if schema().fields[field].metadata.get(
'_required',
getattr(schema().fields[field], 'required')):
assert field in service
except AssertionError:
raise ValidationError(
f"`{field}` missing in worker definition.")
# Validate unique pipeline ids
if 'pipelines' in data:
pipeline_ids = set()
for pipeline_id, pipeline_data in data['pipelines'].items():
if pipeline_id in pipeline_ids:
raise ValidationError("All pipeline ids must be unique!")
pipeline_ids.add(pipeline_id)
schema_version = pipeline_data['schemaVersion']
PipelineSchema = \
BasePipelineSchema.get_by_version(schema_version)
self.validate_errors(PipelineSchema, pipeline_data)
# Validate unique scheduled tasks names
if 'scheduledTasks' in data:
task_ids = set()
for task_id, task_data in data['scheduledTasks'].items():
if task_id in task_ids:
raise ValidationError("All schedule ids must be unique!")
task_ids.add(task_id)
schema_version = task_data['schemaVersion']
TaskSchema = BaseScheduleSchema.get_by_version(schema_version)
self.validate_errors(TaskSchema, task_data)
class YamlPatternConstructor():
""" Adds a pattern resolver + constructor to PyYaml.
Typical/default usage is for parsing environment variables
in a yaml file but this can be used for any pattern you provide.
See: https://pyyaml.org/wiki/PyYAMLDocumentation
"""
def __init__(self,
env_var_pattern: str = None,
add_constructor: bool = True):
self.env_var_pattern = env_var_pattern
if self.env_var_pattern is None:
# Default pattern is: ${VAR:default}
self.env_var_pattern = r'^\$\{(.*)\}$'
self.path_matcher = re.compile(self.env_var_pattern)
if add_constructor:
self.add_constructor()
def _path_constructor(self, loader, node):
""" Extract the matched value, expand env variable,
and replace the match
TODO: Would need to update this (specifically the parsing) if any
pattern other than our default (or a highly compatible variation)
is provided.
"""
# Try to match the correct env variable pattern in this node's value
# If the value does not match the pattern, return None (which means
# this node will not be parsed for ENV variables and instead just
# returned as-is).
env_var_name = re.match(self.env_var_pattern, node.value)
try:
env_var_name = env_var_name.group(1)
except AttributeError:
return None
# If we get down here, then the 'node.value' matches our specified
# pattern, so try to parse. env_var_name is the value inside ${...}.
# Split on `:`, which is our delimiter for default values.
env_var_name_split = env_var_name.split(':')
# Attempt to retrieve the environment variable...from the environment
env_var = os.environ.get(env_var_name_split[0], None)
if env_var is None: # Nothing found in environment
# If a default was provided (e.g. VAR:default), return that.
# We join anything after first element because the default
# value might be a URL or something with a colon in it
# which would have 'split' above
if len(env_var_name_split) > 1:
return ":".join(env_var_name_split[1:])
return 'unset' # Return 'unset' if not in environ nor default
return env_var
def add_constructor(self):
""" Initialize PyYaml with ability to resolve/load environment
variables defined in a yaml template when they exist in
the environment.
Add to SafeLoader in addition to standard Loader.
"""
# Add the `!env_var` tag to any scalar (value) that matches the
# pattern self.path_matcher. This allows the template to be much more
# intuitive vs needing to add !env_var to the beginning of each value
yaml.add_implicit_resolver('!env_var', self.path_matcher)
yaml.add_implicit_resolver('!env_var',
self.path_matcher,
Loader=SafeLoader)
# Add constructor for the tag `!env_var`, which is a function that
# converts a node of a YAML representation graph to a native Python
# object.
yaml.add_constructor('!env_var', self._path_constructor)
yaml.add_constructor('!env_var',
self._path_constructor,
Loader=SafeLoader)
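# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the ${VAR:default} resolution registered above: once the
# constructor is installed, any scalar matching the pattern is expanded from
# the environment, falling back to the default after the first colon (or the
# literal string 'unset' when neither exists). BROKER_URL is a hypothetical
# variable name.
def _example_env_var_resolution():
    YamlPatternConstructor()
    return yaml.safe_load('broker: ${BROKER_URL:redis://localhost:6379/0}')
    # -> {'broker': 'redis://localhost:6379/0'} when BROKER_URL is unset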
def parse_config_file(sermos_yaml: str):
""" Parse the `sermos.yaml` file when it's been loaded.
Arguments:
sermos_yaml (required): String of loaded sermos.yaml file.
"""
YamlPatternConstructor() # Add our env variable parser
try:
sermos_yaml_schema = SermosYamlSchema()
# First suss out yaml issues
sermos_config = yaml.safe_load(sermos_yaml)
# Then schema issues
sermos_config = sermos_yaml_schema.load(sermos_config)
except ValidationError as e:
msg = "Invalid Sermos configuration due to {}"\
.format(e.messages)
logger.error(msg)
raise InvalidSermosConfig(msg)
except Exception as e:
msg = "Invalid Sermos configuration, likely due to invalid "\
"YAML formatting ..."
logger.exception("{} {}".format(msg, e))
raise InvalidSermosConfig(msg)
return sermos_config
def _get_pkg_name(pkg_name: str) -> str:
""" Retrieve the normalized package name.
"""
if pkg_name is None:
pkg_name = SERMOS_CLIENT_PKG_NAME # From environment
if pkg_name is None:
return None
return normalized_pkg_name(pkg_name)
def load_sermos_config(pkg_name: str = None,
sermos_yaml_filename: str = None,
as_dict: bool = True):
""" Load and parse the `sermos.yaml` file. Issue usable exceptions for
known error modes so bootstrapping can handle appropriately.
Arguments:
pkg_name (required): Directory name for your Python
package. e.g. my_package_name . If none provided, will check
environment for `SERMOS_CLIENT_PKG_NAME`. If not found,
will exit.
sermos_yaml_filename (optional): Relative path to find your
`sermos.yaml` configuration file. Defaults to `sermos.yaml`
which should be found inside your `pkg_name`
as_dict (optional): If true (default), return the loaded sermos
configuration as a dictionary. If false, return the loaded
string value of the yaml file.
"""
if sermos_yaml_filename is None:
sermos_yaml_filename = SERMOS_YAML_PATH
logger.info(f"Loading `sermos.yaml` from package `{pkg_name}` "
f"and file location `{sermos_yaml_filename}` ...")
sermos_config = None
pkg_name = _get_pkg_name(pkg_name)
if pkg_name is None: # Nothing to retrieve at this point
logger.warning("Unable to retrieve sermos.yaml configuration ...")
return sermos_config
try:
sermos_config_path = pkg_resources.resource_filename(
pkg_name, sermos_yaml_filename)
except Exception as e:
msg = "Either pkg_name ({}) or sermos_yaml_filename ({}) is "\
"invalid ...".format(pkg_name, sermos_yaml_filename)
logger.error("{} ... {}".format(msg, e))
raise InvalidPackagePath(e)
try:
with open(sermos_config_path, 'r') as f:
sermos_yaml = f.read()
sermos_config = parse_config_file(sermos_yaml)
except InvalidSermosConfig as e:
raise
except FileNotFoundError as e:
msg = "Sermos config file could not be found at path {} ...".format(
sermos_config_path)
raise MissingSermosConfig(msg)
except Exception as e:
raise e
if as_dict:
return sermos_config
return yaml.safe_dump(sermos_config)
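# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming `my_package_name` is an installed package that
# bundles a `sermos.yaml` at the default location.
def _example_load_config():
    config = load_sermos_config(pkg_name='my_package_name')
    if config is None:
        return []
    return list(config.get('pipelines', {}).keys())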
def load_client_config_and_version(pkg_name: str = None,
sermos_yaml_filename: str = None):
""" Load and parse the `sermos.yaml` file and a client package's version.
Arguments:
pkg_name (required): Directory name for your Python
package. e.g. my_package_name . If none provided, will check
environment for `SERMOS_CLIENT_PKG_NAME`. If not found,
will exit.
sermos_yaml_filename (optional): Relative path to find your
`sermos.yaml` configuration file. Defaults to `sermos.yaml`
which should be found inside your `pkg_name`
For this to work properly, the provided package must be installed in the
same environment as this Sermos package and it must have a `__version__`
variable inside its `__init__.py` file, e.g. `__version__ = '0.0.0'`
"""
sermos_config = None
client_version = None
pkg_name = _get_pkg_name(pkg_name)
try:
loader = SermosModuleLoader()
pkg = loader.get_module(pkg_name + '.__init__')
client_version = getattr(pkg, '__version__', '0.0.0')
sermos_config = load_sermos_config(pkg_name, sermos_yaml_filename)
except MissingSermosConfig as e:
logger.error(e)
except InvalidSermosConfig as e:
logger.error(e)
except InvalidPackagePath as e:
logger.error(e)
except Exception as e:
logger.error("Unable to load client's pkg __version__ or "
"{} config file for package: {} ... {}".format(
sermos_yaml_filename, pkg_name, e))
return sermos_config, client_version
|
/scalable-pypeline-1.2.3.tar.gz/scalable-pypeline-1.2.3/pypeline/sermos_yaml.py
| 0.615666 | 0.182881 |
sermos_yaml.py
|
pypi
|
import os
from boto3 import Session
import logging
logger = logging.getLogger(__name__)
class KeyGenerator(object):
""" Common functions for key generators.
"""
def __init__(self):
super(KeyGenerator, self).__init__()
self.hidden_files = ('.DS_Store', '.git', 'Icon', '.Dropbox')
def get_file_key(self, file_obj):
""" Required for each specific generator - how to extract key
"""
return file_obj
def get_file_name(self, file_obj):
""" Required for each specific generator - how to extract file name
"""
return file_obj
def get_file_size(self, base_path, file_obj):
""" Required for each specific generator - how to find file size (BYTES)
"""
return 0
def get_final_path(self, base_path, file_name, return_full_path):
""" Required for each specific generator - create final file path that
is added to list.
"""
if return_full_path:
return os.path.normpath(base_path + '/' + file_name)
return file_name
def list_iterator(self, all_files, base_path, limit=None, offset=None,
size_limit=None, return_full_path=True,
skip_common_hidden=True):
""" accept vars from everywhere to handle offset/limit/size logic
"""
filtered_files = []
try:
# Compile list of all files within limit/offset if those exist
idx = -1
listed_files = 0
offset_reached = False
for f in all_files:
this_key = self.get_file_key(f)
this_filename = self.get_file_name(f)
if skip_common_hidden and this_filename in self.hidden_files:
continue
idx += 1
if offset and idx >= int(offset):
offset_reached = True
if (limit and listed_files >= int(limit))\
or (offset and not offset_reached):
continue
# Verify file size. Having some issues with large PDFs (process
# simply killed), so allow the option of skipping files above a
# certain size in bytes.
if size_limit is not None:
size_in_bytes = self.get_file_size(base_path, f)
if size_in_bytes > size_limit:
continue
filtered_files.append(
self.get_final_path(base_path, this_key, return_full_path)
)
listed_files += 1
except Exception as e:
logger.error("Unable to list objects: {0}".format(e))
return filtered_files
class S3KeyGenerator(KeyGenerator):
""" Produce a list of object keys from S3.
"""
def __init__(self, aws_access_key_id, aws_secret_access_key,
aws_region='us-east-1'):
super(S3KeyGenerator, self).__init__()
session = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=aws_region
)
self.s3 = session.client('s3')
def get_file_key(self, file_obj):
""" Get file key from s3 object """
return file_obj.get('Key', None)
def get_file_name(self, file_obj):
""" Get file name from s3 object """
if file_obj is not None:
key = file_obj.get('Key', None)
if key is not None:
return key.split('/')[-1]
return None
def get_file_size(self, base_path, file_obj):
""" Return file size of s3 object """
return file_obj.get('Size', 0)
# All files in bucket
# Range of files with an offset
def list_files(self, bucket, folder='', limit=None, offset=None,
size_limit=None, return_full_path=True,
skip_common_hidden=True):
""" Lists files inside an S3 bucket+folder
Note: This does not guarantee any sort of order. Boto+S3 does not
provide an interface for sorting results, so that would need
to happen in memory.
limit will include a maximum of 'limit' values
offset will start including values only after 'offset' keys
size_limit will not include files over a specific size (in bytes)
skip_common_hidden will exclude common hidden files
return_full_path will include 'bucket/' in key.
"""
files = []
try:
file_data = self.s3.list_objects_v2(
Bucket=bucket, Delimiter='/', Prefix=folder)
files = self.list_iterator(
file_data['Contents'],
bucket,
limit=limit,
offset=offset,
size_limit=size_limit,
return_full_path=return_full_path,
skip_common_hidden=skip_common_hidden
)
except Exception as e:
logger.error("Unable to list objects: {0}".format(e))
return files
class LocalKeyGenerator(KeyGenerator):
""" Generic generator to produce a list of file names from filesystem.
"""
def __init__(self):
super(LocalKeyGenerator, self).__init__()
def get_file_key(self, file_obj):
""" Get file key from local object """
return file_obj
def get_file_name(self, file_obj):
""" Get file name from local object """
return file_obj
def get_file_size(self, base_path, file_obj):
""" Get file size from local object """
full_path = os.path.normpath(base_path + '/' + file_obj)
try:
return os.stat(full_path).st_size
except Exception as e:
logger.error("File {0} not found ...".format(full_path))
return 0
def list_files(self, folder_path, limit=None, offset=None,
size_limit=None, return_full_path=True,
skip_common_hidden=True):
""" Lists all file names inside a path.
skip_common_hidden will exclude common hidden files
return_full_path will include path in addition to filename
"""
files = []
try:
file_data = os.listdir(folder_path)
files = self.list_iterator(
file_data,
folder_path,
limit=limit,
offset=offset,
size_limit=size_limit,
return_full_path=return_full_path,
skip_common_hidden=skip_common_hidden
)
except Exception as e:
logger.error("Unable to list objects: {0}".format(e))
return files
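# --- Illustrative usage (not part of the original module) ---
# A minimal sketch: list up to 10 non-hidden files under /tmp as full paths,
# skipping anything larger than ~5 MB. S3KeyGenerator.list_files accepts the
# same limit/offset/size_limit options but requires AWS credentials.
def _example_local_listing():
    generator = LocalKeyGenerator()
    return generator.list_files('/tmp', limit=10, size_limit=5 * 1024 * 1024)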
|
/scalable-pypeline-1.2.3.tar.gz/scalable-pypeline-1.2.3/pypeline/generators.py
| 0.561335 | 0.158369 |
generators.py
|
pypi
|
import logging
import networkx as nx
from typing import List, Union
logger = logging.getLogger(__name__)
def get_execution_graph(
config: dict,
adjacency_key: str = 'dagAdjacency',
task_definitions_key: str = 'taskDefinitions') -> nx.DiGraph:
""" Generate a directed graph based on a pipeline config's adjacency list
and task definitions.
`dagAdjacency` is a dictionary containing all nodes and downstream
nodes.
`taskDefinitions` is a dictionary containing metadata required for
each node such as the worker, model version, etc. This metadata is
attached to each node so it can be retrieved directly from the graph.
"""
G = nx.DiGraph()
# Get our adjacency list and task definitions
adjacency_dict = config.get(adjacency_key, {})
task_definitions = config.get(task_definitions_key, {})
if len(adjacency_dict.keys()) == 0:
logger.warning('Adjacency definition `{}` was not found ...'.format(
adjacency_key))
# Build the graph
for node in adjacency_dict.keys():
adjacent_nodes = adjacency_dict[node]
# If no adjacent nodes, then this is a terminal node
if len(adjacent_nodes) == 0:
G.add_node(node, attr_dict=task_definitions.get(node, {}))
continue
# Otherwise, we'll add an edge from this node to all adjacent nodes
# and add the task definition metadata to the edge
G.add_edges_from([(node, n, task_definitions.get(n, {}))
for n in adjacent_nodes])
return G
def find_entry_points(G: nx.DiGraph) -> List[str]:
""" Find the entrypoint(s) for this graph.
An entrypoint is one for which no predecessors exist.
"""
result = []
for node in G.nodes:
if len(list(G.predecessors(node))) == 0:
result.append(node)
return result
def find_successors(G: nx.DiGraph,
nodes: Union[List[str], str],
dedup: bool = True) -> Union[List[str], List[List[str]]]:
""" Find the next point(s) for graph node(s).
If dedup is True (default), return a single list of deduplicated
values. This is useful when creating a task chain that is comprised
of groups that can execute concurrently. If two upstream tasks in the
chain each invoke the same downstream task later in the chain, then
there is no reason to run that downstream task twice.
Examples:
`G`:
t1:
- t3
t2:
- t3
- t4
t4:
- t5
`nodes`: [t1, t2]
Return with dedup==True: [t3, t4]
Return with dedup==False: [[t3], [t3, t4]]
"""
if type(nodes) != list:
nodes = [nodes]
successors = []
for node in nodes:
successors.append(list(G.successors(node)))
# Return as-is if we're not deduplicating.
if not dedup:
return successors
# Deduplicate the list of successors.
deduped_successors = []
for group in successors:
group = [group] if type(group) != list else group
for node in group:
if node not in deduped_successors:
deduped_successors.append(node)
successors = deduped_successors
return successors
def get_chainable_tasks(G: nx.DiGraph,
starting_nodes: List[str] = None,
graph_tasks: Union[list, None] = None) -> List[str]:
""" Recursive function to get a list of grouped nodes that can be used
in a task chain.
Recursive portion is for everything other than first entrypoint(s)
wherein we can re-call this method with the starting node(s) being the
nodes in the graph that are successors to the entrypoint(s), each
batch of starting nodes is a group, essentially, so return value is
something like:
[
[t1, t2],
[t3, t4],
[t5]
]
"""
if starting_nodes is None:
starting_nodes = find_entry_points(G)
if graph_tasks is None:
    graph_tasks = []
graph_tasks.append(starting_nodes)
successors = find_successors(G, starting_nodes)
if len(successors) == 0:
    return graph_tasks
# The recursive call appends `successors` as its own starting_nodes, so
# appending them here as well would duplicate every group after the first.
return get_chainable_tasks(G, successors, graph_tasks)
def find_all_nodes(G: nx.DiGraph) -> List[str]:
""" Get a list of all nodes in the graph.
"""
return list(G.nodes)
def find_all_edges(G: nx.DiGraph) -> List[str]:
""" Get a list of all edges in the graph.
"""
return list(G.edges)
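# --- Illustrative usage (not part of the original module) ---
# A minimal sketch using the adjacency shape documented above: t1 and t2 are
# entry points, both feed t3, t2 also feeds t4, and t4 feeds t5. The handler
# paths are hypothetical.
def _example_chain():
    config = {
        'dagAdjacency': {
            't1': ['t3'],
            't2': ['t3', 't4'],
            't4': ['t5'],
        },
        'taskDefinitions': {
            't1': {'handler': 'client.workers.t1'},
            't2': {'handler': 'client.workers.t2'},
            't3': {'handler': 'client.workers.t3'},
            't4': {'handler': 'client.workers.t4'},
            't5': {'handler': 'client.workers.t5'},
        },
    }
    G = get_execution_graph(config)
    return get_chainable_tasks(G, graph_tasks=[])
    # -> [['t1', 't2'], ['t3', 't4'], ['t5']] per the docstring above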
|
/scalable-pypeline-1.2.3.tar.gz/scalable-pypeline-1.2.3/pypeline/utils/graph_utils.py
| 0.847021 | 0.592195 |
graph_utils.py
|
pypi
|
import pandas as pd
import rich
from rich.progress import Progress
from .exceptions import DataValidationError
from .exporter import Exporter
from .loader import Loader
from .steps import Step
from .validations import DataValidation
class DataPipeline:
def __init__(
self,
steps: list[Step],
loader: Loader,
exporter: Exporter,
name: str = "Unnamed pipeline",
validations: list[DataValidation] | None = None,
) -> None:
self.exporter = exporter
self.loader = loader
self.name = name
self._steps = steps
self._validations = validations or []
def run(self) -> None:
rich.print(f"\n\nRunning pipeline: [bold green]{self.name}[/bold green]")
dataset = self.loader.load()
with Progress() as progress:
for step in progress.track(self._steps, description="Apply steps..."):
dataset = step.process(dataset.copy())
progress.console.print(
f"Step [bold]{step.get_name()}[/bold] done with "
f"data shape: {dataset.shape[0]} rows, {dataset.shape[1]} columns"
)
output_data = self._apply_all_dtypes(dataset)
self._validate_data(output_data)
self.exporter.export(output_data)
def _validate_data(self, output_data: pd.DataFrame) -> None:
if not self._validations:
rich.print("No validation to apply, skipping...")
return
rich.print(f"Validating data with {len(self._validations)} validation(s)...")
with Progress() as progress:
for validation in progress.track(self._validations, description="Apply validations..."):
if not validation.is_valid(output_data):
raise DataValidationError(f"Validation {validation.get_name()} failed")
rich.print(f"Validation [bold]{validation.get_name()}[/bold] [green]passed[/green]")
def _apply_all_dtypes(self, data: pd.DataFrame) -> pd.DataFrame:
dtypes = {}
for step in self._steps:
dtypes |= step.get_dtypes()
dtypes = {column: dtype for column, dtype in dtypes.items() if column in data.columns}
return data.astype(dtypes)
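# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming concrete Loader/Step/Exporter implementations that
# satisfy the duck-typed interface used above: Loader.load() returns a
# DataFrame, Step.process()/get_name()/get_dtypes() transform and describe it,
# and Exporter.export() persists the result.
def _example_run(loader: Loader, steps: list[Step], exporter: Exporter) -> None:
    pipeline = DataPipeline(
        steps=steps,
        loader=loader,
        exporter=exporter,
        name="example pipeline",
        validations=None,  # validations are optional and skipped when empty
    )
    pipeline.run()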
|
/scalde_data_factory-0.0.1-py3-none-any.whl/data_factory/pipeline.py
| 0.616936 | 0.259843 |
pipeline.py
|
pypi
|
import os
import numpy as np
import pandas as pd
import scipy
from scipy.sparse import issparse
import torch
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from torch.utils.data import DataLoader
from anndata import AnnData
import scanpy as sc
import episcanpy as epi
from sklearn.preprocessing import maxabs_scale, MaxAbsScaler
from glob import glob
import warnings
warnings.filterwarnings('ignore')
DATA_PATH = os.path.expanduser("~")+'/.scalex/'
CHUNK_SIZE = 20000
def read_mtx(path):
"""
Read mtx format data folder including:
matrix file: e.g. count.mtx or matrix.mtx
barcode file: e.g. barcode.txt
feature file: e.g. feature.txt
"""
for filename in glob(path+'/*'):
if ('count' in filename or 'matrix' in filename or 'data' in filename) and ('mtx' in filename):
adata = sc.read_mtx(filename).T
for filename in glob(path+'/*'):
if 'barcode' in filename:
barcode = pd.read_csv(filename, sep='\t', header=None).iloc[:, -1].values
print(len(barcode), adata.shape[0])
if len(barcode) != adata.shape[0]:
adata = adata.transpose()
adata.obs = pd.DataFrame(index=barcode)
if 'gene' in filename or 'peaks' in filename or 'feature' in filename:
gene = pd.read_csv(filename, sep='\t', header=None).iloc[:, -1].values
if len(gene) != adata.shape[1]:
adata = adata.transpose()
adata.var = pd.DataFrame(index=gene)
return adata
def load_file(path):
"""
Load single cell dataset from file
"""
if os.path.exists(DATA_PATH+path+'.h5ad'):
adata = sc.read_h5ad(DATA_PATH+path+'.h5ad')
elif os.path.isdir(path): # mtx format
adata = read_mtx(path)
elif os.path.isfile(path):
if path.endswith(('.csv', '.csv.gz')):
adata = sc.read_csv(path).T
elif path.endswith(('.txt', '.txt.gz', '.tsv', '.tsv.gz')):
df = pd.read_csv(path, sep='\t', index_col=0).T
adata = AnnData(df.values, dict(obs_names=df.index.values), dict(var_names=df.columns.values))
elif path.endswith('.h5ad'):
adata = sc.read_h5ad(path)
else:
raise ValueError("File {} not exists".format(path))
if not issparse(adata.X):
adata.X = scipy.sparse.csr_matrix(adata.X)
adata.var_names_make_unique()
return adata
def load_files(root):
"""
Load single cell dataset from files
"""
if root.split('/')[-1] == '*':
adata = []
for root in sorted(glob(root)):
adata.append(load_file(root))
return AnnData.concatenate(*adata, batch_key='sub_batch', index_unique=None)
else:
return load_file(root)
def concat_data(
data_list,
batch_categories=None,
join='inner',
batch_key='batch',
index_unique=None,
save=None
):
"""
Concat multiple datasets
"""
if len(data_list) == 1:
return load_files(data_list[0])
elif isinstance(data_list, str):
return load_files(data_list)
adata_list = []
for root in data_list:
adata = load_files(root)
adata_list.append(adata)
if batch_categories is None:
batch_categories = list(map(str, range(len(adata_list))))
else:
assert len(adata_list) == len(batch_categories)
[print(b, adata.shape) for adata,b in zip(adata_list, batch_categories)]
concat = AnnData.concatenate(*adata_list, join=join, batch_key=batch_key,
batch_categories=batch_categories, index_unique=index_unique)
if save:
concat.write(save, compression='gzip')
return concat
def preprocessing_atac(
adata,
min_genes=200,
min_cells=0.01,
n_top_genes=30000,
target_sum=None,
chunk_size=CHUNK_SIZE,
log=None
):
"""
preprocessing
"""
print('Raw dataset shape: {}'.format(adata.shape))
if log: log.info('Preprocessing')
if not issparse(adata.X):
adata.X = scipy.sparse.csr_matrix(adata.X)
adata.X[adata.X>1] = 1
if log: log.info('Filtering cells')
sc.pp.filter_cells(adata, min_genes=min_genes)
if log: log.info('Filtering genes')
if min_cells < 1:
min_cells = min_cells * adata.shape[0]
sc.pp.filter_genes(adata, min_cells=min_cells)
if log: log.info('Finding variable features')
adata = epi.pp.select_var_feature(adata, nb_features=n_top_genes, show=False, copy=True)
if log: log.info('Normalizing total per cell')
sc.pp.normalize_total(adata, target_sum=target_sum)
if log: log.info('Batch specific maxabs scaling')
adata.X = maxabs_scale(adata.X)
# adata = batch_scale(adata, chunk_size=chunk_size)
print('Processed dataset shape: {}'.format(adata.shape))
return adata
def batch_scale(adata, chunk_size=CHUNK_SIZE):
"""
Batch-specific scale data
"""
for b in adata.obs['batch'].unique():
idx = np.where(adata.obs['batch']==b)[0]
scaler = MaxAbsScaler(copy=False).fit(adata.X[idx])
for i in range(len(idx)//chunk_size+1):
adata.X[idx[i*chunk_size:(i+1)*chunk_size]] = scaler.transform(adata.X[idx[i*chunk_size:(i+1)*chunk_size]])
return adata
def reindex(adata, genes):
"""
Reindex AnnData with gene list
"""
idx = [i for i, g in enumerate(genes) if g in adata.var_names]
print('There are {} of the selected genes present'.format(len(idx)))
new_X = scipy.sparse.csr_matrix((adata.shape[0], len(genes)))
new_X[:, idx] = adata[:, genes[idx]].X
adata = AnnData(new_X, obs=adata.obs, var={'var_names':genes})
return adata
class SingleCellDataset(Dataset):
"""
Dataset for dataloader
"""
def __init__(self, adata):
self.adata = adata
self.shape = adata.shape
def __len__(self):
return self.adata.X.shape[0]
def __getitem__(self, idx):
x = self.adata.X[idx].toarray().squeeze()
domain_id = self.adata.obs['batch'].cat.codes[idx]
# return x, domain_id, idx
return x
def load_dataset(
data_list,
batch_categories=None,
join='inner',
batch_key='batch',
batch_name='batch',
min_genes=600,
min_cells=3,
n_top_genes=2000,
batch_size=64,
chunk_size=CHUNK_SIZE,
log=None,
transpose=False,
):
"""
Load dataset with preprocessing
"""
adata = concat_data(data_list, batch_categories, join=join, batch_key=batch_key)
if log: log.info('Raw dataset shape: {}'.format(adata.shape))
if batch_name!='batch':
adata.obs['batch'] = adata.obs[batch_name]
if 'batch' not in adata.obs:
adata.obs['batch'] = 'batch'
adata.obs['batch'] = adata.obs['batch'].astype('category')
adata = preprocessing_atac(
adata,
min_genes=min_genes,
min_cells=min_cells,
chunk_size=chunk_size,
log=log,
)
scdata = SingleCellDataset(adata) # Wrap AnnData into Pytorch Dataset
trainloader = DataLoader(
scdata,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=4
)
# batch_sampler = BatchSampler(batch_size, adata.obs['batch'], drop_last=False)
testloader = DataLoader(scdata, batch_size=batch_size, drop_last=False, shuffle=False)
# testloader = DataLoader(scdata, batch_sampler=batch_sampler)
return adata, trainloader, testloader
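# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming `example.h5ad` is a single-cell ATAC matrix on
# disk; load_dataset() concatenates the inputs, runs preprocessing_atac and
# wraps the result in train/test DataLoaders.
def _example_load():
    adata, trainloader, testloader = load_dataset(
        ['example.h5ad'],
        min_genes=200,
        min_cells=0.01,
        batch_size=32,
    )
    return adata.shape, len(trainloader), len(testloader)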
|
/scale_atac-1.1.0-py3-none-any.whl/scale/dataset.py
| 0.460046 | 0.318426 |
dataset.py
|
pypi
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.optim.lr_scheduler import MultiStepLR, ExponentialLR, ReduceLROnPlateau
import time
import math
import numpy as np
from tqdm import tqdm, trange
from itertools import repeat
from sklearn.mixture import GaussianMixture
from .layer import Encoder, Decoder, build_mlp, DeterministicWarmup
from .loss import elbo, elbo_SCALE
class VAE(nn.Module):
def __init__(self, dims, bn=False, dropout=0, binary=True):
"""
Variational Autoencoder [Kingma 2013] model
consisting of an encoder/decoder pair for which
a variational distribution is fitted to the
encoder. Also known as the M1 model in [Kingma 2014].
:param dims: x, z and hidden dimensions of the networks
"""
super(VAE, self).__init__()
[x_dim, z_dim, encode_dim, decode_dim] = dims
self.binary = binary
if binary:
decode_activation = nn.Sigmoid()
else:
decode_activation = None
self.encoder = Encoder([x_dim, encode_dim, z_dim], bn=bn, dropout=dropout)
self.decoder = Decoder([z_dim, decode_dim, x_dim], bn=bn, dropout=dropout, output_activation=decode_activation)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize weights
"""
for m in self.modules():
if isinstance(m, nn.Linear):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, y=None):
"""
Runs a data point through the model in order
to provide its reconstruction and q distribution
parameters.
:param x: input data
:return: reconstructed input
"""
z, mu, logvar = self.encoder(x)
recon_x = self.decoder(z)
return recon_x
def loss_function(self, x):
z, mu, logvar = self.encoder(x)
recon_x = self.decoder(z)
likelihood, kl_loss = elbo(recon_x, x, (mu, logvar), binary=self.binary)
return (-likelihood, kl_loss)
def predict(self, dataloader, device='cpu', method='kmeans'):
"""
Predict assignments applying k-means on latent feature
Input:
x, data matrix
Return:
predicted cluster assignments
"""
if method == 'kmeans':
from sklearn.cluster import KMeans, MiniBatchKMeans, AgglomerativeClustering
feature = self.encodeBatch(dataloader, device)
kmeans = KMeans(n_clusters=self.n_centroids, n_init=20, random_state=0)
pred = kmeans.fit_predict(feature)
elif method == 'gmm':
logits = self.encodeBatch(dataloader, device, out='logit')
pred = np.argmax(logits, axis=1)
return pred
def load_model(self, path):
pretrained_dict = torch.load(path, map_location=lambda storage, loc: storage)
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
def fit(self, dataloader,
lr=0.002,
weight_decay=5e-4,
device='cpu',
beta = 1,
n = 200,
max_iter=30000,
verbose=True,
patience=100,
outdir='./'
):
self.to(device)
optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)
Beta = DeterministicWarmup(n=n, t_max=beta)
iteration = 0
n_epoch = int(np.ceil(max_iter/len(dataloader)))
early_stopping = EarlyStopping(patience=patience, outdir=outdir)
with tqdm(range(n_epoch), total=n_epoch, desc='Epochs') as tq:
for epoch in tq:
# epoch_loss = 0
epoch_recon_loss, epoch_kl_loss = 0, 0
tk0 = tqdm(enumerate(dataloader), total=len(dataloader), leave=False, desc='Iterations')
for i, x in tk0:
# epoch_lr = adjust_learning_rate(lr, optimizer, iteration)
x = x.float().to(device)
optimizer.zero_grad()
recon_loss, kl_loss = self.loss_function(x)
# loss = (recon_loss + next(Beta) * kl_loss)/len(x);
loss = (recon_loss + kl_loss)/len(x)
loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.parameters(), 10)  # clip gradients (clip_grad_norm is deprecated)
optimizer.step()
epoch_kl_loss += kl_loss.item()
epoch_recon_loss += recon_loss.item()
tk0.set_postfix_str('loss={:.3f} recon_loss={:.3f} kl_loss={:.3f}'.format(
loss, recon_loss/len(x), kl_loss/len(x)))
tk0.update(1)
iteration+=1
tq.set_postfix_str('recon_loss {:.3f} kl_loss {:.3f}'.format(
epoch_recon_loss/((i+1)*len(x)), epoch_kl_loss/((i+1)*len(x))))
def encodeBatch(self, dataloader, device='cpu', out='z', transforms=None):
output = []
for x in dataloader:
x = x.view(x.size(0), -1).float().to(device)
z, mu, logvar = self.encoder(x)
if out == 'z':
output.append(z.detach().cpu())
elif out == 'x':
recon_x = self.decoder(z)
output.append(recon_x.detach().cpu().data)
elif out == 'logit':
output.append(self.get_gamma(z)[0].cpu().detach().data)
output = torch.cat(output).numpy()
return output
class SCALE(VAE):
def __init__(self, dims, n_centroids):
super(SCALE, self).__init__(dims)
self.n_centroids = n_centroids
z_dim = dims[1]
# init c_params
self.pi = nn.Parameter(torch.ones(n_centroids)/n_centroids) # pc
self.mu_c = nn.Parameter(torch.zeros(z_dim, n_centroids)) # mu
self.var_c = nn.Parameter(torch.ones(z_dim, n_centroids)) # sigma^2
def loss_function(self, x):
z, mu, logvar = self.encoder(x)
recon_x = self.decoder(z)
gamma, mu_c, var_c, pi = self.get_gamma(z) #, self.n_centroids, c_params)
likelihood, kl_loss = elbo_SCALE(recon_x, x, gamma, (mu_c, var_c, pi), (mu, logvar), binary=self.binary)
return -likelihood, kl_loss
def get_gamma(self, z):
"""
        Infer cluster assignment c from latent z.
        gamma is q(c|x), approximated by p(c|z) = p(c)p(z|c) / p(z)
"""
n_centroids = self.n_centroids
N = z.size(0)
z = z.unsqueeze(2).expand(z.size(0), z.size(1), n_centroids)
pi = self.pi.repeat(N, 1) # NxK
# pi = torch.clamp(self.pi.repeat(N,1), 1e-10, 1) # NxK
mu_c = self.mu_c.repeat(N,1,1) # NxDxK
var_c = self.var_c.repeat(N,1,1) + 1e-8 # NxDxK
# p(c,z) = p(c)*p(z|c) as p_c_z
p_c_z = torch.exp(torch.log(pi) - torch.sum(0.5*torch.log(2*math.pi*var_c) + (z-mu_c)**2/(2*var_c), dim=1)) + 1e-10
gamma = p_c_z / torch.sum(p_c_z, dim=1, keepdim=True)
return gamma, mu_c, var_c, pi
def init_gmm_params(self, dataloader, device='cpu'):
"""
Init SCALE model with GMM model parameters
"""
gmm = GaussianMixture(n_components=self.n_centroids, covariance_type='diag')
z = self.encodeBatch(dataloader, device)
gmm.fit(z)
self.mu_c.data.copy_(torch.from_numpy(gmm.means_.T.astype(np.float32)))
self.var_c.data.copy_(torch.from_numpy(gmm.covariances_.T.astype(np.float32)))
def adjust_learning_rate(init_lr, optimizer, iteration):
lr = max(init_lr * (0.9 ** (iteration//10)), 0.0002)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
import os
class EarlyStopping:
"""Early stops the training if loss doesn't improve after a given patience."""
def __init__(self, patience=10, verbose=False, outdir='./'):
"""
Args:
patience (int): How long to wait after last time loss improved.
Default: 10
verbose (bool): If True, prints a message for each loss improvement.
Default: False
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.loss_min = np.Inf
self.model_file = os.path.join(outdir, 'model.pt')
def __call__(self, loss, model):
        if np.isnan(loss):
            self.early_stop = True
            return
score = -loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(loss, model)
elif score < self.best_score:
self.counter += 1
if self.verbose:
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
model.load_model(self.model_file)
else:
self.best_score = score
self.save_checkpoint(loss, model)
self.counter = 0
def save_checkpoint(self, loss, model):
'''Saves model when loss decrease.'''
if self.verbose:
print(f'Loss decreased ({self.loss_min:.6f} --> {loss:.6f}). Saving model ...')
torch.save(model.state_dict(), self.model_file)
self.loss_min = loss
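# Usage sketch (illustrative only): the sizes below are made up, and the hidden-layer
# lists assume, as in the package defaults, that encode_dim/decode_dim are lists of
# hidden sizes (encoder [1024, 128], no decoder hidden layer). Training and prediction
# need a real DataLoader, e.g. one built with scale.dataset.load_dataset.
#
#     x_dim, z_dim, n_centroids = 10000, 10, 8
#     dims = [x_dim, z_dim, [1024, 128], []]
#     model = SCALE(dims, n_centroids=n_centroids)
#     model.init_gmm_params(dataloader)          # initialize the GMM prior from encoder output
#     model.fit(dataloader, lr=2e-3, max_iter=30000, device='cpu')
#     clusters = model.predict(dataloader, method='kmeans')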
|
/scale_atac-1.1.0-py3-none-any.whl/scale/model.py
| 0.914958 | 0.358129 |
model.py
|
pypi
|
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats  # make sure scipy.stats is importable as sp.stats on older SciPy versions
def jsd(p, q, base=np.e):
"""
    Jensen-Shannon divergence
"""
## convert to np.array
p, q = np.asarray(p), np.asarray(q)
## normalize p, q to probabilities
p, q = p/p.sum(), q/q.sum()
m = 1./2*(p + q)
return sp.stats.entropy(p,m, base=base)/2. + sp.stats.entropy(q, m, base=base)/2.
def jsd_sp(p, q, base=np.e):
"""
Define specificity score:
score = 1 - sqrt(jsd(p, q))
"""
    return 1 - jsd(p, q, base=base)**0.5
def log2norm(e):
"""
log2(e+1) normalization
"""
loge = np.log2(e+1)
return loge/sum(loge)
def predefined_pattern(t, labels):
q = np.zeros(len(labels))
q[np.where(labels==t)[0]] = 1
return q
def vec_specificity_score(e, t, labels):
"""
Calculate a vector specificity for cluster t
"""
e = log2norm(e)
et = log2norm(predefined_pattern(t, labels))
return jsd_sp(e, et)
def mat_specificity_score(mat, labels):
"""
Calculate all peaks or genes specificity across all clusters
Return:
peaks/genes x clusters dataframe
"""
scores = []
for i in np.unique(labels):
score = mat.apply(lambda x: vec_specificity_score(x, i, labels), axis=1)
scores.append(score)
return pd.concat(scores, axis=1)
def cluster_specific(score_mat, classes=None, top=0):
"""
Identify top specific peaks for each cluster
Input:
score_mat calculated by mat_specificity_score
Return:
specific peaks index and peaks labels
"""
scores = score_mat.max(1)
peak_labels = np.argmax(score_mat.values, axis=1)
inds = []
labels = []
if classes is None:
classes = np.unique(peak_labels)
for i in classes:
index = np.where(peak_labels==i)[0]
ind = np.argsort(scores[index])[-top:]
ind = index[ind]
inds.append(ind)
labels.append(peak_labels[ind])
return np.concatenate(inds), np.concatenate(labels)
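# Usage sketch (illustrative only): synthetic peaks-by-cells counts and a hypothetical
# 3-cluster labelling, showing how the specificity helpers above are meant to be combined.
if __name__ == "__main__":
    mat = pd.DataFrame(np.random.randint(0, 5, size=(100, 50)))   # 100 peaks x 50 cells
    labels = np.repeat([0, 1, 2], [20, 15, 15])                   # per-cell cluster labels
    score_mat = mat_specificity_score(mat, labels)                # peaks x clusters specificity scores
    top_idx, top_labels = cluster_specific(score_mat, top=5)      # top-5 specific peaks per cluster
    print(score_mat.shape, top_idx[:5], top_labels[:5])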
|
/scale_atac-1.1.0-py3-none-any.whl/scale/specifity.py
| 0.635336 | 0.536434 |
specifity.py
|
pypi
|
import json
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Generic, Sequence, TypeVar
from launch_api.core import Service
from launch_api.types import I, JsonVal, O
logger = logging.getLogger("service")
__all__: Sequence[str] = (
# re-export
"Service",
# data type generic parameter
"D",
# service sub-structure
"RequestHandler",
"ResponseHandler",
"FullService",
)
D = TypeVar("D")
class RequestHandler(Generic[D, I], ABC):
"""Responsible for converting protocol-formatted data (D) into a service's input type (I)."""
@abstractmethod
def deserialize(self, request: D) -> I:
raise NotImplementedError()
class ResponseHandler(Generic[D, O], ABC):
"""Responsible for converting a service's output (O) into the protocol's data format (D)."""
@abstractmethod
def serialize(self, response: O) -> D:
raise NotImplementedError()
@dataclass
class FullService(Generic[D, I, O], Service[D, D]):
"""The thing that we technically run: service + payload serializers.
    It's the service logic plus the knowledge of how to encode and decode payloads
    to and from the transit data format. The default implementation uses JSON-formatted strings.
"""
service: Service[I, O]
request_handler: RequestHandler[D, I]
response_handler: ResponseHandler[D, O]
# cannot be overridden!
def call(self, request: D) -> D:
# deserialize JSON into format service can handle
try:
input_: I = self.request_handler.deserialize(request)
        except Exception:
logger.exception(
f"Could not deserialize request ({type(request)=} | {request=})"
)
raise
# run service logic
try:
output: O = self.service.call(input_)
        except Exception:
logger.exception(
f"Could not perform service calculation ({type(input_)=})"
)
raise
# serialize service output into JSON
try:
            response: D = self.response_handler.serialize(output)
        except Exception:
logger.exception(
f"Could not serialize service output ({type(output)=} | {output=})"
)
raise
# callers get JSON response
return response
class JsonService(FullService[str, JsonVal, JsonVal]):
"""This is what all std-ml-srv services are, effectively.
    + all services accept and return JSON-formattable values
    + all protocols encode the data as JSON-formatted strings
"""
def __init__(self, service: Service[I, O]) -> None:
super().__init__(
service=service,
request_handler=_J,
response_handler=_J,
)
class JsonHandler(RequestHandler[str, JsonVal], ResponseHandler[str, JsonVal]):
def serialize(self, response: JsonVal) -> str:
return json.dumps(response)
def deserialize(self, request: str) -> JsonVal:
return json.loads(request)
_J = JsonHandler()
def default_full_service(s: Service) -> FullService:
return JsonService(s)
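# Usage sketch (illustrative only): a hypothetical Service implementation wrapped for
# JSON transport. Only the `call` method of the Service contract is assumed here.
#
#     class EchoService(Service[JsonVal, JsonVal]):
#         def call(self, request: JsonVal) -> JsonVal:
#             return {"echo": request}
#
#     json_service = default_full_service(EchoService())
#     json_service.call('{"msg": "hello"}')   # -> '{"echo": {"msg": "hello"}}'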
|
/scale_launch-0.3.3-py3-none-any.whl/launch/clientlib/service.py
| 0.844329 | 0.207536 |
service.py
|
pypi
|
from dataclasses import dataclass
from logging import Logger
from typing import List, Sequence
import numpy as np
from launch_api.loader import Loader, LoaderSpec
from launch_api.model import B, Model
from launch_api.model_service.types import InferenceService, Processor
from launch_api.types import I, O
__all__: Sequence[str] = (
"InferenceServiceImpl",
"LoaderInferenceServiceImpl",
)
@dataclass
class InferenceServiceImpl(InferenceService[I, O, B]):
processor: Processor[I, O, B]
model: Model[B]
logger: Logger
def preprocess(self, request: I) -> B:
return self.processor.preprocess(request)
def postprocess(self, pred: B) -> List[O]:
return self.processor.postprocess(pred)
def __call__(self, batch: B) -> B:
if isinstance(batch, np.ndarray):
return self.model(batch)
else:
return self.model(**batch)
def call(self, request: I) -> O:
try:
model_input = self.preprocess(request)
except Exception:
self.logger.exception(f"Failed to preprocess ({request=})")
raise
try:
pred = self.__call__(model_input)
except Exception:
self.logger.exception(
"Failed inference on request "
f"({type(model_input)=}, {request=})"
)
raise
try:
response = self.postprocess(pred)
except Exception:
self.logger.exception(
"Failed to postprocess prediction "
f"({type(pred)=}, {type(model_input)=}, {request=})"
)
raise
return response
@dataclass(frozen=True)
class LoaderInferenceServiceImpl(Loader[InferenceService[I, O, B]]):
processor: LoaderSpec[Loader[Processor[I, O, B]]]
model: LoaderSpec[Loader[Model[B]]]
logger: Logger
def load(self) -> InferenceService[I, O, B]:
self.logger.info(f"Using processor loader:\n{self.processor}")
self.logger.info(f"Using model loader:\n{self.model}")
processor_loader = self.processor.construct()
self.logger.info(f"Made processor loader: {processor_loader}")
model_loader = self.model.construct()
self.logger.info(f"Made model loader: {model_loader}")
try:
batcher = processor_loader.load()
except Exception:
self.logger.exception(
f"Could not create processor from {type(processor_loader)}"
)
raise
else:
self.logger.info(f"Created processor: {batcher}")
try:
model = model_loader.load()
except Exception:
self.logger.exception(
f"Could not create model from {type(model_loader)}"
)
raise
else:
self.logger.info(f"Created model: {model}")
return InferenceServiceImpl(batcher, model, self.logger)
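# Usage sketch (illustrative only): wiring InferenceServiceImpl with a toy numpy model
# and a pass-through processor. The classes below are hypothetical stand-ins that only
# implement the methods used by `call` above.
#
#     import logging
#
#     class IdentityProcessor:              # stands in for Processor[I, O, B]
#         def preprocess(self, request):    # request -> numpy batch
#             return np.asarray(request, dtype=np.float32)
#         def postprocess(self, pred):      # numpy batch -> list of outputs
#             return pred.tolist()
#
#     class DoubleModel:                    # stands in for Model[B]
#         def __call__(self, batch):
#             return batch * 2
#
#     svc = InferenceServiceImpl(IdentityProcessor(), DoubleModel(), logging.getLogger("svc"))
#     svc.call([1.0, 2.0, 3.0])             # -> [2.0, 4.0, 6.0]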
|
/scale_launch-0.3.3-py3-none-any.whl/launch/clientlib/model_service/implementation.py
| 0.858259 | 0.167151 |
implementation.py
|
pypi
|
from dataclasses import dataclass
from logging import Logger
from typing import List, Sequence
import numpy as np
from launch_api.batching.types import B, BatchableService, Batcher, Model
from launch_api.loader import Loader, LoaderSpec
from launch_api.types import I, O
__all__: Sequence[str] = (
"BatchableServiceImpl",
"LoaderBatchableServiceImpl",
)
@dataclass
class BatchableServiceImpl(BatchableService[I, O, B]):
batcher: Batcher[I, O, B]
model: Model[B]
logger: Logger
def batch(self, requests: List[I]) -> B:
return self.batcher.batch(requests)
def unbatch(self, preds: B) -> List[O]:
return self.batcher.unbatch(preds)
def __call__(self, batch: B) -> B:
if isinstance(batch, np.ndarray):
return self.model(batch)
else:
return self.model(**batch)
def call(self, requests: List[I]) -> List[O]:
try:
batch = self.batch(requests)
except Exception:
self.logger.exception(
f"Failed to preprocess & batch requests "
f"({len(requests)=}, {requests=})"
)
raise
try:
preds = self.__call__(batch)
except Exception:
self.logger.exception(
f"Failed inference on request batch "
f"({type(batch)=}, {len(requests)=}, {requests=})"
)
raise
try:
responses = self.unbatch(preds)
except Exception:
self.logger.exception(
f"Failed to postprocess prediction batch "
f"({type(preds)=}, {type(batch)=}, {len(requests)=}, {requests=}))"
)
raise
return responses
@dataclass(frozen=True)
class LoaderBatchableServiceImpl(Loader[BatchableService[I, O, B]]):
batcher: LoaderSpec[Loader[Batcher[I, O, B]]]
model: LoaderSpec[Loader[Model[B]]]
logger: Logger
def load(self) -> BatchableService[I, O, B]:
self.logger.info(f"Using batcher loader:\n{self.batcher}")
self.logger.info(f"Using model loader:\n{self.model}")
batcher_loader = self.batcher.construct()
self.logger.info(f"Made batcher loader: {batcher_loader}")
model_loader = self.model.construct()
self.logger.info(f"Made model loader: {model_loader}")
try:
batcher = batcher_loader.load()
except Exception:
self.logger.exception(
f"Could not create batcher from {type(batcher_loader)}"
)
raise
else:
self.logger.info(f"Created batcher: {batcher}")
try:
model = model_loader.load()
except Exception:
self.logger.exception(
f"Could not create model from {type(model_loader)}"
)
raise
else:
self.logger.info(f"Created model: {model}")
return BatchableServiceImpl(batcher, model, self.logger)
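# Usage sketch (illustrative only): a hypothetical Batcher that stacks per-request
# vectors into one numpy batch and splits predictions back out, mirroring the
# batch/unbatch/__call__ flow used by `call` above.
#
#     import logging
#
#     class StackBatcher:                   # stands in for Batcher[I, O, B]
#         def batch(self, requests):
#             return np.stack([np.asarray(r, dtype=np.float32) for r in requests])
#         def unbatch(self, preds):
#             return list(preds)
#
#     class SumModel:                       # stands in for Model[B]
#         def __call__(self, batch):
#             return batch.sum(axis=1)
#
#     svc = BatchableServiceImpl(StackBatcher(), SumModel(), logging.getLogger("svc"))
#     svc.call([[1.0, 2.0], [3.0, 4.0]])    # -> [3.0, 7.0] (one prediction per request)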
|
/scale_launch-0.3.3-py3-none-any.whl/launch/clientlib/batching/implementation.py
| 0.86196 | 0.187411 |
implementation.py
|
pypi
|
import numpy as np
import json
import os
from scale_lidar_io import LidarScene, Transform
from .view_utils import open_viewer, open_new_viewer
from .awsHandler import get_secret, get_db_connection, get_signed_url
from bson.objectid import ObjectId
from pyquaternion import Quaternion
import base64
import requests
from .lidarLiteHelper import get_points, get_cameras, combine_scenes, load_bs4
class DebugLidarScene(LidarScene):
'''
New Viewer method using open3d, this will work with python 3.8
'''
def preview(self, frame_id=None, file=None, aggregated_frames=10):
if file:
np.save(file, self.get_all_world_points())
else:
return open_new_viewer(self.get_all_world_points(), aggregated_frames)
def get_all_world_points(self):
all_points = []
for idx, frame in enumerate(self.frames):
points = frame.get_world_points()
points[:, 3] = idx
all_points.append(points)
return all_points
'''
    This method uses PPTK (the old viewer implementation); it is kept as a fallback.
    The new viewer has shown issues with point clouds of 150k+ points and needs more testing,
    so this alternative is left in place. It has been tested with a 400k-point cloud and worked fine.
    This method only runs on Python 3.6.0 (a pptk requirement).
'''
def preview_motion(self, file=None):
if file:
np.save(file, self.get_all_world_points())
else:
return open_viewer(np.vstack(self.get_all_world_points()), point_size=0.005)
'''
Method to load the task attachments and add the data to the scene
'''
def load_task_attachments(self, task_id, attachments, load_images):
cached = False # Flag used to create the cache data if necessary.
if os.path.exists(".cache"): # check for cache folder
if os.path.exists(os.path.join(".cache", f"{task_id}")): # check for cache folder for this task
cached = True
else:
os.makedirs(os.path.join(".cache", f"{task_id}")) # create a folder per each task
else:
os.makedirs(f".cache/{task_id}") # create a folder per each task
for index, attachment in enumerate(attachments):
# store in local cache folder if necessary
if not cached:
print(f"Loading attachment {index}")
signed_url = get_signed_url(attachment)
r = requests.get(signed_url)
attachment_data = r.json()
# Load point
points = np.frombuffer(base64.b64decode(attachment_data['points']), 'float32').reshape([-1, 3])
intensity = np.frombuffer(base64.b64decode(attachment_data['intensities']), 'float32').reshape([-1, 1])
points = np.hstack((points,intensity)) # add intensity to the points
np.save(os.path.join(".cache", f"{task_id}", f"frame-{index}.npy"), points) # save points into file
del attachment_data['points'] # delete base64 points to store only the calibration data
del attachment_data['intensities']
with open(os.path.join(".cache", f"{task_id}", f"calibration-{index}.json"), 'w') as outfile: # save calibration file
json.dump(attachment_data, outfile)
if load_images: # maybe working on local we don't want to load the images to work faster
for camera_index, image in enumerate(attachment_data['images']):
print(f"Loading image {camera_index} of {len(attachment_data['images'])}")
signed_image = get_signed_url(image['image_url'])
r = requests.get(signed_image, stream=True)
if r.status_code == 200:
with open(os.path.join(".cache", f"{task_id}", f"camera-{camera_index}-{index}.jpg"), 'wb') as f: # Save camera images
for chunk in r:
f.write(chunk)
# load points and device calibration values
with open(os.path.join(".cache", f"{task_id}", f"calibration-{index}.json")) as json_file:
calibration_data = json.load(json_file)
device_position = list(calibration_data['device_position'].values()) if 'device_position' in calibration_data else list(calibration_data['devicePosition'].values())
device_transformation = Transform.from_Rt(
Quaternion(np.array(list(calibration_data['device_heading'].values())) if 'device_heading' in calibration_data else [1,0,0,0]),
np.array(device_position)
)
# load device data
self.get_frame(index).apply_transform(device_transformation) # add frame transformation
# load points
self.get_frame(index).add_points(np.load(os.path.join(".cache", f"{task_id}", f"frame-{index}.npy")), transform=device_transformation.inverse) # need to remove the frame transformation
if load_images: # maybe working on local we don't want to load the images to work faster
if index == 0: # we calibate the cameras just one time (first frame)
with open(os.path.join(".cache", f"{task_id}", f"calibration-{index}.json")) as json_file:
calibration_data = json.load(json_file)
for camera_index, image in enumerate(calibration_data['images']):
distortion = [value for value in [image[key] if key in image.keys() else 0 for key in ['k1', 'k2', 'p1', 'p2', 'k3', 'k4']] ] # sometime we don't have all the cohefficients
position = list(image['position'].values())
self.get_camera(camera_index).calibrate(
pose= Transform().from_Rt(
Quaternion(image['heading']['w'], image['heading']['x'], image['heading']['y'], image['heading']['z']).rotation_matrix,
np.array([position[0], position[1], position[2]])
),
K=np.array([ [image['fx'],0,image['cx']],
[0, image['fy'],image['cy']],
[0,0,1]]),
D=distortion,
scale_factor=image['scale_factor'] if 'scale_factor' in image else 1,
skew=image['skew'] if 'skew' in image else 0,
model=image['camera_model'] if 'camera_model' in image else 'brown_conrady')
self.get_camera(camera_index).apply_transform(device_transformation.inverse) # need to remove the frame transformation
# load camera images
for camera_index, image in enumerate(calibration_data['images']):
self.get_frame(index).get_image(camera_index).load_file(os.path.join(".cache", f"{task_id}", f"camera-{camera_index}-{index}.jpg"))
'''
Method to load a task from a task ID
'''
def load_from_task(self, task_id=None, frames=0, load_images=True):
if task_id:
print("Connecting to DB")
self.db = get_db_connection()
subtasks = self.db['subtasks']
results = subtasks.find({'task': ObjectId(task_id)})
for r in results:
if 'attachments' in r['params']:
attachments = r['params']['attachments'][:frames] if frames > 0 else r['params']['attachments'] # limit number of loaded frames
elif 'full_attachments' in r['params']:
attachments = r['params']['full_attachments'][:frames] if frames > 0 else r['params']['full_attachments']
else:
raise RuntimeError(f'Could not find attachments or full_attachments in task {task_id} params')
print(f"Loading {len(attachments)} frames")
self.load_task_attachments(task_id, attachments, load_images)
return self
def to_bs4(self, path, filename, camera="all"):
get_points(self, path)
get_cameras(self, path, camera)
combine_scenes(camera, path, filename)
def from_bs4(self, file):
load_bs4(self, file)
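# Usage sketch (illustrative only): the task id is a placeholder, and the DB/S3
# credentials used by awsHandler are assumed to be configured in the environment.
#
#     scene = DebugLidarScene()
#     scene.load_from_task(task_id="<task id>", frames=5, load_images=False)
#     scene.preview(aggregated_frames=5)    # opens the open3d-based viewer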
|
/scale_lidar_io_debug-0.2.0.tar.gz/scale_lidar_io_debug-0.2.0/scale_lidar_io_debug/scene.py
| 0.429429 | 0.190385 |
scene.py
|
pypi
|
import numpy as np
import ujson
class JSONBinaryEncoder:
block_size = 4
def fill_block(self, data, fill=b'\00'):
return data + fill * (self.block_size - len(data) % self.block_size)
def encode_object(self, obj, keys=None, items=None, buffer=None, **params):
keys = keys or []
items = items or []
buffer = buffer or bytes(self.block_size)
if isinstance(obj, dict):
obj = dict(obj)
for k, v in obj.items():
obj[k], items, buffer = self.encode_object(v, keys + [k], items, buffer, **params)
        elif isinstance(obj, list):
obj = list(obj)
for k, v in enumerate(obj):
obj[k], items, buffer = self.encode_object(v, keys + [k], items, buffer, **params)
elif isinstance(obj, bytes):
offset = len(buffer)
items.append(dict(params, keys=keys, length=len(obj), offset=offset))
buffer += self.fill_block(obj)
obj = ''
elif isinstance(obj, np.ndarray):
obj, dtype, shape = obj.tobytes(), obj.dtype.name, obj.shape
obj, items, buffer = self.encode_object(obj, keys, items, buffer, dtype=dtype, shape=shape)
return obj, items, buffer
def set_nested(self, obj: dict, keys: list, value):
while len(keys) > 1:
obj = obj[keys.pop(0)]
obj[keys[0]] = value
def dumps(self, obj: dict):
obj, items, buffer = self.encode_object(obj)
header = dict(obj)
header['$items'] = items
encoded_header = self.fill_block(ujson.dumps(header).encode('utf-8'), b' ')
return encoded_header + buffer
def loads(self, data: bytes):
header_length = data.find(bytes(1))
obj = ujson.loads(data[:header_length].decode('utf-8'))
items = obj.pop('$items')
data = data[header_length:]
for item in items:
value = data[item['offset']:item['offset'] + item['length']]
if 'dtype' in item:
value = np.frombuffer(value, dtype=item['dtype'])
if 'shape' in item:
value = value.reshape(item['shape'])
self.set_nested(obj, item['keys'], value)
return obj
def write_file(self, file_path: str, obj: dict):
with open(file_path, 'wb') as fp:
fp.write(self.dumps(obj))
def read_file(self, file_path: str) -> dict:
with open(file_path, 'rb') as fp:
return self.loads(fp.read())
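# Usage sketch (illustrative only): round-trip of a dict containing a numpy array; the
# array bytes go into the binary buffer on dumps() and are restored with their
# dtype/shape on loads().
if __name__ == "__main__":
    encoder = JSONBinaryEncoder()
    payload = {"name": "frame-0", "points": np.arange(12, dtype="float32").reshape(4, 3)}
    blob = encoder.dumps(payload)
    restored = encoder.loads(blob)
    assert restored["name"] == "frame-0"
    assert restored["points"].shape == (4, 3)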
|
/scale_lidar_io_debug-0.2.0.tar.gz/scale_lidar_io_debug-0.2.0/scale_lidar_io_debug/JSONBinaryEncoder.py
| 0.496338 | 0.236737 |
JSONBinaryEncoder.py
|
pypi
|
from abc import ABCMeta, abstractmethod
import numpy as np
import open3d as o3d
import pandas as pd
from laspy.file import File
_FIELDS = ["x", "y", "z", "i", "d"]
class Importer:
"""Points importer/helper"""
class Base(metaclass=ABCMeta):
"""Abstract importer class to be inherited for data type specific implementation"""
def __init__(self, fp: str) -> None:
"""Constructor to be called for preparing filepaths
:param fp: Relative or absolute path to file to be loaded in explicit `load` method
:type fp: str
"""
self._filepath: str = fp
self._data: pd.DataFrame = pd.DataFrame(columns=_FIELDS)
def load(self, **kwargs) -> None:
self._load(**kwargs)
@property
def data(self):
return self._data.to_numpy()
@abstractmethod
def _load(self, **kwargs) -> None:
...
class CSV(Base, metaclass=ABCMeta):
"""Metaclass for specific CSV Importer implementations.
Due to non-standardized csv format, it is recommended to write more specific implementations on a case-by-case basis.
"""
@abstractmethod
def _load(self, **kwargs):
...
class OrderedCSV(CSV):
"""Expects a csv file with or without header, but assumes correct order of columns / values as follows: `[x, y, z, i, d]`
Cuts off any exceeding column count.
"""
def _load(self, **kwargs):
self._data = pd.read_csv(self._filepath, **kwargs)
c_count = len(self._data.columns)
if c_count > 5: # Remove columns exceeding count of FIELDS
self._data = self._data.drop(self._data.columns[5:], axis=1)
c_count = 5
self._data.columns = _FIELDS[:c_count]
class NamedCSV(CSV):
"""Expects a csv file with header row and column names matching `x`, `y`, `z`, `i`, `d`
Dismisses any columns not matching any of the expected names. Case-sensitive.
"""
def _load(self, **kwargs):
self._data = pd.read_csv(self._filepath, **kwargs)
exists = []
for c in _FIELDS:
if c in self._data.columns:
exists.append(c)
self._data = self._data[exists]
class PCD(Base):
"""Uses open3d library to read pcd files, expects `[x, y, z]`, dismisses other columns."""
def _load(self, **kwargs):
self._data = pd.DataFrame(
np.asarray(o3d.io.read_point_cloud(self._filepath).points)[:, :3],
columns=_FIELDS[:3],
)
class LAS(Base):
"""Uses Laspy library to read .las file and expects properties `x`, `y`, `z`, `intensity` to be present."""
def _load(self, **kwargs):
las_file = File(self._filepath, mode="r")
self._data = pd.DataFrame(
np.column_stack(
[las_file.x, las_file.y, las_file.z, las_file.intensity]
),
columns=["x", "y", "z", "i"],
)
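# Usage sketch (illustrative only): reading a hypothetical ordered CSV of points;
# the file path is a placeholder and kwargs are forwarded to pandas.read_csv.
#
#     importer = Importer.OrderedCSV("points.csv")
#     importer.load(header=None)
#     points = importer.data                # numpy array with columns x, y, z, i, d (as available)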
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/connectors.py
| 0.900248 | 0.279432 |
connectors.py
|
pypi
|
import zipfile
from multiprocessing.pool import ThreadPool
from functools import partial
from io import BytesIO
from typing import MutableMapping, List, Dict
from tqdm import tqdm
import numpy as np
import pandas as pd
import ujson
from scaleapi.tasks import Task, TaskType
from .camera import LidarCamera
from .image import LidarImage
from .frame import LidarFrame
from .connectors import Importer
from .transform import Transform
from .helper import s3_smart_upload, get_signed_url
from .protobuf_helper import create_scene_from_protobufs
UPLOAD_POOL_SIZE = 8
class LidarScene:
"""LidarScene object representing all frames in a scene.
Scene properties:
- cameras: List of cameras
- frames: List of frames
- base_url: Url used to host the data in S3
"""
def __init__(self):
"""
:rtype: object
"""
self.cameras: MutableMapping[LidarCamera] = pd.Series()
self.frames: MutableMapping[LidarFrame] = pd.Series()
self.base_url = None
self.scale_file_attachments = None
@classmethod
def from_protobufs(cls, protobufs: List[str]):
"""
Create a LidarScene object from a list of protobuf files
:param protobufs: Filepaths to the .pb files, one per frame
:type protobufs: List[str]
Note: this function expects a point cloud in ego coordinates
Returns: LidarScene
"""
return create_scene_from_protobufs(cls(), protobufs)
def get_camera(self, camera_id=None, index: int = None) -> LidarCamera:
"""Get a camera by id (or index) or create one if it does not exist
:param camera_id: The camera id
:type camera_id: str, int
:param index: The camera index
:type index: int
:return: LidarCamera
:rtype: LidarCamera
"""
assert (
camera_id is not None or index is not None
), "id or index must be specified"
if camera_id is None:
camera_id = self.cameras.index[index]
if camera_id not in self.cameras:
if isinstance(camera_id, int):
self.cameras.index = self.cameras.index.astype(int)
self.cameras[camera_id] = LidarCamera(camera_id)
return self.cameras[camera_id]
def get_frame(self, frame_id=None, index: int = None) -> LidarFrame:
"""Get a frame by id (or index) or create one if it does not exist
:param frame_id: The frame id
:type frame_id: str, int
:param index: The frame index
:type index: int
:return: LidarFrame
:rtype: LidarFrame
"""
assert (
frame_id is not None or index is not None
), "id or index must be specified"
if frame_id is None:
frame_id = self.frames.index[index]
if frame_id not in self.frames:
if isinstance(frame_id, int):
self.frames.index = self.frames.index.astype(int)
self.frames[frame_id] = LidarFrame(frame_id, cameras=self.cameras)
return self.frames[frame_id]
def apply_transforms(self, world_transforms: List[Transform]):
"""Apply transformations to all the frames (the number of Transformation should match the number of frames)
:param world_transforms: List of Transform
:type world_transforms: list(Transform)
"""
        assert len(world_transforms) == len(
            self.frames
        ), "world_transforms should have the same length as frames"
for idx in range(len(self.frames)):
self.get_frame(index=idx).apply_transform(world_transforms[idx])
def filter_points(self, min_intensity=None, min_intensity_percentile=None):
"""Filter points based on intensity
        :param min_intensity: Minimum intensity allowed
        :type min_intensity: int
        :param min_intensity_percentile: Minimum intensity percentile allowed (computed with np.percentile)
:type min_intensity_percentile: int
"""
for frame in self.frames:
frame.filter_points(min_intensity, min_intensity_percentile)
def get_projected_image(
self, camera_id, color_mode="intensity", frames_index=range(0, 1), **kwargs
):
"""Get camera_id image with projected points, (**Legacy method**)
:param camera_id: Camera id/Name/Identifier
:type camera_id: str, int
:param color_mode: Color mode, default ``default``, modes are: 'depth', 'intensity' and 'default'
:type color_mode: str
:param frames_index: Project points for a range of frames, default `first frame`
:type frames_index: range
:returns: Image with points projected
:rtype: Image
"""
all_points = np.array([]).reshape(0, 4)
for idx in frames_index:
points = np.array(self.frames[idx].points)[:, :4]
print(points.shape)
points[:, 3] = idx
all_points = np.concatenate((all_points, points), axis=0)
return self.cameras[camera_id].get_projected_image(
self.frames[frames_index[0]].get_image(camera_id),
all_points,
self.frames[frames_index[0]].transform,
color_mode,
)
def apply_transform(self, world_transform: Transform):
"""Apply a Transformation to all the frames
:param world_transform: Transform to apply to all the frames
:type world_transform: Transform
"""
for idx in range(len(self.frames)):
self.get_frame(index=idx).apply_transform(world_transform)
def make_transforms_relative(self):
"""Make all the frame transform relative to the first transform/frame. This will set the first transform to position (0,0,0) and heading (1,0,0,0)"""
offset = self.get_frame(index=0).transform.inverse
for frame in self.frames:
frame.transform = offset @ frame.transform
def downsample_scene(self, voxel_size_mm: int = 250):
"""Downsamples all frames according to voxel size"""
for idx in range(len(self.frames)):
self.get_frame(index=idx).downsample_frame(voxel_size_mm=voxel_size_mm)
def to_dict(self, base_url: str = None) -> dict:
"""Return a dictionary with the frame urls using the base_url as base.
:param base_url: This url will be concatenated with the frames name, e.g.: `'%s/frame-%s.json' % (base_url, frame.id)`
:type base_url: str
:return: Dictionary with the frame urls data
:rtype: dict
"""
if base_url is None:
base_url = self.base_url
return dict(
frames=["%s/frame-%s.json" % (base_url, frame.id) for frame in self.frames]
)
def s3_upload(
self,
bucket: str,
path=None,
        mock_upload: bool = False,
        use_threads: bool = True,
):
"""Save scene in S3
:param bucket: S3 Bucket name
:type bucket: str
:param path: Path to store data
        :type path: str
        :param mock_upload: Skip the actual upload to S3 (default ``False``)
        :type mock_upload: bool
        :param use_threads: Upload multiple files concurrently using threads (default ``True``)
        :type use_threads: bool
:return: Scene S3 url
:rtype: str
"""
self.base_url = f"s3://{bucket}/{path}"
print("Uploading scene to S3: %s" % self.base_url)
scene_dict = self.to_dict(self.base_url)
poses_csv = pd.DataFrame(
self.frames.map(lambda f: list(f.transform.matrix.reshape(-1))).to_dict()
).T.to_csv(header=False)
if not mock_upload:
# Upload scene json file
s3_smart_upload(
bucket=bucket,
key=f"{path}/scene.json",
fileobj=BytesIO(bytes(ujson.dumps(scene_dict), encoding="utf-8")),
content_type="application/json",
)
# Upload ego2world csv file
s3_smart_upload(
bucket=bucket,
key=f"{path}/ego2world.csv",
fileobj=BytesIO(bytes(poses_csv, encoding="utf-8")),
content_type="text/plain",
)
if use_threads:
p = ThreadPool(processes=UPLOAD_POOL_SIZE)
func = partial(LidarFrame.s3_upload, bucket=bucket, path=path)
p.map(func, self.frames)
else:
for frame in self.frames:
frame.s3_upload(bucket, path)
signed_url = get_signed_url(bucket, f"{path}/scene.json")
print(f"Scene uploaded: {signed_url}")
return self.base_url
def scale_file_upload(
self, project_name: str,
):
"""Save scene in Scale file
:param project_name: File project name
:type project_name: str
:return: Scene file url
:rtype: str
"""
print("Uploading scene to Scale file")
p = ThreadPool(processes=UPLOAD_POOL_SIZE)
func = partial(LidarFrame.scale_file_upload, project_name=project_name)
self.scale_file_attachments = p.map(func, self.frames)
print(
f"Finishes uploading scene to Scale file, uploaded {len(self.scale_file_attachments)} frames"
)
return self.scale_file_attachments
def save_task(self, filepath: str, template=None):
"""Save the entire scene (with frame and images) in zipfile format to local filepath
:param filepath: File name and path in which the scene should be saved
:type filepath: str
"""
print("Saving scene:", filepath)
with zipfile.ZipFile(filepath, mode="w") as out:
# Save task
scene_dict = self.to_dict()
task_dict = dict(
template or {}, attachment_type="json", attachments=scene_dict["frames"]
)
out.writestr("task.json", ujson.dumps(task_dict))
# Save frames
for frame in self.frames:
# Save points
data = frame.to_json(self.base_url)
out.writestr(
"frame-%s.json" % frame.id, data, compress_type=zipfile.ZIP_DEFLATED
)
# Save frame images
for camera_id, image in frame.images.items():
if not image.image_path:
                        raise NotImplementedError(
                            "Only file-imported Images supported"
                        )
out.write(
image.image_path, "image-%s-%s.jpg" % (camera_id, frame.id)
)
print("Scene saved.")
def create_task(
self, template: Dict = None, task_type: TaskType = TaskType.LidarAnnotation
) -> Task:
"""Create a Scale platform task from the configured scene
:param template: Dictionary of payload for task creation (https://private-docs.scale.com/?python#parameters), attachments data will be filled automatically.
:type template: dict
        :param task_type: Select a Scale API endpoint to upload data to, currently supports 'lidarannotation', 'lidarsegmentation', and 'lidartopdown'. Defaults to 'lidarannotation'.
:type task_type: str
:return: Task object with related information. Inherited from `scaleapi.Task` object.
:rtype: Task
"""
if task_type == TaskType.LidarAnnotation:
from .task import LidarAnnotationTask
return LidarAnnotationTask.from_scene(self, template)
elif task_type == TaskType.LidarTopdown:
from .task import LidarTopDownTask
return LidarTopDownTask.from_scene(self, template)
elif task_type == TaskType.LidarSegmentation:
from .task import LidarSegmentationTask
return LidarSegmentationTask.from_scene(self, template)
else:
raise NotImplementedError(
f"Specified task_type {task_type} is not supported"
)
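# Usage sketch (illustrative only): minimal scene assembly with made-up calibration
# values; the image path, S3 bucket/path and task template are placeholders.
#
#     scene = LidarScene()
#     scene.get_camera(0).calibrate(
#         pose=Transform(),                                    # camera at the ego origin
#         K=np.array([[1000, 0, 640], [0, 1000, 360], [0, 0, 1]]),
#     )
#     frame = scene.get_frame(0)
#     frame.add_points(np.random.rand(1000, 4))                # x, y, z, intensity
#     frame.get_image(0).load_file("image-0-0.jpg")            # hypothetical image file
#     scene.s3_upload(bucket="my-bucket", path="scenes/demo")  # or scene.scale_file_upload(...)
#     task = scene.create_task(template={"project": "demo"})   # LidarAnnotation task by default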
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/scene.py
| 0.887747 | 0.308125 |
scene.py
|
pypi
|
import os
import pandas as pd
import ujson
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import numpy as np
from io import BytesIO
from pyquaternion import Quaternion
from typing import Any
from scipy.spatial.transform import Rotation as R
import open3d as o3d
from .camera import LidarCamera
from .image import LidarImage
from .connectors import Importer
from .transform import Transform
from .helper import (
s3_smart_upload,
format_lidar_point,
format_point,
format_quaternion,
scale_file_upload,
)
class LidarFrame:
"""Frame object represents the point cloud, image and calibration data contained in a single frame
Frame properties:
- id: Frame id, used to identify the frame
- cameras: List of LidarCamera
- images: List of LidarImage
- points: Pointcloud for this frame
- radar_points: Radar points for this frame
- colors: Colors for each point on the pointcloud for this frame
- transform: Pose/ transform of this frame
"""
def __init__(self, frame_id, cameras):
self.id = frame_id
self.cameras: pd.Series[Any, LidarCamera] = cameras
self.images: pd.Series[Any, LidarImage] = pd.Series(dtype=object)
self.points: np.ndarray = np.zeros((0, 5), dtype=float)
self.radar_points: np.ndarray = np.zeros((0, 3), dtype=float)
self.colors: np.ndarray = np.zeros((0, 3), dtype=float)
self.transform = Transform()
def get_image(self, camera_id) -> LidarImage:
"""Get image by camera_id or create one if it does not exist
:param camera_id: Camera id
:type camera_id: str, int
:returns: LidarImage object
:rtype: LidarImage
"""
assert camera_id in self.cameras, "Camera not found"
if camera_id not in self.images:
if isinstance(camera_id, int):
self.images.index = self.images.index.astype(int)
self.images[camera_id] = LidarImage(camera=self.cameras[camera_id])
return self.images[camera_id]
def add_points_from_connector(
self, connector: Importer, transform: Transform = None, intensity=1, sensor_id=0
):
"""Use Importer output to add points to the frame
:param connector: Importer used to load the points
:type connector: Importer
:param transform: Transform that should be applied to the points
:type transform: Transform
:param intensity: If the points list does not include intensity, this value will be used as intensity for all the points (default ``1``)
:type intensity: int
:param sensor_id: Sensor id, used in case that you have more than one lidar sensor. (Default ``0``)
:type sensor_id: int
"""
self.add_points(connector.data, transform, intensity, sensor_id)
def add_radar_points(self, points: np.array):
"""Add radar points to the frame, structure:
.. highlight:: python
.. code-block:: python
radar_points = np.array([
[
[0.30694541, 0.27853175, 0.51152715], // position - x,y,z
[0.80424087, 0.24164057, 0.45256181], // direction - x,y,z
[0.73596422] // size
],
...
])
:param points: List of radar points data
:type points: np.array
"""
assert np.array(points).shape[1] == 3, "Radar points length is not 3"
self.radar_points = points
def add_points(
self, points: np.array, transform: Transform = None, intensity=1, sensor_id=0
):
"""Add points to the frame, structure: np.array with dimension 1 and shape (N,3) or (N,4) (N being the number of point in the frame)
Points with intensity:
.. highlight:: python
.. code-block:: python
points = np.array([
[0.30694541, 0.27853175, 0.51152715, 0.4],
[0.80424087, 0.24164057, 0.45256181, 1],
...
])
Points without intensity:
.. highlight:: python
.. code-block:: python
points = np.array([
[0.30694541, 0.27853175, 0.51152715],
[0.80424087, 0.24164057, 0.45256181],
...
])
:param points: List of points
:type points: np.array
:param transform: Transform that should be applied to the points
:type transform: Transform
:param intensity: If the points list doesn't include intensity, this value will be used as intensity for all the points (default ``1``)
:type intensity: int
:param sensor_id: Sensor id, used in case that you have more than one lidar sensor. (Default ``0``)
:type sensor_id: int
"""
if points.ndim == 1:
points = np.array([points])
if points.shape[1] == 3:
points = np.hstack(
[points, np.ones((points.shape[0], 1)) * intensity])
if points.shape[1] == 4:
if sensor_id != 0:
points = np.hstack(
[points, np.ones((points.shape[0], 1)) * sensor_id])
else:
points = np.hstack([points, np.zeros((points.shape[0], 1))])
if points.shape[1] == 5:
if sensor_id != 0:
points = np.hstack(
[points, np.ones((points.shape[0], 1)) * sensor_id])
else:
points = np.hstack([points, np.zeros((points.shape[0], 1))])
if transform is not None:
points = transform.apply(points)
self.points = np.vstack([self.points, points])
def add_colors(self, colors: np.ndarray):
"""Add colors to the pointcloud. This list should follow the same order as the point list.
Each color should be in RGB with values between 0 and 255.
.. highlight:: python
.. code-block:: python
colors = np.array([
[10, 200, 230],
[0, 0, 255],
...
])
:param colors: List of colors
:type colors: np.ndarray
"""
self.colors = np.vstack([self.colors, colors])
def add_debug_lines(self, intensity: int = 1, length: int = 5, device: int = 0):
"""Add debug lines.
This will add a line starting from each camera position to the direction it is facing. This will use the camera position in this frame.
:param intensity: Intensity of the points from the debugging line, default ``1``
:type intensity: int
:param length: Length of the line, default ``5`` points
:type length: int
        :param device: Device id for the added points, default ``0``
:type device: int
"""
x_line = np.array([np.array([length * k / 100, 0, 0])
for k in range(0, 100)])
for camera in self.cameras:
self.add_points(
x_line, transform=camera.world_transform, intensity=intensity
)
def get_world_points(self):
"""Return the list of points with the frame transformation applied
:returns: List of points in world coordinates
:rtype: np.array
"""
return np.hstack(
[
self.transform @ self.points[:, :3],
self.points[:, 3:4],
self.points[:, 4:5],
self.points[:, 5:6],
]
)
    # leaving this method as legacy / old code dependency
def get_projected_image(self, camera_id, color_mode: str = "default", **kwargs):
"""Get camera_id image with projected points
:param camera_id: Camera id/Name/Identifier
:type camera_id: str, int
:param color_mode: Color mode, default ``default``, modes are: 'depth', 'intensity' and 'default'
:type color_mode: str
:returns: Image with the points projected
:rtype: PIL.Image
"""
return self.cameras[camera_id].get_projected_image(
self.get_image(camera_id), self.points, self.transform, color_mode, **kwargs
)
def manual_calibration(
self, camera_id, intrinsics_ratio: int = 1000, extrinsics_ratio: int = 10
):
"""Open a window with the camera with the points projected over it. The window also display dials to change the camera intrinsic and extrinsic values. The new values for the camera calibration will be display as matrices on the terminal.
:param camera_id: Camera id/Name/Identifier
:type camera_id: str, int
:param intrinsics_ratio: Range of possible values for the intrinsic, center value will be the current one.
:type intrinsics_ratio: int
:param extrinsics_ratio: Range of possible values for the extrinsic, center value will be the current one.
:type extrinsics_ratio: int
"""
fig = plt.figure(constrained_layout=True)
intrinsics = ["fx", "fy", "cx", "cy"]
extrinsics_position = ["x", "y", "z"]
extrinsics_heading = ["qw", "qx", "qy", "qz"]
heights = np.concatenate(
(np.array([7]), np.array([0.2 for x in range(0, 7)])), axis=0
)
gs = fig.add_gridspec(
ncols=3,
nrows=1 + len(extrinsics_heading) + len(extrinsics_position),
height_ratios=heights,
)
imgObj = fig.add_subplot(gs[0, :])
imgObj.imshow(
self.cameras[camera_id].get_projected_image(
self.get_image(
camera_id), self.points, self.transform, "depth", 1
)
)
for index, key in enumerate(intrinsics):
globals()[f"ax{key}"] = fig.add_subplot(gs[index + 1, 0])
value = getattr(self.cameras[camera_id], key)
globals()[key] = Slider(
globals()[f"ax{key}"],
f"{key}",
value - intrinsics_ratio,
value + intrinsics_ratio,
valinit=value,
)
for index, key in enumerate(extrinsics_position):
globals()[f"ax{key}"] = fig.add_subplot(gs[index + 1, 1])
value = getattr(self.cameras[camera_id], "position")
globals()[key] = Slider(
globals()[f"ax{key}"],
f"{key}",
value[index] - extrinsics_ratio,
value[index] + extrinsics_ratio,
valinit=value[index],
)
for index, key in enumerate(extrinsics_heading):
globals()[f"ax{key}"] = fig.add_subplot(
gs[index + len(extrinsics_position) + 1, 1]
)
value = getattr(self.cameras[camera_id], "rotation")
value = Quaternion(R.from_matrix(value).as_quat())
globals()[key] = Slider(
globals()[f"ax{key}"],
f"{key}",
value[index] - extrinsics_ratio,
value[index] + extrinsics_ratio,
valinit=value[index],
)
def update(val):
self.cameras[camera_id].calibrate(
K=np.array(
[[fx.val, 0, cx.val], [0, fy.val, cy.val], [0, 0, 1]]),
pose=Transform.from_Rt(
Quaternion(qw.val, qx.val, qy.val, qz.val).rotation_matrix,
[x.val, y.val, z.val],
),
)
np.set_printoptions(suppress=True)
print(f"New intrinsics for Camera {camera_id}")
print(self.cameras[camera_id].K)
print(f"New position for Camera {camera_id}")
print(self.cameras[camera_id].position)
print(f"New heading for Camera {camera_id}")
print(
dict(
zip(
["w", "x", "y", "z"],
Transform(self.cameras[camera_id].rotation).quaternion,
)
)
)
imgObj.imshow(
self.cameras[camera_id].get_projected_image(
self.get_image(
camera_id), self.points, self.transform, "depth", 1
)
)
fig.canvas.draw_idle()
axApply = fig.add_subplot(gs[1, 2])
bApply = Button(axApply, "Apply changes")
bApply.on_clicked(update)
plt.show()
def get_filename(self) -> str:
"""Get frame json file name
:returns: Json file name
:rtype: str
"""
return "frame-%s.json" % self.id
def apply_transform(self, T: Transform):
"""Apply the frame transformation. This will be used to define the device position and applied to cameras and points.
:param T: Transform for this frame
:type T: Transform
"""
self.transform = Transform(T) @ self.transform
def filter_points(self, min_intensity=None, min_intensity_percentile=None):
"""Filter points based on their intensity
        :param min_intensity: Minimum intensity allowed
        :type min_intensity: int
        :param min_intensity_percentile: Minimum intensity percentile allowed (computed with np.percentile)
:type min_intensity_percentile: int
"""
if min_intensity is not None:
self.points = self.points[self.points[:, 3] >= min_intensity]
if min_intensity_percentile is not None:
self.points = self.points[
self.points[:, 3]
>= np.percentile(self.points[:, 3], min_intensity_percentile)
]
def to_json(
self, base_url: str = "", s3_upload: bool = True, project_name: str = ""
):
"""Return the frame data in json format following Scale data format: https://private-docs.scale.com/?python#sensor-fusion-lidar-annotatio.
This will return the final data from the frame, this means cameras and points will be in world coordinates.
        :param base_url: This url will be concatenated with the image name, e.g.: `'%s/image-%s-%s.jpg' % (base_url, camera.id, frame.id)`
:type base_url: str
:returns: Frame object as a JSON formatted stream
:rtype: str
"""
def format_image(camera, s3_upload: bool, project_name: str):
image = self.images[camera.id]
if camera.world_poses:
wct = camera.world_poses[self.id]
else:
wct = (image.transform or self.transform) @ camera.pose
D = camera.D
image_url = "%s/image-%s-%s.jpg" % (base_url, camera.id, self.id)
if not s3_upload:
image_url = self.images[camera.id].scale_file_upload(
project_name)
result = dict(
position=format_point(wct.position),
heading=format_quaternion(wct.quaternion),
image_url=image_url,
camera_model=camera.model,
fx=camera.fx,
fy=camera.fy,
cx=camera.cx,
cy=camera.cy,
skew=camera.skew,
k1=float(D[0]),
k2=float(D[1]),
p1=float(D[2]),
p2=float(D[3]),
k3=float(D[4]),
k4=float(D[5]),
k5=float(D[6]) if len(D) >= 7 else 0,
k6=float(D[7]) if len(D) >= 8 else 0,
lx=float(D[8]) if len(D) >= 9 else 0,
ly=float(D[9]) if len(D) >= 10 else 0,
xi=float(D[10]) if len(D) >= 11 else 0,
scale_factor=camera.scale_factor,
)
if image.metadata:
result["metadata"] = image.metadata
if image.timestamp:
result["timestamp"] = image.timestamp
return result
images_json = (
self.cameras[self.images.index]
.apply(format_image, args=(s3_upload, project_name))
.to_json(orient="records")
)
points_json = pd.DataFrame(
self.get_world_points(), columns=["x", "y", "z", "i", "d", "t"]
).to_json(double_precision=4, orient="records", date_format=None)
colors_json = pd.Series(self.colors.reshape(-1).astype(np.uint32)).to_json(
orient="values", date_format=None
)
radar_points_json = ujson.dumps(
list(
np.array(
[
{
"position": format_point(row[0]),
"direction": format_point(row[1]),
"size": row[2][0],
}
for row in self.radar_points
]
)
)
)
frame_object = {
"images": "__IMAGES__",
"points": "__POINTS__",
"device_position": format_point(self.transform.position),
"device_heading": format_quaternion(self.transform.quaternion),
}
if len(self.colors) > 0:
frame_object["point_colors"] = "__COLORS__"
if len(self.radar_points) > 0:
frame_object["radar_points"] = "__RADAR_POINTS__"
out = ujson.dumps(frame_object)
out = out.replace('"__IMAGES__"', images_json)
out = out.replace('"__POINTS__"', points_json)
out = out.replace('"__COLORS__"', colors_json)
out = out.replace('"__RADAR_POINTS__"', radar_points_json)
return out
def save(self, path: str, base_url: str = ""):
"""Save frame object in a json file
:param path: Path in which the frame data should be saved
:type path: str
        :param base_url: This url will be concatenated with the image name, e.g.: `'%s/image-%s-%s.jpg' % (base_url, camera.id, frame.id)`
:type base_url: str
"""
# Save frame
with open(os.path.join(path, "frame-%s.json" % self.id), "w") as file:
file.write(self.to_json(base_url))
# Save images
for camera_id, image in self.images.items():
image.save(os.path.join(path, "image-%s-%s.jpg" %
(camera_id, self.id)))
def s3_upload(self, bucket: str, path: str):
"""Save frame in S3
:param bucket: S3 Bucket name
:type bucket: str
:param path: Path to store data
        :type path: str
"""
# print(f'Uploading frame {self.id}...')
base_url = f"s3://{bucket}/{path}"
# Upload frame json file
s3_smart_upload(
fileobj=BytesIO(bytes(self.to_json(base_url), encoding="utf-8")),
bucket=bucket,
key=f"{path}/frame-{self.id}.json",
content_type="application/json",
)
# Upload images
for camera_id, image in self.images.items():
image.s3_upload(bucket, f"{path}/image-{camera_id}-{self.id}.jpg")
def scale_file_upload(self, project_name: str):
"""Save frame in Scale File
:param project_name: File project name
        :type project_name: str
"""
fileobj = BytesIO(
bytes(
self.to_json(s3_upload=False, project_name=project_name),
encoding="utf-8",
)
)
return scale_file_upload(fileobj, project_name)
def downsample_frame(self, voxel_size_mm=250):
pcd = o3d.geometry.PointCloud()
normals = np.zeros((self.points.shape[0], 3), dtype=float)
normals[:, 0] = self.points[:, 3]
pcd.points = o3d.utility.Vector3dVector(self.points[:, :3])
pcd.normals = o3d.utility.Vector3dVector(normals)
pcd.colors = o3d.utility.Vector3dVector(self.colors)
pcd = pcd.voxel_down_sample(voxel_size_mm / 1000)
positions, intensities = np.asarray(
pcd.points), np.asarray(pcd.normals)[:, 0]
points = np.hstack(
[positions, intensities.reshape(intensities.shape[0], 1)])
self.points = np.hstack(
[points, np.zeros((points.shape[0], 1), dtype=float)])
self.colors = np.asarray(pcd.colors)
def __repr__(self):
return "Frame({0}) {1}".format(self.id, self.transform)
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/frame.py
| 0.875548 | 0.476762 |
frame.py
|
pypi
|
import numpy as np
import transforms3d as t3d
from pyquaternion import Quaternion
class Transform:
"""Transform object represent a rigid transformation matrix (rotation and translation).
    Transform is a 4x4 matrix, although it can be instantiated from (16,), (3,4), (3,3) or (3,) matrices.
    **Note**: not all Transform methods work when the object is built from one of the smaller matrices
.. highlight:: python
.. code-block:: python
[
[ r00, r01, r02, t0],
[ r10, r11, r12, t1],
[ r20, r21, r22, t2],
[ 0, 0, 0, 1]
]
"""
def __init__(self, value=None):
self.matrix = np.eye(4)
if isinstance(value, Transform):
self.matrix = np.array(value)
elif isinstance(value, Quaternion):
self.rotation = value.rotation_matrix
elif value is not None:
value = np.array(value)
if value.shape == (4, 4):
self.matrix = value
            elif value.shape == (16,):
self.matrix = np.array(value).reshape((4, 4))
elif value.shape == (3, 4):
self.matrix[:3, :4] = value
elif value.shape == (3, 3):
self.rotation = value
elif value.shape == (3,):
self.translation = value
@staticmethod
def from_Rt(R, t):
"""Create a transform based on a rotation and a translation components.
:param R: Rotation matrix or quaternion.
:type R: Quaternion, list
:param t: Translation component
:type t: list
:returns: Transform created based on the components
:rtype: Transform
"""
if isinstance(R, Quaternion):
R = R.rotation_matrix
return Transform(np.block([[R, np.mat(t).T], [np.zeros(3), 1]]))
@staticmethod
def from_euler(angles, axes="sxyz", degrees=False):
"""Create a transform from euler angles
:param angles: Values of the rotation per axis
:type angles: list
:param axes: Order of the axis (default ``sxyz``)
:type axes: str
:param degrees: Use degrees or radians values (default ``False`` = radians)
:type degrees: boolean
:returns: Transform created from euler angles
:rtype: Transform
"""
if degrees:
angles = np.deg2rad(angles)
return Transform(t3d.euler.euler2mat(*angles, axes=axes))
@staticmethod
def from_transformed_points(A, B):
"""Create a transform from two points
:param A: Point A (x,y,z)
:type A: list
:param B: Point B (x,y,z)
:type B: list
:returns: Transform created from the angles
:rtype: Transform
"""
assert A.shape == B.shape
mean_A = np.mean(A, axis=0)
mean_B = np.mean(B, axis=0)
centroid_A = A - mean_A
centroid_B = B - mean_B
C = centroid_A.T @ centroid_B
V, S, W = np.linalg.svd(C)
if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
R = V @ W
t = mean_B - mean_A @ R
return Transform.from_Rt(R.T, t)
@staticmethod
def random():
"""Create a transform from random rotation and translation
        :returns: Transform with random rotation and translation
:rtype: Transform
"""
return Transform.from_Rt(Quaternion.random(), np.random.rand(3))
@property
def rotation(self):
"""Transform rotation
:getter: Return transform's rotation
:setter: Set transform rotation, could use a 3x3 matrix or a Quaternion
:type: 3x3 matrix
"""
return self.matrix[:3, :3]
@property
def quaternion(self):
"""Transform rotation as quaternion
:getter: Return transform's rotation as quaternion
:type: Quaternion
"""
return Quaternion(t3d.quaternions.mat2quat(self.matrix[:3, :3]))
@rotation.setter
def rotation(self, rotation):
if isinstance(rotation, Quaternion):
rotation = rotation.rotation_matrix
self.matrix[:3, :3] = rotation
@property
def position(self):
"""Transform position/translation
:getter: Return transform's position
:setter: Set transform's position list(3x1)
:type: list
"""
return self.matrix[:3, 3].flatten()
@position.setter
def position(self, position):
self.matrix[:3, 3] = np.array(position).reshape(3)
@property
def translation(self):
"""Transform position/translation
:getter: Return transform's position
:setter: Set transform's position list(3x1)
:type: list
"""
return self.matrix[:3, 3].flatten()
@translation.setter
def translation(self, translation):
self.matrix[:3, 3] = np.array(translation).reshape(3)
@property
def euler_angles(self, axes="sxyz"):
"""Transform rotation in euler angles
        :getter: Return transform's rotation in euler angles
:type: list
"""
return t3d.euler.mat2euler(self.matrix, axes=axes)
@property
def euler_degrees(self, axes="sxyz"):
"""Transform rotation in euler degrees
        :getter: Return transform's rotation in euler degrees
:type: list
"""
return np.rad2deg(t3d.euler.mat2euler(self.matrix, axes=axes))
@property
def T(self):
"""Transpose of the transform
:returns: Transpose of the transform
:rtype: Transform
"""
try:
return Transform(self.matrix.T)
except ValueError:
print("Can not transpose the Transform matrix")
@property
def inverse(self):
"""Inverse of the transform
:returns: Inverse of the transform
:rtype: Transform
"""
try:
return Transform.from_Rt(
self.rotation.T, np.dot(-self.rotation.T, self.translation)
)
except ValueError:
print("Can not inverse the Transform matrix")
def apply(self, points):
"""Apply transform to a list of points
:param points: List of points (N,3) or (N,4)
        :returns: List of points with the transform applied
:rtype: list
"""
points_4d = np.hstack([points[:, :3], np.ones((points.shape[0], 1))])
transformed_4d = points_4d.dot(self.matrix.T)
return np.hstack([transformed_4d[:, :3], points[:, 3:]])
def interpolate(self, other, factor):
"""Interpotation of the transform
:param other: Transform to interpolate with
:type other: Transform
:param factor: Factor of interpolation
:type factor: float between 0 and 1
:returns: Transform resulted from the interpolation
:rtype: Transform
"""
assert 0 <= factor <= 1.0
other = Transform(other)
return self.from_Rt(
Quaternion.slerp(self.quaternion, other.quaternion, factor),
self.position + factor * (other.position - self.position),
)
def __array__(self):
return self.matrix
def __getitem__(self, values):
return self.matrix.__getitem__(values)
def __add__(self, other):
return Transform(other) @ self
def __matmul__(self, other):
if isinstance(other, np.ndarray):
return self.apply(other)
return Transform(self.matrix @ Transform(other).matrix)
def __eq__(self, other):
return np.allclose(self.matrix, other.matrix)
def __repr__(self):
return "R=%s t=%s" % (
np.array_str(self.euler_degrees, precision=3, suppress_small=True),
np.array_str(self.position, precision=3, suppress_small=True),
)
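# Illustrative usage sketch for the Transform API above; the numeric values are
# arbitrary placeholders and the block relies only on this module's own imports.
if __name__ == "__main__":
    pose = Transform.from_euler([0, 0, 90], degrees=True)  # 90 degrees about Z
    pose.translation = [1.0, 2.0, 3.0]
    # Apply the pose to an (N, 3) point set and undo it with the inverse.
    pts = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
    moved = pose @ pts                      # __matmul__ with an ndarray calls apply()
    assert np.allclose(pose.inverse @ moved, pts)
    # Estimate the same pose back from the two corresponding point sets.
    print(Transform.from_transformed_points(pts, moved))
    # Blend halfway between the identity and the pose (slerp + linear translation).
    print(Transform().interpolate(pose, 0.5))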
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/transform.py
| 0.951897 | 0.865281 |
transform.py
|
pypi
|
import numpy as np
import requests
from scaleapi.tasks import Task, TaskType
import ujson
from .scene import LidarScene
from .helper import parse_xyz, get_api_client, get_default_template
class LidarAnnotationTask(Task):
"""Lidar annotation Task object"""
scene: LidarScene = None
def __init__(self, param_dict, client):
super(LidarAnnotationTask, self).__init__(param_dict, client)
@staticmethod
def from_scene(scene: LidarScene, template=None, client=None):
"""Load scene data and convert it into a LidarAnnotation Task format
:param scene: Scene to load
:type scene: LidarScene
        :param template: Template/payload to use in the request to the Scale API
        :type template: dict
        :param client: ScaleClient object, by default it will load your SCALE_API_KEY from your env vars and set up a client automatically.
:type client: scaleapi.ScaleClient
:returns: LidarAnnotationTask object
:rtype: LidarAnnotationTask
"""
if not scene.scale_file_attachments: # s3 upload
assert (
scene.base_url
), "No public URL on scene, please upload or save the scene first"
# Get client using environ api key
if client is None:
client = get_api_client()
param_dict = get_default_template()
# Load scene params
if not scene.scale_file_attachments: # s3 upload
scene_dict = scene.to_dict(scene.base_url)
param_dict["attachments"] = scene_dict["frames"]
else: # scale file upload
param_dict["attachments"] = scene.scale_file_attachments
param_dict["attachment_type"] = "json"
if isinstance(template, dict):
param_dict.update(template)
elif isinstance(template, str):
param_dict.update(ujson.load(open(template)))
elif template is not None:
raise AttributeError("Template error")
return LidarAnnotationTask(param_dict, client)
@staticmethod
def from_id(task_id: str):
"""Get LidarAnnotation task from a task id
:param task_id: Task id
:type task_id: str
:returns: LidarAnnotationTask object created based on the task id data
:rtype: LidarAnnotationTask
"""
task = get_api_client().fetch_task(task_id)
return LidarAnnotationTask(task.param_dict, task.client)
def get_annotations(self):
"""Get annotations/response from a completed LidarAnnotation task
:returns: Annotations
:rtype: dict
"""
assert "response" in self.param_dict, "Task without response"
url = self.param_dict["response"]["annotations"]["url"]
response = requests.get(url)
return ujson.loads(response.text)
def get_cuboid_positions_by_frame(self):
"""Get a list of each cuboid position in each frames (from a completed task)
:returns: List of cuboids positions
:rtype: list
"""
annotations = self.get_annotations()
return np.array(
[
np.array(
[parse_xyz(p) for p in [c["position"] for c in frame["cuboids"]]]
)
for frame in annotations
]
)
def publish(
self, task_type: TaskType = TaskType.LidarAnnotation, verbose: bool = True
):
"""Publish/create a task, request Scale API with the LidarAnnotation data
:param task_type: Task type to create, default ``lidarannotation``
:rtype task_type: scaleapi.tasks.TaskType
:returns: Task object creation from the response of the API call
:rtype: scaleapi.tasks.Task
"""
task = self._client.create_task(task_type, **self.as_dict())
if verbose:
print("Task created: %s" % task)
return task
class LidarTopDownTask(Task):
"""Lidar top-down Task object"""
scene: LidarScene = None
def __init__(self, param_dict, client):
super(LidarTopDownTask, self).__init__(param_dict, client)
@staticmethod
def from_scene(scene: LidarScene, template=None, client=None):
"""Load scene data and convert it into a LidarTopDown Task format
:param scene: Scene to load
:type scene: LidarScene
        :param template: Template/payload to use in the request to the Scale API
        :type template: dict
        :param client: ScaleClient object, by default it will load your SCALE_API_KEY from your env vars and set up a client automatically.
:type client: scaleapi.ScaleClient
:returns: LidarTopDownTask object
:rtype: LidarTopDownTask
"""
assert (
scene.base_url
), "No public URL on scene, please upload or save the scene first"
# Get client using environ api key
if client is None:
client = get_api_client()
param_dict = get_default_template()
# Load scene params
if not scene.scale_file_attachments: # s3 upload
scene_dict = scene.to_dict(scene.base_url)
param_dict["attachments"] = scene_dict["frames"]
else: # scale file upload
param_dict["attachments"] = scene.scale_file_attachments
param_dict["attachment_type"] = "json"
if isinstance(template, dict):
param_dict.update(template)
elif isinstance(template, str):
param_dict.update(ujson.load(open(template)))
elif template is not None:
raise AttributeError("Template error")
return LidarTopDownTask(param_dict, client)
@staticmethod
def from_id(task_id: str):
"""Get LidarTopDown task from a task id
:param task_id: Task id
:type task_id: str
:returns: LidarTopDownTask object created based on the task id data
:rtype: LidarTopDownTask
"""
task = get_api_client().fetch_task(task_id)
return LidarTopDownTask(task.param_dict, task.client)
def publish(
self, task_type: TaskType = TaskType.LidarTopdown, verbose: bool = True
):
"""Publish/create a task, request Scale API with the LidarTopDown data
:param task_type: Task type to create, default ``lidartopdown``
:rtype task_type: scaleapi.tasks.TaskType
:returns: Task object creation from the response of the API call
:rtype: scaleapi.tasks.Task
"""
task = self._client.create_task(task_type, **self.as_dict())
if verbose:
print("Task created: %s" % task)
return task
class LidarSegmentationTask(Task):
"""Lidar segmentation Task object"""
scene: LidarScene = None
def __init__(self, param_dict, client):
super(LidarSegmentationTask, self).__init__(param_dict, client)
@staticmethod
def from_scene(scene: LidarScene, template=None, client=None):
"""Load scene data and convert it into a LidarSegmentation Task format
:param scene: Scene to load
:type scene: LidarScene
        :param template: Template/payload to use in the request to the Scale API
        :type template: dict
        :param client: ScaleClient object, by default it will load your SCALE_API_KEY from your env vars and set up a client automatically.
:type client: scaleapi.ScaleClient
:returns: LidarSegmentationTask object
:rtype: LidarSegmentationTask
"""
assert (
scene.base_url
), "No public URL on scene, please upload or save the scene first"
# Get client using environ api key
if client is None:
client = get_api_client()
param_dict = get_default_template()
# Load scene params
if not scene.scale_file_attachments: # s3 upload
scene_dict = scene.to_dict(scene.base_url)
param_dict["attachments"] = scene_dict["frames"]
else: # scale file upload
param_dict["attachments"] = scene.scale_file_attachments
param_dict["attachment_type"] = "json"
if isinstance(template, dict):
param_dict.update(template)
elif isinstance(template, str):
param_dict.update(ujson.load(open(template)))
elif template is not None:
raise AttributeError("Template error")
return LidarSegmentationTask(param_dict, client)
@staticmethod
def from_id(task_id: str):
"""Get LidarSegmentation task from a task id
:param task_id: Task id
:type task_id: str
:returns: LidarSegmentationTask object created based on the task id data
:rtype: LidarSegmentationTask
"""
task = get_api_client().fetch_task(task_id)
return LidarSegmentationTask(task.param_dict, task.client)
def publish(
self, task_type: TaskType = TaskType.LidarSegmentation, verbose: bool = True
):
"""Publish/create a task, request Scale API with the LidarSegmentation data
:param task_type: Task type to create, default ``lidarsegmentation``
:rtype task_type: scaleapi.tasks.TaskType
:returns: Task object creation from the response of the API call
:rtype: scaleapi.tasks.Task
"""
task = self._client.create_task(task_type, **self.as_dict())
if verbose:
print("Task created: %s" % task)
return task
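# Illustrative workflow sketch (not wired into the package). It assumes a scene that
# has already been uploaded (so `scene.base_url` or `scene.scale_file_attachments`
# is set), a SCALE_API_KEY in the environment, and a purely hypothetical template.
def _example_annotation_workflow(scene: LidarScene, task_id=None):
    """Create a LidarAnnotation task from a scene, or inspect a completed one."""
    if task_id is None:
        template = {"instruction": "Label all vehicles"}  # placeholder payload override
        return LidarAnnotationTask.from_scene(scene, template=template).publish()
    # For a completed task, pull the per-frame cuboid centers from the response.
    return LidarAnnotationTask.from_id(task_id).get_cuboid_positions_by_frame()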
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/task.py
| 0.744192 | 0.262704 |
task.py
|
pypi
|
from collections import defaultdict
import numpy as np
from pyquaternion import Quaternion
from typing import List
from google.protobuf.json_format import MessageToDict
from .lidar_frame_1_pb2 import CameraImage, LidarFrame
from .transform import Transform
# Protobuf Helpers
def create_scene_from_protobufs(scene, protobufs: List[str]):
"""
Create a LidarScene object from a list of protobuf files
Args:
scene: LidarScene object
protobufs: List of filepaths to the protobuf files
Returns: The updated LidarScene
"""
for frame_num, protobuf in enumerate(protobufs):
with open(protobuf, "rb") as f:
frame = LidarFrame.FromString(f.read())
pose = Transform.from_Rt(
R=Quaternion(
frame.device_heading.w,
frame.device_heading.x,
frame.device_heading.y,
frame.device_heading.z,
),
t=[
frame.device_position.x,
frame.device_position.y,
frame.device_position.z,
],
)
# Group points by device ID
points_by_sensor = defaultdict(list)
for point in frame.points:
sensor_id = getattr(point, "d", 0)
points_by_sensor[sensor_id].append(point)
for sensor_id, lidar_points in points_by_sensor.items():
points = np.asarray([[p.x, p.y, p.z, p.i] for p in lidar_points])
scene.get_frame(frame_num).add_points(points, sensor_id=sensor_id)
scene.get_frame(frame_num).apply_transform(pose)
for camera in frame.images:
camera_num = camera.camera_index
image_url = camera.image_url
# Calibrate cameras once
if frame_num == 0:
calibrate_camera(scene, camera)
scene.get_frame(frame_num).get_image(camera_num).load_file(image_url)
return scene
def calibrate_camera(scene, camera: CameraImage):
camera_pose = Transform.from_Rt(
R=Quaternion(
camera.heading.w,
camera.heading.x,
camera.heading.y,
camera.heading.z,
),
t=[camera.position.x, camera.position.y, camera.position.z],
)
distortion_model = camera.WhichOneof("camera_intrinsics")
intrinsics = getattr(camera, distortion_model)
# Protobuf supports xi for omnidirectional but not supported by calibrate
if hasattr(intrinsics, "xi"):
        print('NOTE: For omnidirectional intrinsics, xi is not supported.')
intrinsics_params = MessageToDict(intrinsics)
scene.get_camera(camera.camera_index).calibrate(
pose=camera_pose, **intrinsics_params
)
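# Illustrative usage sketch (not wired into the package). The protobuf paths are
# placeholders, and it assumes LidarScene() can be constructed without arguments,
# as it is elsewhere in this package.
def _example_build_scene(protobuf_paths: List[str]):
    """Build a LidarScene from ordered per-frame protobuf dumps."""
    from .scene import LidarScene  # imported lazily so the sketch stays local
    scene = LidarScene()
    # Each protobuf contributes one frame: per-sensor points, ego pose and images.
    return create_scene_from_protobufs(scene, protobuf_paths)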
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/protobuf_helper.py
| 0.842831 | 0.300803 |
protobuf_helper.py
|
pypi
|
import shutil
import numpy as np
import tempfile
from PIL import Image, ImageEnhance
from .helper import s3_smart_upload, scale_file_upload
class LidarImage:
"""LidarImage objects represent an image with a LidarCamera reference.
LidarImage properties:
- camera: Camera id
- image_path: Image path
    - transform: Transformation applied to the LidarImage (used as: (LidarImage.transform or LidarFrame.transform) @ camera.pose)
- metadata: Metadata related to the image
- timestamp: Timestamp
"""
def __init__(self, camera):
self.camera = camera
self.image_path = None
self.transform = None
self.metadata = None
self.timestamp = None
# Legacy method
def load_file(self, file: str):
"""Set LidarImage image_path
(**Legacy method**)
:param file: Set image path
:type file: str
"""
if not isinstance(file, str):
print("WARNING: No file!")
self.image_path = file
def save_pil_image(self, pil_image: Image.Image):
"""Save image in image_path
:param pil_image: Image to save
:type pil_image: PIL.Image
"""
self.image_path = tempfile.mktemp(suffix="jpg")
pil_image = pil_image.convert("RGB")
pil_image.save(self.image_path, format="JPEG", quality=70, optimize=True)
print(f"Temp file created: {self.image_path}")
def get_image(self) -> Image:
"""Open LidarImage
:return: Image.open
"""
return Image.open(self.image_path)
    def as_array(self) -> np.ndarray:
        """Get the image as a numpy array
        :returns: Image as a numpy array
        :rtype: np.ndarray
"""
return np.asarray(self.get_image())
def set_scale(self, scale_factor: float):
"""Change image scale and save in image_path
:param scale_factor: Scale factor
:type scale_factor: float
"""
im = self.get_image()
size = (int(im.width * scale_factor), int(im.height * scale_factor))
self.save_pil_image(im.resize(size, Image.LANCZOS))
def set_brightness(self, factor: float):
"""Change image brightness and save in image_path
(will use PIL.ImageEnhance.Brightness)
:param factor: Brightness factor
        :type factor: float
"""
im = ImageEnhance.Brightness(self.get_image()).enhance(factor)
self.save_pil_image(im)
def save(self, target_file: str):
"""Save image in target_file path
:param target_file: Path in which the image should be saved
:type target_file: str
"""
if not isinstance(target_file, str):
print("WARNING: No file path!")
shutil.copyfile(self.image_path, target_file)
def s3_upload(self, bucket: str, key: str):
"""Save image in S3
:param bucket: S3 Bucket name
:type bucket: str
:param key: file name
:type key: str
"""
with open(self.image_path, "rb") as fp:
s3_smart_upload(
bucket=bucket, key=key, fileobj=fp, content_type="image/jpeg"
)
def scale_file_upload(self, project_name: str):
"""Save image in Scale File
:param project_name: File project name
        :type project_name: str
"""
with open(self.image_path, "rb") as fp:
return scale_file_upload(fp, project_name)
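# Illustrative usage sketch (not wired into the package). The camera object, source
# path, bucket and key are placeholders; the S3 step also needs valid AWS credentials.
def _example_prepare_image(camera, source_path: str, bucket: str, key: str):
    """Attach a file to a LidarImage, downscale and brighten it, then push it to S3."""
    image = LidarImage(camera)
    image.load_file(source_path)   # only records the path
    image.set_scale(0.5)           # re-encodes a half-resolution JPEG into a temp file
    image.set_brightness(1.2)      # mild brightness boost
    image.s3_upload(bucket=bucket, key=key)
    return image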
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/image.py
| 0.793986 | 0.352982 |
image.py
|
pypi
|
from .scene import LidarScene
from .transform import Transform
from .frame import LidarFrame
from .camera import LidarCamera
from .image import LidarImage
from .helper import (
s3_smart_upload,
format_lidar_point,
format_point,
format_quaternion,
scale_file_upload,
get_signed_url
)
from io import BytesIO
from functools import partial
from multiprocessing.pool import ThreadPool
from typing import MutableMapping, List, Dict
from urllib.parse import urlparse
from typing import Any
import pandas as pd
import numpy as np
import ujson
import nucleus
UPLOAD_POOL_SIZE = 8
#Formatting Helpers
def get_image_url_path(path, camera_id, frame_id):
return f"{path}/image-{camera_id}-{frame_id}.jpg"
def get_image_ref_id(ref_id_prefix, camera_id, frame_id):
return f"{ref_id_prefix}-{camera_id}-image-{frame_id}"
def get_pointcloud_ref_id(ref_id_prefix, frame_id):
return f"{ref_id_prefix}-pointcloud-{frame_id}"
def get_scene_ref_id(ref_id_prefix):
return f"{ref_id_prefix}-scene"
class Cuboid():
'''Work in progress class - need to fill out'''
def __init__(self, position: np.ndarray, yaw: float):
        self.position: np.ndarray = np.asarray(position, dtype=float)
        self.yaw: float = yaw
class NucleusLidarFrame(LidarFrame):
'''Overloaded Nucleus Frame class
Annotation and predictions are next up for implementation
'''
def __init__(self, frame_id, cameras):
self.id = frame_id
self.cameras: pd.Series[Any, LidarCamera] = cameras
self.images: pd.Series[Any, LidarImage] = pd.Series(dtype=object)
self.points: np.ndarray = np.zeros((0, 5), dtype=float)
self.pointcloud_metadata: dict = {}
self.radar_points: np.ndarray = np.zeros((0, 3), dtype=float)
self.colors: np.ndarray = np.zeros((0, 3), dtype=float)
self.transform = Transform()
self.annotations: pd.Series[Any,Cuboid] = pd.Series(dtype=object)
self.predictions: pd.Series[Any,Cuboid] = pd.Series(dtype=object)
def add_points(
self, points: np.array, transform: Transform = None, metadata: dict = None, intensity=1, sensor_id=0,
):
"""Add points to the frame, structure: np.array with dimension 1 and shape (N,3) or (N,4) (N being the number of point in the frame)
Points with intensity:
.. highlight:: python
.. code-block:: python
points = np.array([
[0.30694541, 0.27853175, 0.51152715, 0.4],
[0.80424087, 0.24164057, 0.45256181, 1],
...
])
Points without intensity:
.. highlight:: python
.. code-block:: python
points = np.array([
[0.30694541, 0.27853175, 0.51152715],
[0.80424087, 0.24164057, 0.45256181],
...
])
:param points: List of points
:type points: np.array
:param transform: Transform that should be applied to the points
:type transform: Transform
:param metadata: Any pointcloud metadata to be associated with dataset item
:type metadata: dict
:param intensity: If the points list doesn't include intensity, this value will be used as intensity for all the points (default ``1``)
:type intensity: int
:param sensor_id: Sensor id, used in case that you have more than one lidar sensor. (Default ``0``)
:type sensor_id: int
"""
if points.ndim == 1:
points = np.array([points])
if points.shape[1] == 3:
points = np.hstack([points, np.ones((points.shape[0], 1)) * intensity])
if transform is not None:
points = transform.apply(points)
points = np.hstack([points, np.ones((points.shape[0], 1)) * sensor_id])
self.points = np.vstack([self.points, points])
self.pointcloud_metadata = metadata
def to_json(
self,
base_url: str = "",
s3_upload: bool = True,
project_name: str = ""
):
"""Returns pointcloud json
        :param base_url: This url will be concatenated with the frame name
:type base_url: str
:returns: Frame object as a JSON formatted stream
:rtype: str
"""
points_json = pd.DataFrame(
self.get_world_points(), columns=["x", "y", "z", "i", "d"]
).to_json(double_precision=4, orient="records", date_format=None)
frame_object = {
"points": "__POINTS__",
"device_position": format_point(self.transform.position),
"device_heading": format_quaternion(self.transform.quaternion),
}
out = ujson.dumps(frame_object)
out = out.replace('"__POINTS__"', points_json)
return out
def s3_upload(self, bucket: str, path: str):
"""Save frame in S3
:param bucket: S3 Bucket name
:type bucket: str
:param path: Path to store data
        :type path: str
"""
# print(f'Uploading frame {self.id}...')
base_url = f"s3://{bucket}/{path}"
# Upload frame json file
s3_smart_upload(
fileobj=BytesIO(
bytes(self.to_json(base_url), encoding="utf-8")
),
bucket=bucket,
key=f"{path}/frame-{self.id}.json",
content_type="application/json",
)
# Upload images
for camera_id, image in self.images.items():
image.s3_upload(bucket, f"{path}/image-{camera_id}-{self.id}.jpg")
def generate_cam_nucleus_dataset_items(
self,
scene_dict: dict,
ref_id_prefix: str,
presigned_items: bool = False
):
"""Generates all necessary camera dataset items for corresponding LidarFrame
        :param scene_dict: Mapping from frame and camera images to URLs
        :type scene_dict: dict
        :param ref_id_prefix: String inserted at the beginning of the automatically generated ref-id, required
        :type ref_id_prefix: str
        :param presigned_items: Presign all URLs via S3
        :type presigned_items: bool
:returns: Dictionary of Nucleus image dataset items associated with camera ID
:rtype: Dict
"""
assert ref_id_prefix is not "", "Please set a Reference ID prefix to ensure reference idempotency."
def generate_camera_params(self, camera):
"""Generates camera specific metadata for nucleus dataset item"""
wct = self.transform @ camera.pose
heading = format_quaternion(wct.quaternion)
position = format_point(wct.translation)
camParams = {
"cx": float(camera.cx),
"cy": float(camera.cy),
"fx": float(camera.fx),
"fy": float(camera.fy),
"k1": float(camera.D[0]),
"k2": float(camera.D[1]),
"p1": float(camera.D[2]),
"p2": float(camera.D[3]),
"k3": float(camera.D[4]),
"k4": float(camera.D[5]),
"k5": float(camera.D[6]) if len(camera.D) >= 7 else 0,
"k6": float(camera.D[7]) if len(camera.D) >= 8 else 0,
"heading": heading,
"position": position,
"camera_model": camera.model,
}
return camParams
nucleus_camera_items = {}
for camera in self.cameras:
camera_params = generate_camera_params(self, camera)
image_location = scene_dict["cameras"][camera.id][self.id]
if presigned_items:
image_location = get_signed_url(
bucket=urlparse(image_location).netloc,
path=urlparse(image_location).path[1:],
)
item_metadata = {"camera_params": camera_params}
if self.images[camera.id].metadata:
item_metadata = dict(self.images[camera.id].metadata, **item_metadata)
item = nucleus.DatasetItem(
image_location=image_location,
reference_id=get_image_ref_id(ref_id_prefix,camera.id,self.id),
metadata=item_metadata,
)
nucleus_camera_items[str(camera.id)] = item
return nucleus_camera_items
class NucleusLidarScene(LidarScene):
'''Overloaded Nucleus scene'''
def __init__(self):
"""
:rtype: object
"""
self.cameras: MutableMapping[LidarCamera] = pd.Series()
self.frames: MutableMapping[NucleusLidarFrame] = pd.Series()
self.base_url = None
self.scale_file_attachments = None
self.ref_id_prefix = ""
def from_LidarScene(self, LidarScene):
self.cameras = LidarScene.cameras
for frame_idx, LidarFrame in enumerate(LidarScene.frames):
self.get_frame(frame_idx).points = LidarFrame.points
self.get_frame(frame_idx).images = LidarFrame.images
self.get_frame(frame_idx).transform = LidarFrame.transform
return self
def set_ref_id_prefix(self, ref_id_prefix: str):
self.ref_id_prefix = ref_id_prefix
def to_dict(self, base_url: str = None) -> dict:
"""Return a dictionary with the frame urls using the base_url as base.
:param base_url: This url will be concatenated with the frames name, e.g.: `'%s/frame-%s.json' % (base_url, frame.id)`
:type base_url: str
:return: Dictionary with the frame urls data
:rtype: dict
"""
if base_url is None:
base_url = self.base_url
cameras = {}
for camera in self.cameras:
cameras[camera.id] = [
"%s/image-%s-%s.jpg" % (base_url, camera.id, frame.id)
for frame in self.frames
]
return dict(
frames=["%s/frame-%s.json" % (base_url, frame.id) for frame in self.frames],
cameras=cameras,
)
def get_frame(self, frame_id=None, index: int = None) -> NucleusLidarFrame:
"""Get a frame by id (or index) or create one if it does not exist
:param frame_id: The frame id
:type frame_id: str, int
:param index: The frame index
:type index: int
:return: NucleusLidarFrame
:rtype: NucleusLidarFrame
"""
assert (
frame_id is not None or index is not None
), "id or index must be specified"
if frame_id is None:
frame_id = self.frames.index[index]
if frame_id not in self.frames:
if isinstance(frame_id, int):
self.frames.index = self.frames.index.astype(int)
self.frames[frame_id] = NucleusLidarFrame(frame_id, cameras=self.cameras)
return self.frames[frame_id]
def s3_upload(
self,
bucket: str,
path=None,
        mock_upload: bool = False,
        use_threads: bool = True
):
"""Overloaded S3 upload function
:param bucket: S3 Bucket name
:type bucket: str
:param path: Path to store data
        :type path: str
        :param mock_upload: Skip uploading the data to S3 (default ``False``)
        :type mock_upload: bool
        :param use_threads: Upload multiple files at the same time using threads (default ``True``)
        :type use_threads: bool
:return: Scene S3 url
:rtype: str
"""
self.base_url = f"s3://{bucket}/{path}"
print("Uploading scene to S3: %s" % self.base_url)
scene_dict = self.to_dict(self.base_url)
poses_csv = pd.DataFrame(
self.frames.map(lambda f: list(f.transform.matrix.reshape(-1))).to_dict()
).T.to_csv(header=False)
if not mock_upload:
# Upload scene json file
s3_smart_upload(
bucket=bucket,
key=f"{path}/scene.json",
fileobj=BytesIO(bytes(ujson.dumps(scene_dict), encoding="utf-8")),
content_type="application/json",
)
# Upload ego2world csv file
s3_smart_upload(
bucket=bucket,
key=f"{path}/ego2world.csv",
fileobj=BytesIO(bytes(poses_csv, encoding="utf-8")),
content_type="text/plain",
)
if use_threads:
p = ThreadPool(processes=UPLOAD_POOL_SIZE)
func = partial(
NucleusLidarFrame.s3_upload,
bucket=bucket,
path=path
)
p.map(func, self.frames)
else:
for frame in self.frames:
frame.s3_upload(bucket, path)
signed_url = get_signed_url(bucket, f"{path}/scene.json")
print(f"Scene uploaded: {signed_url}")
return self.base_url
def generate_nucleus_scene(
self,
ref_id_prefix: str = "",
presigned_items: bool = False
) -> nucleus.LidarScene:
"""Generates the Nucleus Scene object that can be asynchronously uploaded to the platform
:param ref_id_prefix: a prefix that can be added to reference ID of the scene and dataset items to ensure unique values, if not already existent
:type ref_id_prefix: string
:param presigned_items: Dictates that all items involved in Nucleus scene are presigned via S3
:type presigned_items: bool
:returns: the fully constructed Nucleus LidarScene
        :rtype: nucleus.LidarScene
"""
if ref_id_prefix is not "":
self.ref_id_prefix = ref_id_prefix
assert self.ref_id_prefix is not "", "Please set a Reference ID prefix to ensure reference idempotency."
scene_dict = self.to_dict()
nucleus_frames = []
for frame_idx, frame in enumerate(self.frames):
cam_items = frame.generate_cam_nucleus_dataset_items(
scene_dict=self.to_dict(),
ref_id_prefix=self.ref_id_prefix,
presigned_items=presigned_items,
)
pointcloud_location = scene_dict["frames"][frame_idx]
if presigned_items:
pointcloud_location = get_signed_url(
bucket=urlparse(pointcloud_location).netloc,
path=urlparse(pointcloud_location).path[1:],
)
nucleus_frame = nucleus.Frame(
lidar=nucleus.DatasetItem(
pointcloud_location=pointcloud_location,
metadata=frame.pointcloud_metadata,
reference_id=get_pointcloud_ref_id(self.ref_id_prefix, frame_idx)
),
**cam_items,
)
nucleus_frames.append(nucleus_frame)
return nucleus.LidarScene(
reference_id=get_scene_ref_id(self.ref_id_prefix), frames=nucleus_frames
)
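# Illustrative end-to-end sketch (not wired into the package). Bucket, path and the
# reference-ID prefix are placeholders; it requires AWS credentials and a Nucleus setup.
def _example_upload_to_nucleus(lidar_scene: LidarScene, bucket: str, path: str):
    """Wrap a plain LidarScene, upload it to S3 and build the Nucleus scene payload."""
    scene = NucleusLidarScene().from_LidarScene(lidar_scene)
    scene.set_ref_id_prefix("my-drive-0001")    # hypothetical, should be unique per scene
    scene.s3_upload(bucket=bucket, path=path)   # frames, images, poses and scene.json
    # The returned nucleus.LidarScene can then be appended to a Nucleus dataset.
    return scene.generate_nucleus_scene(presigned_items=True)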
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/nucleus_scene.py
| 0.838151 | 0.265113 |
nucleus_scene.py
|
pypi
|
import random
import cv2
import numpy as np
from PIL import Image, ImageDraw
from .color_utils import map_colors
from .transform import Transform
class LidarCamera:
"""Camera object that contains all the camera information
Camera properties:
- id = camera id/Name/Identifier, type: int, str
- pose: Camera pose/extrinsic, type: Transform
- world_poses: World poses, this will make the camera ignore the frame poses, type: list(Transform)
- K: Intrinsic matrix
- D: Camera distortion coefficients [k1,k2,p1,p2,k3,k4,k5,k6,lx,ly,xi], default all set to ``0``
- model: Camera model, default ``brown_conrady``
- scale_factor: Camera scale factor, default ``1``
    - skew: Camera skew coefficient, default ``0``
    Useful extra documentation to better understand how this object works:
https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html
"""
world2cam = Transform.from_euler([-90, 0, -90], degrees=True)
def __init__(self, camera_id):
self.id = camera_id
self.pose = Transform(self.world2cam)
self.world_poses = None
self.K = np.eye(3, dtype=np.float32)
self.D = np.zeros(7, dtype=np.float32)
self.model = "brown_conrady"
self.scale_factor = 1
self.skew = 0
@property
def position(self) -> np.ndarray:
"""Camera position
:getter: Return camera's position
:setter: Set camera's position
:type: list(x,y,z)
"""
return self.pose.position
@property
def rotation(self) -> np.ndarray:
"""Camera rotation/heading
:getter: Return camera's rotation
:setter: Set camera's rotation
:type: 3x3 rotation matrix
"""
return self.pose.rotation
@property
def world_transform(self) -> Transform:
"""World transform/pose (to avoid frame pose)
:getter: pose @ world2cam.T
:setter: pose = transform @ world2cam
:type: Transform
"""
return self.pose @ self.world2cam.T
@property
def fx(self):
"""Camera X focal length
:getter: Return camera's X focal length
:type: double
"""
return self.K[0, 0]
@property
def fy(self):
"""Camera Y focal length
:getter: Return camera's Y focal length
:type: double
"""
return self.K[1, 1]
@property
def cx(self):
"""Camera X center point
:getter: Return camera's X center point
:type: double
"""
return self.K[0, 2]
@property
def cy(self):
"""Camera Y center point
:getter: Return camera's Y center point
:type: double
"""
return self.K[1, 2]
@property
def intrinsic_matrix(self):
"""Camera intrinsic/K
:getter: Return camera's intrinsic matrix
:type: 3x3 matrix
"""
return self.K
@property
def extrinsic_matrix(self):
"""Camera extrinsic
:getter: Return camera's extrinsic matrix (pose.inverse[:3, :4])
:setter: pose = Transform(matrix).inverse
:type: 3x4 matrix
"""
return self.pose.inverse[:3, :4]
@property
def projection_matrix(self):
"""Projection matrix
:getter: K @ extrinsic_matrix
:setter: K, R, t, _, _, _, _ = cv2.decomposeProjectionMatrix(projection_matrix)
:type: 3x4 projection matrix
"""
return self.K @ self.extrinsic_matrix
@position.setter
def position(self, position: np.ndarray):
self.pose.position = position
@rotation.setter
def rotation(self, rotation):
self.pose.rotation = Transform(rotation).rotation
@world_transform.setter
def world_transform(self, transform: Transform):
self.pose = transform @ self.world2cam
@extrinsic_matrix.setter
def extrinsic_matrix(self, matrix):
self.pose = Transform(matrix).inverse
@projection_matrix.setter
def projection_matrix(self, P):
assert P.shape == (3, 4), "Projection matrix should be 3x4"
K, R, t, _, _, _, _ = cv2.decomposeProjectionMatrix(P)
self.pose = Transform.from_Rt(R.T, t[:3, 0] / t[3, 0])
self.K = K
def calibrate(
self,
position=None,
rotation=None,
pose=None,
extrinsic_matrix=None,
projection_matrix=None,
K=None,
D=None,
model=None,
scale_factor=None,
skew=None,
world_transform=None,
world_poses=None,
**kwargs
):
"""Helper for camera calibration
Args:
position (list(int)): Camera position [x, y, z]
rotation (rotation matrix): Camera rotation/heading
pose (Transform): Camera pose (position + rotation)
extrinsic_matrix (matrix 4x4): Extrinsic 4x4 matrix (world to camera transform) (pose = Transform(matrix).inverse)
projection_matrix (matrix 3x4): 3x4 projection matrix (K, R, t, _, _, _, _ = cv2.decomposeProjectionMatrix(projection_matrix))
K (matrix 3x3): Intrinsic 3x3 matrix
D (list(double)): Distortion values following this order: [k1,k2,p1,p2,k3,k4,k5,k6], required [k1,k2,p1,p2,k3,k4]
model (str): Camera model
scale_factor (int): Image scale_factor
skew (int): Camera skew coefficient
world_transform (Transform): Overwrite camera pose with the world transform (pose = transform @ world2cam)
world_poses (list(Transform)): World poses, this will make the camera ignore the frame poses
Keyword Args:
            fx (double): Focal length in X
            fy (double): Focal length in Y
            cx (double): Center point in X
            cy (double): Center point in Y
k1 (double): Radial distortion param k1
k2 (double): Radial distortion param k2
k3 (double): Radial distortion param k3
k4 (double): Radial distortion param k4
k5 (double): Radial distortion param k5
k6 (double): Radial distortion param k6
p1 (double): Tangential distortion param p1
p2 (double): Tangential distortion param p2
lx (double): Decentering distortion param lx
            ly (double): Decentering distortion param ly
xi (double): Mirror param xi
"""
if position is not None:
self.position = position
if rotation is not None:
self.rotation = rotation
if pose is not None:
self.pose = Transform(pose)
if extrinsic_matrix is not None:
self.extrinsic_matrix = extrinsic_matrix
if projection_matrix is not None:
self.projection_matrix = projection_matrix
if K is not None:
self.K = np.array(K[:3, :3])
if D is not None:
assert (
len(D) >= 6
), "Distortion list should have have at least these values [k1,k2,p1,p2,k3,k4]"
self.D = D
if model is not None:
self.model = model
if scale_factor is not None:
self.scale_factor = scale_factor
if skew is not None:
self.skew = skew
if world_transform is not None:
self.world_transform = world_transform
if world_poses is not None:
self.world_poses = world_poses
if "fx" in kwargs:
self.K[0, 0] = kwargs["fx"]
if "fy" in kwargs:
self.K[1, 1] = kwargs["fy"]
if "cx" in kwargs:
self.K[0, 2] = kwargs["cx"]
if "cy" in kwargs:
self.K[1, 2] = kwargs["cy"]
if "k1" in kwargs:
self.D[0] = kwargs["k1"]
if "k2" in kwargs:
self.D[1] = kwargs["k2"]
if "p1" in kwargs:
self.D[2] = kwargs["p1"]
if "p2" in kwargs:
self.D[3] = kwargs["p2"]
if "k3" in kwargs:
self.D[4] = kwargs["k3"]
if "k4" in kwargs:
self.D[5] = kwargs["k4"]
if "k5" in kwargs:
pad_count = 7 - len(self.D)
self.D[6] = kwargs["k5"]
if "k6" in kwargs:
pad_count = 8 - len(self.D)
if pad_count > 0:
self.D = np.lib.pad(self.D, (0, pad_count), "constant", constant_values=(0))
self.D[7] = kwargs["k6"]
if "lx" in kwargs:
pad_count = 9 - len(self.D)
if pad_count > 0:
self.D = np.lib.pad(self.D, (0, pad_count), "constant", constant_values=(0))
self.D[8] = kwargs["lx"]
if "ly" in kwargs:
pad_count = 10 - len(self.D)
if pad_count > 0:
self.D = np.lib.pad(self.D, (0, pad_count), "constant", constant_values=(0))
self.D[9] = kwargs["ly"]
if "xi" in kwargs:
pad_count = 11 - len(self.D)
if pad_count > 0:
self.D = np.lib.pad(self.D, (0, pad_count), "constant", constant_values=(0))
self.D[10] = kwargs["xi"]
def apply_transform(self, transform: Transform):
"""Apply transformation to the camera (transform @ pose)
:param transform: Transform to apply to the object
:type transform: Transform
"""
self.pose = transform @ self.pose
def rotate(self, angles, degrees=True):
"""Rotate the camera,
(pose = Transform.from_euler(angles, degrees=degrees) @ pose)
:param angles: Angles to rotate (x,y,z)
:type angles: list(float)
:param degrees: Use rad or degrees
:type degrees: boolean
"""
self.apply_transform(Transform.from_euler(angles, degrees=degrees))
def translate(self, vector):
"""Move the camera,
        (pose = Transform(vector) @ pose)
:param vector: [x,y,z]
:type vector: list(float)
"""
self.apply_transform(Transform(vector))
def project_points(self, points: np.ndarray, use_distortion=False):
"""Return array of projected points based on camera calibration values
- When ``use_distortion=True`` it uses: cv.fisheye.projectPoints( objectPoints, rvec, tvec, K, D[, imagePoints[, alpha[, jacobian]]] )
:param points: list of points
:type points: list(float)
        :param use_distortion: For fisheye/omni cameras (not necessary for cameras like Brown-Conrady)
:type use_distortion: boolean
"""
projected = Transform(self.projection_matrix) @ points[:, :3]
# projected = ((points[:, :3] - self.position) @ self.rotation) @ self.intrinsic_matrix[:3, :3].T
projected[:, 0] /= np.where(projected[:, 2] == 0, np.inf, projected[:, 2])
projected[:, 1] /= np.where(projected[:, 2] == 0, np.inf, projected[:, 2])
if use_distortion:
projected[:, :2] = cv2.fisheye.projectPoints(
objectPoints=np.array([points[:, :3]], dtype=np.float32),
rvec=cv2.Rodrigues(self.extrinsic_matrix[:3, :3])[0],
tvec=self.extrinsic_matrix[:3, 3],
D=np.array(
[self.D[0], self.D[1], self.D[4], self.D[5]], dtype=np.float32
),
K=np.array(self.K, dtype=np.float32),
alpha=self.skew,
)[0].reshape((-1, 2))
return np.hstack([projected[:, :3], points[:, 3:]])
def get_projected_image(
self, image, points, frame_transform, color_mode="default", oversample=3
):
"""Return image with points projected onto it
:param image: Camera image
:type image: PIL.Image
:param points: list of points/pointcloud
:type points: list(float)
:param frame_transform: Frame transform/pose
:type frame_transform: Transform
:param color_mode: Color mode, default ``default``, modes are: 'depth', 'intensity' and 'default'
:type color_mode: str
:param oversample: Padding on projected points, this is used to project points outside the image, it's useful for debugging, default ``3`` = 3 times the image size
:type oversample: int
:returns: Image with points projected
:rtype: PIL.Image
"""
assert image, "No image loaded."
def crop_points(points, bounding_box):
conditions = np.logical_and(
points[:, :3] >= bounding_box[0], points[:, :3] < bounding_box[1]
)
mask = np.all(conditions, axis=1)
return points[mask]
im = image.get_image().convert("RGBA")
radius = 3
points = np.array(
random.sample(points.tolist(), int(len(points) / 2))
) # reduce the number of points projected, no need to project everything
# Project points image
points_im = Image.new(
"RGBA", (im.size[0] * oversample, im.size[1] * oversample)
)
draw = ImageDraw.Draw(points_im)
if self.model == "cylindrical":
# handle cylindrical cameras
epsilon = 0.0000001
fisheye = frame_transform.inverse @ np.array(
                [points[:, 0], points[:, 1], points[:, 2]], dtype=float
) # 3D point in camera coordinates
fisheye = fisheye.T # 3D point in camera coordinates
fisheye = self.pose.inverse @ fisheye
fisheye[
:, 1
] *= -1 # invert y because cylindrical y is up and cartesian y is down
fisheye = (
Transform(self.extrinsic_matrix[:3, :3]) @ fisheye.T
) # lift cylinder to stand up straight
cylindrical = np.array(
[
np.arctan2(fisheye[0, :], fisheye[2, :]),
fisheye[1, :] / np.sqrt(fisheye[0, :] ** 2 + fisheye[2, :] ** 2),
np.ones(fisheye.shape[1]),
]
)
cylindrical[
1, :
] *= -1 # invert y because cylindrical y is up and cartesian y is down
q = self.K @ cylindrical
img_coords = q[[0, 1], :] # pixels on image
img_coords = img_coords.T
for point in img_coords[:, :2]:
draw.ellipse(
[tuple(point - radius), tuple(point + radius)],
fill=tuple([255, 10, 10]),
)
            points_im = points_im.resize(im.size, Image.BICUBIC)
# Merge images
projected_im = Image.composite(points_im, im, points_im)
return projected_im
if self.model == "fisheye":
wct = self.pose @ frame_transform.T
projected = self.project_points(points, use_distortion=True)
# projected = crop_points(projected,
# np.array([[0, 0, 0.1], [im.size[0], im.size[1], np.inf]]))
fisheye = self.world_transform @ np.array(
                [projected[:, 0], projected[:, 1], projected[:, 2]], dtype=float
) # 3D point in camera coordinates
fisheye = fisheye.T # 3D point in camera coordinates
fisheye = np.concatenate(
(np.array(fisheye), np.array(projected[:, 3])[:, None]), axis=1
)
if not len(fisheye):
return im
colors = map_colors(fisheye, color_mode)
for point, color in zip(fisheye[:, :2] * oversample, colors):
draw.ellipse(
[tuple(point - radius), tuple(point + radius)], fill=tuple(color)
)
            points_im = points_im.resize(im.size, Image.BICUBIC)
# Merge images
projected_im = Image.composite(points_im, im, points_im)
return projected_im
else:
projected = self.project_points(points, use_distortion=False)
projected = crop_points(
projected, np.array([[0, 0, 0.1], [im.size[0], im.size[1], np.inf]])
)
# Returns original image if not projected points on image
if not len(projected):
return im
colors = map_colors(projected, color_mode)
for point, color in zip(projected[:, :2] * oversample, colors):
draw.ellipse(
[tuple(point - radius), tuple(point + radius)], fill=tuple(color)
)
            points_im = points_im.resize(im.size, Image.BICUBIC)
# Merge images
projected_im = Image.composite(points_im, im, points_im)
return projected_im
def __repr__(self):
return "LidarCamera({0}) {1}".format(self.id, self.pose)
|
/scale_lidar_io-1.2.5-py3-none-any.whl/scale_lidar_io/camera.py
| 0.876522 | 0.522994 |
camera.py
|
pypi
|
from typing import Dict, Optional, Union
from llmengine.api_engine import DEFAULT_TIMEOUT, APIEngine
from llmengine.data_types import (
CancelFineTuneResponse,
CreateFineTuneRequest,
CreateFineTuneResponse,
GetFineTuneEventsResponse,
GetFineTuneResponse,
ListFineTunesResponse,
)
class FineTune(APIEngine):
"""
FineTune API. This API is used to fine-tune models.
Fine-tuning is a process where the LLM is further trained on a task-specific dataset, allowing the model to adjust its parameters to better align with the task at hand. Fine-tuning is a supervised training phase, where prompt/response pairs are provided to optimize the performance of the LLM. LLM Engine currently uses [LoRA](https://arxiv.org/abs/2106.09685) for fine-tuning. Support for additional fine-tuning methods is upcoming.
LLM Engine provides APIs to create fine-tunes on a base model with training & validation datasets. APIs are also provided to list, cancel and retrieve fine-tuning jobs.
Creating a fine-tune will end with the creation of a Model, which you can view using `Model.get(model_name)` or delete using `Model.delete(model_name)`.
"""
@classmethod
def create(
cls,
model: str,
training_file: str,
validation_file: Optional[str] = None,
hyperparameters: Optional[Dict[str, Union[str, int, float]]] = None,
suffix: Optional[str] = None,
) -> CreateFineTuneResponse:
"""
Creates a job that fine-tunes a specified model with a given dataset.
This API can be used to fine-tune a model. The _model_ is the name of base model
([Model Zoo](../../model_zoo) for available models) to fine-tune. The training
and validation files should consist of prompt and response pairs. `training_file`
and `validation_file` must be publicly accessible HTTP or HTTPS URLs to a CSV file
that includes two columns: `prompt` and `response`. A maximum of 100,000 rows of data is
currently supported. At least 200 rows of data is recommended to start to see benefits from
fine-tuning. For sequences longer than the native `max_seq_length` of the model, the sequences
will be truncated.
A fine-tuning job can take roughly 30 minutes for a small dataset (~200 rows)
and several hours for larger ones.
Args:
model (`str`):
The name of the base model to fine-tune. See [Model Zoo](../../model_zoo) for the list of available models to fine-tune.
training_file (`str`):
Publicly accessible URL to a CSV file for training. When no validation_file is provided, one will automatically be created using a 10% split of the training_file data.
validation_file (`Optional[str]`):
Publicly accessible URL to a CSV file for validation. The validation file is used to compute metrics which let LLM Engine pick the best fine-tuned checkpoint, which will be used for inference when fine-tuning is complete.
hyperparameters (`Optional[Dict[str, str]]`):
A dict of hyperparameters to customize fine-tuning behavior.
Currently supported hyperparameters:
* `lr`: Peak learning rate used during fine-tuning. It decays with a cosine schedule afterward. (Default: 2e-3)
* `warmup_ratio`: Ratio of training steps used for learning rate warmup. (Default: 0.03)
* `epochs`: Number of fine-tuning epochs. This should be less than 20. (Default: 5)
* `weight_decay`: Regularization penalty applied to learned weights. (Default: 0.001)
suffix (`Optional[str]`):
A string that will be added to your fine-tuned model name. If present, the entire fine-tuned model name
will be formatted like `"[model].[suffix].[YYYY-MM-DD-HH-MM-SS]"`. If absent, the
fine-tuned model name will be formatted `"[model].[YYYY-MM-DD-HH-MM-SS]"`.
For example, if `suffix` is `"my-experiment"`, the fine-tuned model name could be
`"llama-2-7b.my-experiment.2023-07-17-23-01-50"`.
Returns:
CreateFineTuneResponse: an object that contains the ID of the created fine-tuning job
Here is an example script to create a 5-row CSV of properly formatted data for fine-tuning
an airline question answering bot:
=== "Formatting data in Python"
```python
import csv
# Define data
data = [
("What is your policy on carry-on luggage?", "Our policy allows each passenger to bring one piece of carry-on luggage and one personal item such as a purse or briefcase. The maximum size for carry-on luggage is 22 x 14 x 9 inches."),
("How can I change my flight?", "You can change your flight through our website or mobile app. Go to 'Manage my booking' section, enter your booking reference and last name, then follow the prompts to change your flight."),
("What meals are available on my flight?", "We offer a variety of meals depending on the flight's duration and route. These can range from snacks and light refreshments to full-course meals on long-haul flights. Specific meal options can be viewed during the booking process."),
("How early should I arrive at the airport before my flight?", "We recommend arriving at least two hours before domestic flights and three hours before international flights."),
"Can I select my seat in advance?", "Yes, you can select your seat during the booking process or afterwards via the 'Manage my booking' section on our website or mobile app."),
]
# Write data to a CSV file
with open('customer_service_data.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["prompt", "response"])
writer.writerows(data)
```
Currently, data needs to be uploaded to a publicly accessible web URL so that it can be read
for fine-tuning. Publicly accessible HTTP and HTTPS URLs are currently supported.
Support for privately sharing data with the LLM Engine API is coming shortly. For quick
iteration, you can look into tools like Pastebin or GitHub Gists to quickly host your CSV
files in a public manner. An example Github Gist can be found
[here](https://gist.github.com/tigss/7cec73251a37de72756a3b15eace9965). To use the gist,
you can use the URL given when you click the “Raw” button
([URL](https://gist.githubusercontent.com/tigss/7cec73251a37de72756a3b15eace9965/raw/85d9742890e1e6b0c06468507292893b820c13c9/llm_sample_data.csv)).
Example code for fine-tuning:
=== "Fine-tuning in Python"
```python
from llmengine import FineTune
response = FineTune.create(
model="llama-2-7b",
training_file="https://my-bucket.s3.us-west-2.amazonaws.com/path/to/training-file.csv",
)
print(response.json())
```
=== "Response in JSON"
```json
{
"fine_tune_id": "ft-cir3eevt71r003ks6il0"
}
```
"""
request = CreateFineTuneRequest(
model=model,
training_file=training_file,
validation_file=validation_file,
hyperparameters=hyperparameters,
suffix=suffix,
)
response = cls.post_sync(
resource_name="v1/llm/fine-tunes",
data=request.dict(),
timeout=DEFAULT_TIMEOUT,
)
return CreateFineTuneResponse.parse_obj(response)
@classmethod
def get(
cls,
fine_tune_id: str,
) -> GetFineTuneResponse:
"""
Get status of a fine-tuning job.
This API can be used to get the status of an already running
fine-tuning job. It takes as a single parameter the `fine_tune_id`
and returns a
[GetFineTuneResponse](../../api/data_types/#llmengine.GetFineTuneResponse)
object with the id and status (`PENDING`, `STARTED`,
`UNDEFINED`, `FAILURE` or `SUCCESS`).
Args:
fine_tune_id (`str`):
ID of the fine-tuning job
Returns:
GetFineTuneResponse: an object that contains the ID and status of the requested job
=== "Getting status of fine-tuning in Python"
```python
from llmengine import FineTune
response = FineTune.get(
fine_tune_id="ft-cir3eevt71r003ks6il0",
)
print(response.json())
```
=== "Response in JSON"
```json
{
"fine_tune_id": "ft-cir3eevt71r003ks6il0",
"status": "STARTED"
}
```
"""
response = cls._get(f"v1/llm/fine-tunes/{fine_tune_id}", timeout=DEFAULT_TIMEOUT)
return GetFineTuneResponse.parse_obj(response)
@classmethod
def list(cls) -> ListFineTunesResponse:
"""
List fine-tuning jobs.
This API can be used to list all the fine-tuning jobs.
It returns a list of pairs of `fine_tune_id` and `status` for
all existing jobs.
Returns:
ListFineTunesResponse: an object that contains a list of all fine-tuning jobs and their statuses
=== "Listing fine-tuning jobs in Python"
```python
from llmengine import FineTune
response = FineTune.list()
print(response.json())
```
=== "Response in JSON"
```json
{
"jobs": [
{
"fine_tune_id": "ft-cir3eevt71r003ks6il0",
"status": "STARTED"
},
{
"fine_tune_id": "ft_def456",
"status": "SUCCESS"
}
]
}
```
"""
response = cls._get("v1/llm/fine-tunes", timeout=DEFAULT_TIMEOUT)
return ListFineTunesResponse.parse_obj(response)
@classmethod
def cancel(cls, fine_tune_id: str) -> CancelFineTuneResponse:
"""
Cancel a fine-tuning job.
This API can be used to cancel an existing fine-tuning job if
it's no longer required. It takes the `fine_tune_id` as a parameter
and returns a response object which has a `success` field
confirming if the cancellation was successful.
Args:
fine_tune_id (`str`):
ID of the fine-tuning job
Returns:
CancelFineTuneResponse: an object that contains whether the cancellation was successful
=== "Cancelling fine-tuning job in Python"
```python
from llmengine import FineTune
response = FineTune.cancel(fine_tune_id="ft-cir3eevt71r003ks6il0")
print(response.json())
```
=== "Response in JSON"
```json
{
"success": true
}
```
"""
response = cls.put(
f"v1/llm/fine-tunes/{fine_tune_id}/cancel",
data=None,
timeout=DEFAULT_TIMEOUT,
)
return CancelFineTuneResponse.parse_obj(response)
@classmethod
def get_events(cls, fine_tune_id: str) -> GetFineTuneEventsResponse:
"""
Get events of a fine-tuning job.
This API can be used to get the list of detailed events for a fine-tuning job.
It takes the `fine_tune_id` as a parameter and returns a response object
which has a list of events that has happened for the fine-tuning job. Two events
are logged periodically: an evaluation of the training loss, and an
evaluation of the eval loss. This API will return all events for the fine-tuning job.
Args:
fine_tune_id (`str`):
ID of the fine-tuning job
Returns:
GetFineTuneEventsResponse: an object that contains the list of events for the fine-tuning job
=== "Getting events for fine-tuning jobs in Python"
```python
from llmengine import FineTune
response = FineTune.get_events(fine_tune_id="ft-cir3eevt71r003ks6il0")
print(response.json())
```
=== "Response in JSON"
```json
{
"events":
[
{
"timestamp": 1689665099.6704428,
"message": "{'loss': 2.108, 'learning_rate': 0.002, 'epoch': 0.7}",
"level": "info"
},
{
"timestamp": 1689665100.1966307,
"message": "{'eval_loss': 1.67730712890625, 'eval_runtime': 0.2023, 'eval_samples_per_second': 24.717, 'eval_steps_per_second': 4.943, 'epoch': 0.7}",
"level": "info"
},
{
"timestamp": 1689665105.6544185,
"message": "{'loss': 1.8961, 'learning_rate': 0.0017071067811865474, 'epoch': 1.39}",
"level": "info"
},
{
"timestamp": 1689665106.159139,
"message": "{'eval_loss': 1.513688564300537, 'eval_runtime': 0.2025, 'eval_samples_per_second': 24.696, 'eval_steps_per_second': 4.939, 'epoch': 1.39}",
"level": "info"
}
]
}
```
"""
response = cls._get(
f"v1/llm/fine-tunes/{fine_tune_id}/events",
timeout=DEFAULT_TIMEOUT,
)
return GetFineTuneEventsResponse.parse_obj(response)
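# Illustrative lifecycle sketch (not wired into the package). The model name and
# training-file URL are placeholders, the `fine_tune_id` field name follows the JSON
# examples in the docstrings above, and every call below hits the live LLM Engine API.
def _example_fine_tune_lifecycle():
    """Create a fine-tune, inspect its status and events, then cancel it."""
    job = FineTune.create(
        model="llama-2-7b",
        training_file="https://example.com/path/to/training-file.csv",
    )
    print(FineTune.get(job.fine_tune_id).json())         # id + status
    print(FineTune.get_events(job.fine_tune_id).json())  # training / eval loss events
    print(FineTune.cancel(job.fine_tune_id).json())      # {"success": true}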
|
/scale_llm_engine-0.0.0b8-py3-none-any.whl/llmengine/fine_tuning.py
| 0.953221 | 0.860252 |
fine_tuning.py
|
pypi
|
from io import BufferedReader
from llmengine.api_engine import DEFAULT_TIMEOUT, APIEngine
from llmengine.data_types import (
DeleteFileResponse,
GetFileContentResponse,
GetFileResponse,
ListFilesResponse,
UploadFileResponse,
)
class File(APIEngine):
"""
File API. This API is used to upload private files to LLM engine so that fine-tunes can access them for training and validation data.
Functions are provided to upload, get, list, and delete files, as well as to get the contents of a file.
"""
@classmethod
def upload(cls, file: BufferedReader) -> UploadFileResponse:
"""
Uploads a file to LLM engine.
Args:
file (`BufferedReader`):
A file opened with open(file_path, "r")
Returns:
UploadFileResponse: an object that contains the ID of the uploaded file
=== "Uploading file in Python"
```python
from llmengine import File
response = File.upload(open("training_dataset.csv", "r"))
print(response.json())
```
=== "Response in JSON"
```json
{
"id": "file-abc123"
}
```
"""
files = {"file": file}
response = cls.post_file(
resource_name="v1/files",
files=files,
timeout=DEFAULT_TIMEOUT,
)
return UploadFileResponse.parse_obj(response)
@classmethod
def get(cls, file_id: str) -> GetFileResponse:
"""
Get file metadata, including filename and size.
Args:
file_id (`str`):
ID of the file
Returns:
GetFileResponse: an object that contains the ID, filename, and size of the requested file
=== "Getting metadata about file in Python"
```python
from llmengine import File
response = File.get(
file_id="file-abc123",
)
print(response.json())
```
=== "Response in JSON"
```json
{
"id": "file-abc123",
"filename": "training_dataset.csv",
"size": 100
}
```
"""
response = cls._get(f"v1/files/{file_id}", timeout=DEFAULT_TIMEOUT)
return GetFileResponse.parse_obj(response)
@classmethod
def list(cls) -> ListFilesResponse:
"""
List metadata about all files, e.g. their filenames and sizes.
Returns:
ListFilesResponse: an object that contains a list of all files and their filenames and sizes
=== "Listing files in Python"
```python
from llmengine import File
response = File.list()
print(response.json())
```
=== "Response in JSON"
```json
{
"files": [
{
"id": "file-abc123",
"filename": "training_dataset.csv",
"size": 100
},
{
"id": "file-def456",
"filename": "validation_dataset.csv",
"size": 50
}
]
}
```
"""
response = cls._get("v1/files", timeout=30)
return ListFilesResponse.parse_obj(response)
@classmethod
def delete(cls, file_id: str) -> DeleteFileResponse:
"""
Deletes a file.
Args:
file_id (`str`):
ID of the file
Returns:
DeleteFileResponse: an object that contains whether the deletion was successful
=== "Deleting file in Python"
```python
from llmengine import File
response = File.delete(file_id="file-abc123")
print(response.json())
```
=== "Response in JSON"
```json
{
"deleted": true
}
```
"""
response = cls._delete(
f"v1/files/{file_id}",
timeout=DEFAULT_TIMEOUT,
)
return DeleteFileResponse.parse_obj(response)
@classmethod
def download(cls, file_id: str) -> GetFileContentResponse:
"""
Get contents of a file, as a string. (If the uploaded file is in binary, a string encoding will be returned.)
Args:
file_id (`str`):
ID of the file
Returns:
GetFileContentResponse: an object that contains the ID and content of the file
=== "Getting file content in Python"
```python
from llmengine import File
            response = File.download(file_id="file-abc123")
print(response.json())
```
=== "Response in JSON"
```json
{
"id": "file-abc123",
"content": "Hello world!"
}
```
"""
response = cls._get(
f"v1/files/{file_id}/content",
timeout=DEFAULT_TIMEOUT,
)
return GetFileContentResponse.parse_obj(response)
|
/scale_llm_engine-0.0.0b8-py3-none-any.whl/llmengine/file.py
| 0.768038 | 0.541227 |
file.py
|
pypi
|
import json
# LLM Engine Errors
class ValidationError(Exception):
def __init__(self, message: str):
super().__init__(message)
# API Inference Errors
class BadRequestError(Exception):
"""
Corresponds to HTTP 400. Indicates that the request had inputs that were invalid. The user should not
attempt to retry the request without changing the inputs.
"""
def __init__(self, message: str):
super().__init__(message)
class UnauthorizedError(Exception):
"""
Corresponds to HTTP 401. This means that no valid API key was provided.
"""
def __init__(self, message: str):
super().__init__(message)
class NotFoundError(Exception):
"""
Corresponds to HTTP 404. This means that the resource (e.g. a Model, FineTune, etc.) could not be found.
Note that this can also be returned in some cases where the object might exist, but the user does not have access
to the object. This is done to avoid leaking information about the existence or nonexistence of said object that
the user does not have access to.
"""
def __init__(self, message: str):
super().__init__(message)
class RateLimitExceededError(Exception):
"""
Corresponds to HTTP 429. Too many requests hit the API too quickly. We recommend an exponential backoff for retries.
"""
def __init__(self, message: str):
super().__init__(message)
class ServerError(Exception):
"""
Corresponds to HTTP 5xx errors on the server.
"""
def __init__(self, status_code: int, message: str):
super().__init__(f"Server exception with {status_code=}, {message=}")
# Unknown error
class UnknownError(Exception):
def __init__(self, message: str):
super().__init__(message)
def parse_error(status_code: int, content: bytes) -> Exception:
"""
Parse error given an HTTP status code and a bytes payload
Args:
status_code (`int`):
HTTP status code
content (`bytes`):
payload
Returns:
Exception: parsed exception
"""
# Try to parse a LLM Engine error
try:
payload = json.loads(content)
message = payload["detail"]
except json.JSONDecodeError:
message = content.decode("utf-8")
# Try to parse a APIInference error
if status_code == 400:
return BadRequestError(message)
if status_code == 401:
return UnauthorizedError(message)
if status_code == 404:
return NotFoundError(message)
if status_code == 429:
return RateLimitExceededError(message)
    if 500 <= status_code < 600:
return ServerError(status_code, message)
# Fallback to an unknown error
return UnknownError(message)
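# Illustrative usage (not part of this module): a client helper might convert a
# failed HTTP response into one of the typed exceptions above. `resp` below stands
# for a hypothetical requests.Response-like object.
#
#     if resp.status_code != 200:
#         raise parse_error(resp.status_code, resp.content)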
|
/scale_llm_engine-0.0.0b8-py3-none-any.whl/llmengine/errors.py
| 0.684053 | 0.16378 |
errors.py
|
pypi
|
import datetime
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union
from pydantic import BaseModel, Field, HttpUrl
CpuSpecificationType = Union[str, int, float]
StorageSpecificationType = Union[str, int, float] # TODO(phil): we can make this more specific.
class LLMInferenceFramework(str, Enum):
DEEPSPEED = "deepspeed"
TEXT_GENERATION_INFERENCE = "text_generation_inference"
class LLMSource(str, Enum):
HUGGING_FACE = "hugging_face"
class Quantization(str, Enum):
BITSANDBYTES = "bitsandbytes"
class GpuType(str, Enum):
"""Lists allowed GPU types for LLMEngine."""
NVIDIA_TESLA_T4 = "nvidia-tesla-t4"
NVIDIA_AMPERE_A10 = "nvidia-ampere-a10"
NVIDIA_AMPERE_A100 = "nvidia-ampere-a100"
class ModelEndpointType(str, Enum):
ASYNC = "async"
SYNC = "sync"
STREAMING = "streaming"
class ModelEndpointStatus(str, Enum):
    # Duplicates common/types::EndpointStatus; when the refactor is done, delete the old type
# See EndpointStatus for status explanations
READY = "READY"
UPDATE_PENDING = "UPDATE_PENDING"
UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS"
UPDATE_FAILED = "UPDATE_FAILED"
DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
class CallbackBasicAuth(BaseModel):
kind: Literal["basic"]
username: str
password: str
class CallbackmTLSAuth(BaseModel):
kind: Literal["mtls"]
cert: str
key: str
class CallbackAuth(BaseModel):
__root__: Union[CallbackBasicAuth, CallbackmTLSAuth] = Field(..., discriminator="kind")
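# Illustrative: the "kind" discriminator selects the concrete auth model when parsing,
# e.g. CallbackAuth.parse_obj({"kind": "basic", "username": "u", "password": "p"})
# returns a CallbackAuth whose __root__ is a CallbackBasicAuth instance.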
class ModelEndpointDeploymentState(BaseModel):
"""
This is the entity-layer class for the deployment settings related to a Model Endpoint.
"""
min_workers: int = Field(..., ge=0)
max_workers: int = Field(..., ge=0)
per_worker: int = Field(..., gt=0)
available_workers: Optional[int] = Field(default=None, ge=0)
unavailable_workers: Optional[int] = Field(default=None, ge=0)
class ModelEndpointResourceState(BaseModel):
"""
This is the entity-layer class for the resource settings per worker of a Model Endpoint.
"""
cpus: CpuSpecificationType # TODO(phil): try to use decimal.Decimal
gpus: int = Field(..., ge=0)
memory: StorageSpecificationType
gpu_type: Optional[GpuType]
storage: Optional[StorageSpecificationType]
optimize_costs: Optional[bool]
class GetModelEndpointResponse(BaseModel):
id: str
name: str
endpoint_type: ModelEndpointType
destination: str
deployment_name: Optional[str] = Field(default=None)
metadata: Optional[Dict[str, Any]] = Field(default=None) # TODO: JSON type
bundle_name: str
status: ModelEndpointStatus
post_inference_hooks: Optional[List[str]] = Field(default=None)
default_callback_url: Optional[HttpUrl] = Field(default=None)
default_callback_auth: Optional[CallbackAuth] = Field(default=None)
labels: Optional[Dict[str, str]] = Field(default=None)
aws_role: Optional[str] = Field(default=None)
results_s3_bucket: Optional[str] = Field(default=None)
created_by: str
created_at: datetime.datetime
last_updated_at: datetime.datetime
deployment_state: Optional[ModelEndpointDeploymentState] = Field(default=None)
resource_state: Optional[ModelEndpointResourceState] = Field(default=None)
num_queued_items: Optional[int] = Field(default=None)
public_inference: Optional[bool] = Field(default=None)
class PostInferenceHooks(str, Enum):
"""
Post-inference hooks are functions that are called after inference is complete.
Attributes:
CALLBACK: The callback hook is called with the inference response and the task ID.
"""
# INSIGHT = "insight"
CALLBACK: str = "callback"
class CreateLLMEndpointRequest(BaseModel):
name: str
# LLM specific fields
model_name: str
source: LLMSource = LLMSource.HUGGING_FACE
inference_framework: LLMInferenceFramework = LLMInferenceFramework.TEXT_GENERATION_INFERENCE
inference_framework_image_tag: str
num_shards: int = 1
"""
Number of shards to distribute the model onto GPUs. Only affects behavior for text-generation-inference models
"""
quantize: Optional[Quantization] = None
"""
Quantization for the LLM. Only affects behavior for text-generation-inference models
"""
checkpoint_path: Optional[str] = None
"""
Path to the checkpoint to load the model from. Only affects behavior for text-generation-inference models
"""
# General endpoint fields
metadata: Dict[str, Any] # TODO: JSON type
post_inference_hooks: Optional[List[str]]
endpoint_type: ModelEndpointType = ModelEndpointType.STREAMING
cpus: CpuSpecificationType
gpus: int
memory: StorageSpecificationType
gpu_type: GpuType
storage: Optional[StorageSpecificationType]
optimize_costs: Optional[bool]
min_workers: int
max_workers: int
per_worker: int
labels: Dict[str, str]
prewarm: Optional[bool]
high_priority: Optional[bool]
default_callback_url: Optional[HttpUrl]
default_callback_auth: Optional[CallbackAuth]
public_inference: Optional[bool] = True
"""
Whether the endpoint can be used for inference for all users. LLM endpoints are public by default.
"""
class CreateLLMEndpointResponse(BaseModel):
endpoint_creation_task_id: str
class GetLLMEndpointResponse(BaseModel):
"""
Response object for retrieving a Model.
"""
id: Optional[str] = Field(
default=None, description="(For self-hosted users) The autogenerated ID of the model."
)
"""(For self-hosted users) The autogenerated ID of the model."""
name: str = Field(
description="The name of the model. Use this for making inference requests to the model."
)
"""The name of the model. Use this for making inference requests to the model."""
model_name: Optional[str] = Field(
default=None,
description="(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as `name`.",
)
"""(For self-hosted users) For fine-tuned models, the base model. For base models, this will be the same as `name`."""
source: LLMSource = Field(description="The source of the model, e.g. Hugging Face.")
"""The source of the model, e.g. Hugging Face."""
inference_framework: LLMInferenceFramework = Field(
description="The inference framework used by the model."
)
"""(For self-hosted users) The inference framework used by the model."""
inference_framework_tag: Optional[str] = Field(
default=None,
description="(For self-hosted users) The Docker image tag used to run the model.",
)
"""(For self-hosted users) The Docker image tag used to run the model."""
num_shards: Optional[int] = Field(
default=None, description="(For self-hosted users) The number of shards."
)
"""(For self-hosted users) The number of shards."""
quantize: Optional[Quantization] = Field(
default=None, description="(For self-hosted users) The quantization method."
)
"""(For self-hosted users) The quantization method."""
spec: Optional[GetModelEndpointResponse] = Field(
default=None, description="(For self-hosted users) Model endpoint details."
)
"""(For self-hosted users) Model endpoint details."""
class ListLLMEndpointsResponse(BaseModel):
"""
Response object for listing Models.
"""
model_endpoints: List[GetLLMEndpointResponse] = Field(
...,
description="The list of models.",
)
"""
A list of Models, represented as `GetLLMEndpointResponse`s.
"""
class DeleteLLMEndpointResponse(BaseModel):
"""
Response object for deleting a Model.
"""
deleted: bool = Field(..., description="Whether deletion was successful.")
"""
Whether the deletion succeeded.
"""
class CompletionSyncV1Request(BaseModel):
"""
Request object for a synchronous prompt completion task.
"""
prompt: str = Field(..., min_length=1)
max_new_tokens: int = Field(..., gt=0)
temperature: float = Field(..., gt=0.0)
class CompletionOutput(BaseModel):
"""
Represents the output of a completion request to a model.
"""
text: str
"""The text of the completion."""
num_completion_tokens: int
"""Number of tokens in the completion."""
class CompletionSyncResponse(BaseModel):
"""
Response object for a synchronous prompt completion.
"""
request_id: str
"""The unique ID of the corresponding Completion request. This `request_id` is generated on the server, and all logs
associated with the request are grouped by the `request_id`, which allows for easier troubleshooting of errors as
follows:
* When running the *Scale-hosted* LLM Engine, please provide the `request_id` in any bug reports.
* When running the *self-hosted* LLM Engine, the `request_id` serves as a trace ID in your observability
provider."""
output: CompletionOutput
"""Completion output."""
class CompletionStreamV1Request(BaseModel):
"""
Request object for a streaming prompt completion.
"""
prompt: str = Field(..., min_length=1)
max_new_tokens: int = Field(..., gt=0)
temperature: float = Field(..., gt=0.0)
class CompletionStreamOutput(BaseModel):
text: str
"""The text of the completion."""
finished: bool
"""Whether the completion is finished."""
num_completion_tokens: Optional[int] = None
"""Number of tokens in the completion."""
class CompletionStreamResponse(BaseModel):
"""
Response object for a stream prompt completion task.
"""
request_id: str
"""The unique ID of the corresponding Completion request. This `request_id` is generated on the server, and all logs
associated with the request are grouped by the `request_id`, which allows for easier troubleshooting of errors as
follows:
* When running the *Scale-hosted* LLM Engine, please provide the `request_id` in any bug reports.
* When running the *self-hosted* LLM Engine, the `request_id` serves as a trace ID in your observability
provider."""
output: Optional[CompletionStreamOutput] = None
"""Completion output."""
class CreateFineTuneRequest(BaseModel):
"""
Request object for creating a FineTune.
"""
model: str = Field(..., description="Identifier of base model to train from.")
"""Identifier of base model to train from."""
training_file: str = Field(
...,
description="Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.",
)
"""Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'."""
validation_file: Optional[str] = Field(
default=None,
description="Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.",
)
"""Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset."""
hyperparameters: Optional[Dict[str, Any]] = Field(
default=None, description="Hyperparameters to pass in to training job."
)
"""Hyperparameters to pass in to training job."""
suffix: Optional[str] = Field(
default=None,
description="Optional user-provided identifier suffix for the fine-tuned model.",
)
"""Optional user-provided identifier suffix for the fine-tuned model."""
class CreateFineTuneResponse(BaseModel):
"""
Response object for creating a FineTune.
"""
id: str = Field(..., description="ID of the created fine-tuning job.")
"""
The ID of the FineTune.
"""
class BatchJobStatus(str, Enum):
PENDING = "PENDING"
RUNNING = "RUNNING"
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
CANCELLED = "CANCELLED"
UNDEFINED = "UNDEFINED"
TIMEOUT = "TIMEOUT"
class GetFineTuneResponse(BaseModel):
"""
Response object for retrieving a FineTune.
"""
id: str = Field(..., description="ID of the requested job.")
"""
The ID of the FineTune.
"""
fine_tuned_model: Optional[str] = Field(
default=None,
description="Name of the resulting fine-tuned model. This can be plugged into the "
"Completion API once the fine-tune is complete",
)
"""
The name of the resulting fine-tuned model. This can be plugged into the Completion API
once the fine-tune is complete.
"""
status: BatchJobStatus = Field(..., description="Status of the requested job.")
"""
The status of the FineTune job.
"""
class ListFineTunesResponse(BaseModel):
"""
Response object for listing FineTunes.
"""
jobs: List[GetFineTuneResponse] = Field(
..., description="List of fine-tuning jobs and their statuses."
)
"""
A list of FineTunes, represented as `GetFineTuneResponse`s.
"""
class CancelFineTuneResponse(BaseModel):
"""
Response object for cancelling a FineTune.
"""
success: bool = Field(..., description="Whether cancellation was successful.")
"""
Whether the cancellation succeeded.
"""
class LLMFineTuneEvent(BaseModel):
"""
    Response object for one FineTune event.
"""
timestamp: Optional[float] = Field(
description="Timestamp of the event.",
default=None,
)
message: str = Field(description="Message of the event.")
level: str = Field(description="Logging level of the event.")
class GetFineTuneEventsResponse(BaseModel):
"""
Response object for getting events for a FineTune.
"""
events: List[LLMFineTuneEvent] = Field(..., description="List of fine-tuning events.")
class UploadFileResponse(BaseModel):
"""Response object for uploading a file."""
id: str = Field(..., description="ID of the uploaded file.")
"""ID of the uploaded file."""
class GetFileResponse(BaseModel):
"""Response object for retrieving a file."""
id: str = Field(..., description="ID of the requested file.")
"""ID of the requested file."""
filename: str = Field(..., description="File name.")
"""File name."""
size: int = Field(..., description="Length of the file, in characters.")
"""Length of the file, in characters."""
class ListFilesResponse(BaseModel):
"""Response object for listing files."""
files: List[GetFileResponse] = Field(..., description="List of file IDs, names, and sizes.")
"""List of file IDs, names, and sizes."""
class DeleteFileResponse(BaseModel):
"""Response object for deleting a file."""
deleted: bool = Field(..., description="Whether deletion was successful.")
"""Whether deletion was successful."""
class GetFileContentResponse(BaseModel):
"""Response object for retrieving a file's content."""
id: str = Field(..., description="ID of the requested file.")
"""ID of the requested file."""
content: str = Field(..., description="File content.")
"""File content."""
|
/scale_llm_engine-0.0.0b8-py3-none-any.whl/llmengine/data_types.py
| 0.672117 | 0.170266 |
data_types.py
|
pypi
|
from typing import Dict, List, Optional
from llmengine.api_engine import DEFAULT_TIMEOUT, APIEngine, assert_self_hosted
from llmengine.data_types import (
CreateLLMEndpointRequest,
CreateLLMEndpointResponse,
DeleteLLMEndpointResponse,
GetLLMEndpointResponse,
GpuType,
ListLLMEndpointsResponse,
LLMInferenceFramework,
LLMSource,
ModelEndpointType,
PostInferenceHooks,
Quantization,
)
class Model(APIEngine):
"""
Model API. This API is used to get, list, and delete models. Models include both base
models built into LLM Engine, and fine-tuned models that you create through the
[FineTune.create()](./#llmengine.fine_tuning.FineTune.create) API.
See [Model Zoo](../../model_zoo) for the list of publicly available base models.
"""
@classmethod
@assert_self_hosted
def create(
cls,
name: str,
# LLM specific fields
model: str,
inference_framework_image_tag: str,
source: LLMSource = LLMSource.HUGGING_FACE,
inference_framework: LLMInferenceFramework = LLMInferenceFramework.TEXT_GENERATION_INFERENCE,
num_shards: int = 4,
quantize: Optional[Quantization] = None,
checkpoint_path: Optional[str] = None,
# General endpoint fields
cpus: int = 32,
memory: str = "192Gi",
storage: str = "96Gi",
gpus: int = 4,
min_workers: int = 0,
max_workers: int = 1,
per_worker: int = 10,
endpoint_type: ModelEndpointType = ModelEndpointType.STREAMING,
gpu_type: Optional[str] = "nvidia-ampere-a10",
high_priority: Optional[bool] = False,
post_inference_hooks: Optional[List[PostInferenceHooks]] = None,
default_callback_url: Optional[str] = None,
public_inference: Optional[bool] = True,
labels: Optional[Dict[str, str]] = None,
) -> CreateLLMEndpointResponse:
"""
Create an LLM model. Note: This feature is only available for self-hosted users.
Args:
name (`str`):
Name of the endpoint
model (`str`):
Name of the base model
inference_framework_image_tag (`str`):
Image tag for the inference framework
source (`LLMSource`):
Source of the LLM. Currently only HuggingFace is supported
inference_framework (`LLMInferenceFramework`):
                Inference framework for the LLM. Currently DeepSpeed and text-generation-inference are supported
num_shards (`int`):
Number of shards for the LLM. When bigger than 1, LLM will be sharded
to multiple GPUs. Number of GPUs must be larger than num_shards.
Only affects behavior for text-generation-inference models
quantize (`Optional[Quantization]`):
Quantization for the LLM. Only affects behavior for text-generation-inference models
checkpoint_path (`Optional[str]`):
Path to the checkpoint for the LLM. For now we only support loading a tar file from AWS S3.
Safetensors are preferred but PyTorch checkpoints are also accepted (model loading will be slower).
Only affects behavior for text-generation-inference models
cpus (`int`):
Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater
than or equal to 1
memory (`str`):
Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must
be a positive amount of memory
storage (`str`):
Amount of local ephemeral storage each worker should get, e.g. "4Gi",
"512Mi", etc. This must be a positive amount of storage
gpus (`int`):
Number of gpus each worker should get, e.g. 0, 1, etc.
min_workers (`int`):
The minimum number of workers. Must be greater than or equal to 0. This
should be determined by computing the minimum throughput of your workload and
dividing it by the throughput of a single worker. This field must be at least ``1``
for synchronous endpoints
max_workers (`int`):
The maximum number of workers. Must be greater than or equal to 0,
and as well as greater than or equal to ``min_workers``. This should be determined by
computing the maximum throughput of your workload and dividing it by the throughput
of a single worker
per_worker (`int`):
The maximum number of concurrent requests that an individual worker can
service. Launch automatically scales the number of workers for the endpoint so that
each worker is processing ``per_worker`` requests, subject to the limits defined by
``min_workers`` and ``max_workers``
                - If the average number of concurrent requests per worker is lower than
                  ``per_worker``, then the number of workers will be reduced.
                - Otherwise, if the average number of concurrent requests per worker is higher
                  than ``per_worker``, then the number of workers will be increased to meet the
                  elevated traffic.
Here is our recommendation for computing ``per_worker``:
                1. Compute ``min_workers`` and ``max_workers`` per your minimum and maximum
                   throughput requirements.
                2. Determine a value for the maximum number of concurrent requests in the
                   workload. Divide this number by ``max_workers``. Doing this ensures that
                   the number of workers will "climb" to ``max_workers``.
endpoint_type (`ModelEndpointType`):
``"sync"``, ``"async"`` or ``"streaming"``.
gpu_type (`Optional[str]`):
If specifying a non-zero number of gpus, this controls the type of gpu
requested. Here are the supported values:
- ``nvidia-tesla-t4``
- ``nvidia-ampere-a10``
high_priority (`Optional[bool]`):
Either ``True`` or ``False``. Enabling this will allow the created
endpoint to leverage the shared pool of prewarmed nodes for faster spinup time
post_inference_hooks (`Optional[List[PostInferenceHooks]]`):
List of hooks to trigger after inference tasks are served
default_callback_url (`Optional[str]`):
The default callback url to use for async endpoints.
This can be overridden in the task parameters for each individual task.
post_inference_hooks must contain "callback" for the callback to be triggered
public_inference (`Optional[bool]`):
If ``True``, this endpoint will be available to all user IDs for
inference
labels (`Optional[Dict[str, str]]`):
An optional dictionary of key/value pairs to associate with this endpoint
Returns:
CreateLLMEndpointResponse: creation task ID of the created Model.
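        Example (illustrative; the endpoint name, base model, and image tag below are placeholder values):
        === "Creating a model in Python"
            ```python
            from llmengine import Model
            response = Model.create(
                name="llama-2-7b-test",
                model="llama-2-7b",
                inference_framework_image_tag="0.9.4",
                num_shards=4,
                gpus=4,
            )
            print(response.json())
            ```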
"""
post_inference_hooks_strs = None
if post_inference_hooks is not None:
post_inference_hooks_strs = []
for hook in post_inference_hooks:
if isinstance(hook, PostInferenceHooks):
post_inference_hooks_strs.append(hook.value)
else:
post_inference_hooks_strs.append(hook)
request = CreateLLMEndpointRequest(
name=name,
model_name=model,
source=source,
inference_framework=inference_framework,
inference_framework_image_tag=inference_framework_image_tag,
num_shards=num_shards,
quantize=quantize,
checkpoint_path=checkpoint_path,
cpus=cpus,
endpoint_type=ModelEndpointType(endpoint_type),
gpus=gpus,
gpu_type=GpuType(gpu_type) if gpu_type is not None else None,
labels=labels or {},
max_workers=max_workers,
memory=memory,
metadata={},
min_workers=min_workers,
per_worker=per_worker,
high_priority=high_priority,
post_inference_hooks=post_inference_hooks_strs,
default_callback_url=default_callback_url,
storage=storage,
public_inference=public_inference,
)
response = cls.post_sync(
resource_name="v1/llm/model-endpoints",
data=request.dict(),
timeout=DEFAULT_TIMEOUT,
)
return CreateLLMEndpointResponse.parse_obj(response)
@classmethod
def get(
cls,
model: str,
) -> GetLLMEndpointResponse:
"""
Get information about an LLM model.
This API can be used to get information about a Model's source and inference framework.
For self-hosted users, it returns additional information about number of shards, quantization, infra settings, etc.
The function takes as a single parameter the name `model`
and returns a
[GetLLMEndpointResponse](../../api/data_types/#llmengine.GetLLMEndpointResponse)
object.
Args:
model (`str`):
Name of the model
Returns:
GetLLMEndpointResponse: object representing the LLM and configurations
=== "Accessing model in Python"
```python
from llmengine import Model
response = Model.get("llama-2-7b.suffix.2023-07-18-12-00-00")
print(response.json())
```
=== "Response in JSON"
```json
{
"id": null,
"name": "llama-2-7b.suffix.2023-07-18-12-00-00",
"model_name": null,
"source": "hugging_face",
"inference_framework": "text_generation_inference",
"inference_framework_tag": null,
"num_shards": null,
"quantize": null,
"spec": null
}
```
"""
response = cls._get(f"v1/llm/model-endpoints/{model}", timeout=DEFAULT_TIMEOUT)
return GetLLMEndpointResponse.parse_obj(response)
@classmethod
def list(cls) -> ListLLMEndpointsResponse:
"""
List LLM models available to call inference on.
This API can be used to list all available models, including both publicly
available models and user-created fine-tuned models.
It returns a list of
[GetLLMEndpointResponse](../../api/data_types/#llmengine.GetLLMEndpointResponse)
objects for all models. The most important field is the model `name`.
Returns:
ListLLMEndpointsResponse: list of models
=== "Listing available modes in Python"
```python
from llmengine import Model
response = Model.list()
print(response.json())
```
=== "Response in JSON"
```json
{
"model_endpoints": [
{
"id": null,
"name": "llama-2-7b.suffix.2023-07-18-12-00-00",
"model_name": null,
"source": "hugging_face",
"inference_framework": "text_generation_inference",
"inference_framework_tag": null,
"num_shards": null,
"quantize": null,
"spec": null
},
{
"id": null,
"name": "llama-2-7b",
"model_name": null,
"source": "hugging_face",
"inference_framework": "text_generation_inference",
"inference_framework_tag": null,
"num_shards": null,
"quantize": null,
"spec": null
},
{
"id": null,
"name": "llama-13b-deepspeed-sync",
"model_name": null,
"source": "hugging_face",
"inference_framework": "deepspeed",
"inference_framework_tag": null,
"num_shards": null,
"quantize": null,
"spec": null
},
{
"id": null,
"name": "falcon-40b",
"model_name": null,
"source": "hugging_face",
"inference_framework": "text_generation_inference",
"inference_framework_tag": null,
"num_shards": null,
"quantize": null,
"spec": null
}
]
}
```
"""
response = cls._get("v1/llm/model-endpoints", timeout=DEFAULT_TIMEOUT)
return ListLLMEndpointsResponse.parse_obj(response)
@classmethod
def delete(cls, model: str) -> DeleteLLMEndpointResponse:
"""
Deletes an LLM model.
This API can be used to delete a fine-tuned model. It takes
as parameter the name of the `model` and returns a response
object which has a `deleted` field confirming if the deletion
was successful. If called on a base model included with LLM
Engine, an error will be thrown.
Args:
model (`str`):
Name of the model
Returns:
            DeleteLLMEndpointResponse: whether the model was successfully deleted
=== "Deleting model in Python"
```python
from llmengine import Model
response = Model.delete("llama-2-7b.suffix.2023-07-18-12-00-00")
print(response.json())
```
=== "Response in JSON"
```json
{
"deleted": true
}
```
"""
response = cls._delete(f"v1/llm/model-endpoints/{model}", timeout=DEFAULT_TIMEOUT)
return DeleteLLMEndpointResponse.parse_obj(response)
|
/scale_llm_engine-0.0.0b8-py3-none-any.whl/llmengine/model.py
| 0.91482 | 0.397938 |
model.py
|
pypi
|
from typing import AsyncIterable, Iterator, Union
from llmengine.api_engine import APIEngine
from llmengine.data_types import (
CompletionStreamResponse,
CompletionStreamV1Request,
CompletionSyncResponse,
CompletionSyncV1Request,
)
class Completion(APIEngine):
"""
Completion API. This API is used to generate text completions.
Language models are trained to understand natural language and predict text outputs as a response to
their inputs. The inputs are called _prompts_ and the outputs are referred to as _completions_.
LLMs take the input prompts and chunk them into smaller units called _tokens_ to process and generate
language. Tokens may include trailing spaces and even sub-words; this process is language dependent.
    The Completion API can be run either synchronously or asynchronously (via Python `asyncio`).
For each of these modes, you can also choose whether to stream token responses or not.
"""
@classmethod
async def acreate(
cls,
model: str,
prompt: str,
max_new_tokens: int = 20,
temperature: float = 0.2,
timeout: int = 10,
stream: bool = False,
) -> Union[CompletionSyncResponse, AsyncIterable[CompletionStreamResponse]]:
"""
Creates a completion for the provided prompt and parameters asynchronously (with `asyncio`).
This API can be used to get the LLM to generate a completion *asynchronously*.
It takes as parameters the `model` ([see Model Zoo](../../model_zoo)) and the `prompt`.
Optionally it takes `max_new_tokens`, `temperature`, `timeout` and `stream`.
It returns a
[CompletionSyncResponse](../../api/data_types/#llmengine.CompletionSyncResponse)
if `stream=False` or an async iterator of
[CompletionStreamResponse](../../api/data_types/#llmengine.CompletionStreamResponse)
        with `request_id` and `output` fields.
Args:
model (str):
Name of the model to use. See [Model Zoo](../../model_zoo) for a list of Models that are supported.
prompt (str):
The prompt to generate completions for, encoded as a string.
max_new_tokens (int):
The maximum number of tokens to generate in the completion.
The token count of your prompt plus `max_new_tokens` cannot exceed the model's context length. See
[Model Zoo](../../model_zoo) for information on each supported model's context length.
temperature (float):
What sampling temperature to use, in the range `(0, 1]`. Higher values like 0.8 will make the output
more random, while lower values like 0.2 will make it more focused and deterministic.
timeout (int):
Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.
stream (bool):
Whether to stream the response. If true, the return type is an
`Iterator[CompletionStreamResponse]`. Otherwise, the return type is a `CompletionSyncResponse`.
When streaming, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format).
Returns:
response (Union[CompletionSyncResponse, AsyncIterable[CompletionStreamResponse]]): The generated response (if `stream=False`) or iterator of response chunks (if `stream=True`)
=== "Asynchronous completion without token streaming in Python"
```python
import asyncio
from llmengine import Completion
async def main():
response = await Completion.acreate(
model="llama-2-7b",
prompt="Hello, my name is",
max_new_tokens=10,
temperature=0.2,
)
print(response.json())
asyncio.run(main())
```
=== "Response in JSON"
```json
{
"request_id": "9cfe4d5a-f86f-4094-a935-87f871d90ec0",
"output": {
"text": "_______ and I am a _______",
"num_completion_tokens": 10
}
}
```
Token streaming can be used to reduce _perceived_ latency for applications. Here is how applications can use streaming:
=== "Asynchronous completion with token streaming in Python"
```python
import asyncio
from llmengine import Completion
async def main():
stream = await Completion.acreate(
model="llama-2-7b",
prompt="why is the sky blue?",
max_new_tokens=5,
temperature=0.2,
stream=True,
)
async for response in stream:
if response.output:
print(response.json())
asyncio.run(main())
```
=== "Response in JSON"
```json
{"request_id": "9cfe4d5a-f86f-4094-a935-87f871d90ec0", "output": {"text": "\\n", "finished": false, "num_completion_tokens": 1}}
{"request_id": "9cfe4d5a-f86f-4094-a935-87f871d90ec0", "output": {"text": "I", "finished": false, "num_completion_tokens": 2}}
{"request_id": "9cfe4d5a-f86f-4094-a935-87f871d90ec0", "output": {"text": " think", "finished": false, "num_completion_tokens": 3}}
{"request_id": "9cfe4d5a-f86f-4094-a935-87f871d90ec0", "output": {"text": " the", "finished": false, "num_completion_tokens": 4}}
{"request_id": "9cfe4d5a-f86f-4094-a935-87f871d90ec0", "output": {"text": " sky", "finished": true, "num_completion_tokens": 5}}
```
"""
if stream:
async def _acreate_stream(
**kwargs,
) -> AsyncIterable[CompletionStreamResponse]:
data = CompletionStreamV1Request(**kwargs).dict()
response = cls.apost_stream(
resource_name=f"v1/llm/completions-stream?model_endpoint_name={model}",
data=data,
timeout=timeout,
)
async for chunk in response:
yield CompletionStreamResponse.parse_obj(chunk)
return _acreate_stream(
model=model,
prompt=prompt,
max_new_tokens=max_new_tokens,
temperature=temperature,
timeout=timeout,
)
else:
async def _acreate_sync(**kwargs) -> CompletionSyncResponse:
data = CompletionSyncV1Request(**kwargs).dict()
response = await cls.apost_sync(
resource_name=f"v1/llm/completions-sync?model_endpoint_name={model}",
data=data,
timeout=timeout,
)
return CompletionSyncResponse.parse_obj(response)
return await _acreate_sync(
prompt=prompt, max_new_tokens=max_new_tokens, temperature=temperature
)
@classmethod
def create(
cls,
model: str,
prompt: str,
max_new_tokens: int = 20,
temperature: float = 0.2,
timeout: int = 10,
stream: bool = False,
) -> Union[CompletionSyncResponse, Iterator[CompletionStreamResponse]]:
"""
Creates a completion for the provided prompt and parameters synchronously.
This API can be used to get the LLM to generate a completion *synchronously*.
It takes as parameters the `model` ([see Model Zoo](../../model_zoo)) and the `prompt`.
Optionally it takes `max_new_tokens`, `temperature`, `timeout` and `stream`.
It returns a
[CompletionSyncResponse](../../api/data_types/#llmengine.CompletionSyncResponse)
        if `stream=False` or an iterator of
[CompletionStreamResponse](../../api/data_types/#llmengine.CompletionStreamResponse)
        with `request_id` and `output` fields.
Args:
model (str):
Name of the model to use. See [Model Zoo](../../model_zoo) for a list of Models that are supported.
prompt (str):
The prompt to generate completions for, encoded as a string.
max_new_tokens (int):
The maximum number of tokens to generate in the completion.
The token count of your prompt plus `max_new_tokens` cannot exceed the model's context length. See
[Model Zoo](../../model_zoo) for information on each supported model's context length.
temperature (float):
What sampling temperature to use, in the range `(0, 1]`. Higher values like 0.8 will make the output
more random, while lower values like 0.2 will make it more focused and deterministic.
timeout (int):
Timeout in seconds. This is the maximum amount of time you are willing to wait for a response.
stream (bool):
Whether to stream the response. If true, the return type is an
`Iterator[CompletionStreamResponse]`. Otherwise, the return type is a `CompletionSyncResponse`.
When streaming, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format).
Returns:
            response (Union[CompletionSyncResponse, Iterator[CompletionStreamResponse]]): The generated response (if `stream=False`) or iterator of response chunks (if `stream=True`)
=== "Synchronous completion without token streaming in Python"
```python
from llmengine import Completion
response = Completion.create(
model="llama-2-7b",
prompt="Hello, my name is",
max_new_tokens=10,
temperature=0.2,
)
print(response.json())
```
=== "Response in JSON"
```json
{
"request_id": "8bbd0e83-f94c-465b-a12b-aabad45750a9",
"output": {
"text": "_______ and I am a _______",
"num_completion_tokens": 10
}
}
```
Token streaming can be used to reduce _perceived_ latency for applications. Here is how applications can use streaming:
=== "Synchronous completion with token streaming in Python"
```python
from llmengine import Completion
stream = Completion.create(
model="llama-2-7b",
prompt="why is the sky blue?",
max_new_tokens=5,
temperature=0.2,
stream=True,
)
for response in stream:
if response.output:
print(response.json())
```
=== "Response in JSON"
```json
{"request_id": "ebbde00c-8c31-4c03-8306-24f37cd25fa2", "output": {"text": "\\n", "finished": false, "num_completion_tokens": 1 } }
{"request_id": "ebbde00c-8c31-4c03-8306-24f37cd25fa2", "output": {"text": "I", "finished": false, "num_completion_tokens": 2 } }
{"request_id": "ebbde00c-8c31-4c03-8306-24f37cd25fa2", "output": {"text": " don", "finished": false, "num_completion_tokens": 3 } }
{"request_id": "ebbde00c-8c31-4c03-8306-24f37cd25fa2", "output": {"text": "’", "finished": false, "num_completion_tokens": 4 } }
{"request_id": "ebbde00c-8c31-4c03-8306-24f37cd25fa2", "output": {"text": "t", "finished": true, "num_completion_tokens": 5 } }
```
"""
if stream:
def _create_stream(**kwargs):
data_stream = CompletionStreamV1Request(**kwargs).dict()
response_stream = cls.post_stream(
resource_name=f"v1/llm/completions-stream?model_endpoint_name={model}",
data=data_stream,
timeout=timeout,
)
for chunk in response_stream:
yield CompletionStreamResponse.parse_obj(chunk)
return _create_stream(
prompt=prompt, max_new_tokens=max_new_tokens, temperature=temperature
)
else:
data = CompletionSyncV1Request(
prompt=prompt, max_new_tokens=max_new_tokens, temperature=temperature
).dict()
response = cls.post_sync(
resource_name=f"v1/llm/completions-sync?model_endpoint_name={model}",
data=data,
timeout=timeout,
)
return CompletionSyncResponse.parse_obj(response)
|
/scale_llm_engine-0.0.0b8-py3-none-any.whl/llmengine/completion.py
| 0.923351 | 0.762579 |
completion.py
|
pypi
|
# Nucleus
https://dashboard.scale.com/nucleus
Aggregate metrics in ML are not good enough. To improve production ML, you need to understand their qualitative failure modes, fix them by gathering more data, and curate diverse scenarios.
Scale Nucleus helps you:
- Visualize your data
- Curate interesting slices within your dataset
- Review and manage annotations
- Measure and debug your model performance
Nucleus is a new way—the right way—to develop ML models, helping us move away from the concept of one dataset and towards a paradigm of collections of scenarios.
## Installation
`$ pip install scale-nucleus`
## CLI installation
We recommend installing the CLI via `pipx` (https://pypa.github.io/pipx/installation/). This makes sure that
the CLI does not interfere with your system packages and is accessible from your favorite terminal.
For MacOS:
```bash
brew install pipx
pipx ensurepath
pipx install scale-nucleus
# Optional installation of shell completion (for bash, zsh or fish)
nu install-completions
```
Otherwise, install via pip (requires pip 19.0 or later):
```bash
python3 -m pip install --user pipx
python3 -m pipx ensurepath
python3 -m pipx install scale-nucleus
# Optional installation of shell completion (for bash, zsh or fish)
nu install-completions
```
## Common issues/FAQ
### Outdated Client
Nucleus is iterating rapidly and as a result we do not always perfectly preserve backwards compatibility with older versions of the client. If you run into any unexpected error, it's a good idea to upgrade your version of the client by running
```
pip install --upgrade scale-nucleus
```
## Usage
For the most up to date documentation, reference: https://dashboard.scale.com/nucleus/docs/api?language=python.
## For Developers
Clone from github and install as editable
```
git clone git@github.com:scaleapi/nucleus-python-client.git
cd nucleus-python-client
pip3 install poetry
poetry install
```
Please install the pre-commit hooks by running the following command:
```python
poetry run pre-commit install
```
When releasing a new version please add release notes to the changelog in `CHANGELOG.md`.
**Best practices for testing:**
(1) Please run pytest from the root directory of the repo, i.e.
```
poetry run pytest tests/test_dataset.py
```
(2) To skip slow integration tests that have to wait for an async job to start, run:
```
poetry run pytest -m "not integration"
```
## Pydantic Models
Prefer using [Pydantic](https://pydantic-docs.helpmanual.io/usage/models/) models rather than creating raw dictionaries
or dataclasses to send or receive over the wire as JSONs. Pydantic is created with data validation in mind and provides very clear error
messages when it encounters a problem with the payload.
The Pydantic model(s) should mirror the payload to send. To represent a JSON payload that looks like this:
```json
{
"example_json_with_info": {
"metadata": {
"frame": 0
},
"reference_id": "frame0",
"url": "s3://example/scale_nucleus/2021/lidar/0038711321865000.json",
"type": "pointcloud"
},
"example_image_with_info": {
"metadata": {
"author": "Picasso"
},
"reference_id": "frame0",
"url": "s3://bucket/0038711321865000.jpg",
"type": "image"
}
}
```
Could be represented as the following structure. Note that the field names map to the JSON keys, and note the use of field
validators (`@validator`).
```python
import os.path
from pydantic import BaseModel, validator
from typing import Literal
class JsonWithInfo(BaseModel):
metadata: dict # any dict is valid
reference_id: str
url: str
type: Literal["pointcloud", "recipe"]
@validator("url")
def has_json_extension(cls, v):
if not v.endswith(".json"):
raise ValueError(f"Expected '.json' extension got {v}")
return v
class ImageWithInfo(BaseModel):
metadata: dict # any dict is valid
reference_id: str
url: str
type: Literal["image", "mask"]
@validator("url")
def has_valid_extension(cls, v):
valid_extensions = {".jpg", ".jpeg", ".png", ".tiff"}
_, extension = os.path.splitext(v)
if extension not in valid_extensions:
raise ValueError(f"Expected extension in {valid_extensions} got {v}")
return v
class ExampleNestedModel(BaseModel):
example_json_with_info: JsonWithInfo
example_image_with_info: ImageWithInfo
# Usage:
import requests
payload = requests.get("/example")
parsed_model = ExampleNestedModel.parse_obj(payload.json())
requests.post("example/post_to", json=parsed_model.dict())
```
### Migrating to Pydantic
- When migrating an interface from a dictionary use `nucleus.pydantic_base.DictCompatibleModel`. That allows you to get
  the benefits of Pydantic but maintains backwards compatibility with a Python dictionary by delegating `__getitem__` to
  fields (a short sketch follows this list).
- When migrating a frozen dataclass use `nucleus.pydantic_base.ImmutableModel`. That is a base class set up to be
immutable after initialization.
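A minimal sketch of the dictionary-style migration (the `LegacyPayload` model below is hypothetical, shown only to illustrate the delegated `__getitem__` access):
```python
from nucleus.pydantic_base import DictCompatibleModel


class LegacyPayload(DictCompatibleModel):
    reference_id: str


payload = LegacyPayload(reference_id="frame0")
# Dictionary-style access keeps working alongside attribute access.
assert payload["reference_id"] == payload.reference_id
```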
**Updating documentation:**
We use [Sphinx](https://www.sphinx-doc.org/en/master/) to autogenerate our API Reference from docstrings.
To test your local docstring changes, run the following commands from the repository's root directory:
```
poetry shell
cd docs
sphinx-autobuild . ./_build/html --watch ../nucleus
```
`sphinx-autobuild` will spin up a server on localhost (port 8000 by default) that will watch for and automatically rebuild a version of the API reference based on your local docstring changes.
## Custom Metrics using Shapely in scale-validate
Certain metrics use `Shapely` and `rasterio`, which are added as optional dependencies.
```bash
pip install scale-nucleus[metrics]
```
Note that you might need to install a local GEOS package since Shapely doesn't provide binaries bundled with GEOS for every platform.
```bash
#Mac OS
brew install geos
# Ubuntu/Debian flavors
apt-get install libgeos-dev
```
To develop it locally use
`poetry install --extras metrics`
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/README.md
| 0.675765 | 0.900267 |
README.md
|
pypi
|
import json
import os.path
from collections import Counter
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Sequence
from .annotation import is_local_path
from .camera_params import CameraParams
from .constants import (
BACKEND_REFERENCE_ID_KEY,
CAMERA_PARAMS_KEY,
IMAGE_URL_KEY,
METADATA_KEY,
ORIGINAL_IMAGE_URL_KEY,
POINTCLOUD_URL_KEY,
REFERENCE_ID_KEY,
TYPE_KEY,
UPLOAD_TO_SCALE_KEY,
URL_KEY,
)
class DatasetItemType(Enum):
IMAGE = "image"
POINTCLOUD = "pointcloud"
@dataclass # pylint: disable=R0902
class DatasetItem: # pylint: disable=R0902
"""A dataset item is an image or pointcloud that has associated metadata.
Note: for 3D data, please include a :class:`CameraParams` object under a key named
"camera_params" within the metadata dictionary. This will allow for projecting
3D annotations to any image within a scene.
Args:
image_location (Optional[str]): Required if pointcloud_location is not present:
The location containing the image for the given row of data. This can be a local
path, or a remote URL. Remote formats supported include any URL (``http://`` or
``https://``) or URIs for AWS S3, Azure, or GCS (i.e. ``s3://``, ``gcs://``).
pointcloud_location (Optional[str]): Required if image_location is not present:
The remote URL containing the pointcloud JSON. Remote formats supported include
any URL (``http://`` or ``https://``) or URIs for AWS S3, Azure, or GCS (i.e.
``s3://``, ``gcs://``).
reference_id (Optional[str]): A user-specified identifier to reference the
item.
metadata (Optional[dict]): Extra information about the particular
dataset item. ints, floats, string values will be made searchable in
the query bar by the key in this dict. For example, ``{"animal":
"dog"}`` will become searchable via ``metadata.animal = "dog"``.
Categorical data can be passed as a string and will be treated
categorically by Nucleus if there are less than 250 unique values in the
dataset. This means histograms of values in the "Insights" section and
autocomplete within the query bar.
Numerical metadata will generate histograms in the "Insights" section,
allow for sorting the results of any query, and can be used with the
            modulo operator. For example: metadata.frame_number % 5 = 0
All other types of metadata will be visible from the dataset item detail
view.
It is important that string and numerical metadata fields are consistent
- if a metadata field has a string value, then all metadata fields with
the same key should also have string values, and vice versa for numerical
metadata. If conflicting types are found, Nucleus will return an error
during upload!
The recommended way of adding or updating existing metadata is to re-run
the ingestion (dataset.append) with update=True, which will replace any
existing metadata with whatever your new ingestion run uses. This will
delete any metadata keys that are not present in the new ingestion run.
We have a cache based on image_location that will skip the need for a
re-upload of the images, so your second ingestion will be faster than
your first.
For 3D (sensor fusion) data, it is highly recommended to include
camera intrinsics the metadata of your camera image items. Nucleus
requires these intrinsics to create visualizations such as cuboid
projections. Refer to our `guide to uploading 3D data
<https://nucleus.scale.com/docs/uploading-3d-data>`_ for more
info.
Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
Context Attachments may be provided to display the attachments side by side with the dataset
item in the Detail View by specifying
`{ "context_attachments": [ { "attachment": 'https://example.com/1' }, { "attachment": 'https://example.com/2' }, ... ] }`.
.. todo ::
Shorten this once we have a guide migrated for metadata, or maybe link
from other places to here.
upload_to_scale (Optional[bool]): Set this to false in order to use
`privacy mode <https://nucleus.scale.com/docs/privacy-mode>`_.
Setting this to false means the actual data within the item will not be
            uploaded to Scale, meaning that you can send in links that are only accessible
to certain users, and not to Scale. Skipping upload to Scale is currently only
implemented for images.
"""
image_location: Optional[str] = None
reference_id: str = (
"DUMMY_VALUE" # preserve argument ordering for backwards compatibility
)
metadata: Optional[dict] = None
pointcloud_location: Optional[str] = None
upload_to_scale: Optional[bool] = True
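    # Illustrative (not part of the class): a minimal image item with searchable
    # metadata, mirroring the docstring above.
    #   item = DatasetItem(
    #       image_location="s3://bucket/frame0.jpg",
    #       reference_id="frame0",
    #       metadata={"animal": "dog"},
    #   )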
def __post_init__(self):
assert self.reference_id != "DUMMY_VALUE", "reference_id is required."
assert bool(self.image_location) != bool(
self.pointcloud_location
), "Must specify exactly one of the image_location or pointcloud_location parameters"
if (self.pointcloud_location) and not self.upload_to_scale:
raise NotImplementedError(
"Skipping upload to Scale is not currently implemented for pointclouds."
)
self.local = (
is_local_path(self.image_location) if self.image_location else None
)
self.type = (
DatasetItemType.IMAGE
if self.image_location
else DatasetItemType.POINTCLOUD
)
camera_params = (
self.metadata.get(CAMERA_PARAMS_KEY, None)
if self.metadata
else None
)
self.camera_params = (
CameraParams.from_json(camera_params) if camera_params else None
)
@classmethod
def from_json(cls, payload: dict):
"""Instantiates dataset item object from schematized JSON dict payload."""
image_url = payload.get(IMAGE_URL_KEY, None) or payload.get(
ORIGINAL_IMAGE_URL_KEY, None
)
pointcloud_url = payload.get(POINTCLOUD_URL_KEY, None)
# handle case when re-converting Scene.from_json
url = payload.get(URL_KEY, None)
if url and not image_url and not pointcloud_url:
if url.split(".")[-1] in ("jpg", "png"):
image_url = url
elif url.split(".")[-1] in ("json",):
pointcloud_url = url
if BACKEND_REFERENCE_ID_KEY in payload:
payload[REFERENCE_ID_KEY] = payload[BACKEND_REFERENCE_ID_KEY]
return cls(
image_location=image_url,
pointcloud_location=pointcloud_url,
reference_id=payload.get(REFERENCE_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
upload_to_scale=payload.get(UPLOAD_TO_SCALE_KEY, True),
)
def local_file_exists(self):
# TODO: make private
return os.path.isfile(self.image_location)
def to_payload(self, is_scene=False) -> dict:
"""Serializes dataset item object to schematized JSON dict."""
payload: Dict[str, Any] = {
METADATA_KEY: self.metadata or {},
}
payload[REFERENCE_ID_KEY] = self.reference_id
if is_scene:
if self.image_location:
payload[URL_KEY] = self.image_location
elif self.pointcloud_location:
payload[URL_KEY] = self.pointcloud_location
payload[TYPE_KEY] = self.type.value
if self.camera_params:
payload[CAMERA_PARAMS_KEY] = self.camera_params.to_payload()
else:
assert (
self.image_location
), "Must specify image_location for DatasetItems not in a LidarScene or VideoScene"
payload[IMAGE_URL_KEY] = self.image_location
payload[UPLOAD_TO_SCALE_KEY] = self.upload_to_scale
return payload
def to_json(self) -> str:
"""Serializes dataset item object to schematized JSON string."""
return json.dumps(self.to_payload(), allow_nan=False)
def check_all_paths_remote(dataset_items: Sequence[DatasetItem]):
for item in dataset_items:
if item.image_location and is_local_path(item.image_location):
raise ValueError(
f"All paths must be remote, but {item.image_location} is either "
"local, or a remote URL type that is not supported."
)
def check_for_duplicate_reference_ids(dataset_items: Sequence[DatasetItem]):
ref_ids = []
for dataset_item in dataset_items:
if dataset_item.reference_id is None:
raise ValueError(
f"Reference ID cannot be None. Encountered DatasetItem with no reference ID:\n{dataset_item}"
)
ref_ids.append(dataset_item.reference_id)
if len(ref_ids) != len(set(ref_ids)):
duplicates = {
f"{key}": f"Count: {value}"
for key, value in Counter(ref_ids).items()
if value > 1
}
raise ValueError(
f"Duplicate reference IDs found among dataset_items: {duplicates}"
)
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/dataset_item.py
| 0.844345 | 0.42931 |
dataset_item.py
|
pypi
|
from enum import Enum
from typing import TYPE_CHECKING, Dict, Optional
from .async_job import AsyncJob
from .camera_params import CameraParams
from .constants import CAMERA_PARAMS_KEY
if TYPE_CHECKING:
from . import NucleusClient
# Wording set to match with backend enum
class ExportMetadataType(Enum):
SCENES = "scene"
DATASET_ITEMS = "item"
class MetadataManager:
"""
Helper class for managing metadata updates on a scene or dataset item.
    Do not call directly; use the dataset class methods `update_scene_metadata` or `update_item_metadata`
"""
def __init__(
self,
dataset_id: str,
client: "NucleusClient",
raw_mappings: Dict[str, dict],
level: ExportMetadataType,
asynchronous: bool,
):
self.dataset_id = dataset_id
self._client = client
self.raw_mappings = raw_mappings
self.level = level
self.asynchronous = asynchronous
if len(self.raw_mappings) > 500 and not self.asynchronous:
raise Exception(
"Number of items to update is too large to perform it synchronously. "
"Consider running the metadata_update with `asynchronous=True`, to avoid timeouts."
)
self._payload = self._format_mappings()
def _extract_camera_params(self, metadata: dict) -> Optional[CameraParams]:
camera_params = metadata.get(CAMERA_PARAMS_KEY, None)
if camera_params is None:
return None
return CameraParams.from_json(camera_params)
def _format_mappings(self):
payloads = []
for ref_id, meta in self.raw_mappings.items():
payload = {"reference_id": ref_id, "metadata": meta}
if self.level.value == ExportMetadataType.DATASET_ITEMS.value:
camera_params = self._extract_camera_params(meta)
if camera_params:
payload[CAMERA_PARAMS_KEY] = camera_params.to_payload()
payloads.append(payload)
return payloads
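    # Illustrative: `raw_mappings` maps reference IDs to metadata dicts, e.g.
    #   {"frame0": {"weather": "rain"}}
    # which _format_mappings turns into payloads of the form
    #   {"reference_id": "frame0", "metadata": {"weather": "rain"}}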
def update(self):
payload = {"metadata": self._payload, "level": self.level.value}
is_async = int(self.asynchronous)
try:
resp = self._client.make_request(
payload=payload,
route=f"dataset/{self.dataset_id}/metadata?async={is_async}",
)
if self.asynchronous:
return AsyncJob.from_json(resp, self._client)
return resp
except Exception as e: # pylint: disable=W0703
print(
"Failed to complete the request. If a timeout occurred, consider running the "
"metadata_update with `asynchronous=True`."
)
print(f"Request failed with:\n\n{e}")
return None
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metadata_manager.py
| 0.881761 | 0.18101 |
metadata_manager.py
|
pypi
|
import json
from collections import Counter
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence
from nucleus.annotation import Annotation, SegmentationAnnotation
from nucleus.async_utils import (
FileFormField,
FormDataContextHandler,
make_many_form_data_requests_concurrently,
)
from nucleus.constants import MASK_TYPE, SERIALIZED_REQUEST_KEY
from nucleus.errors import DuplicateIDError
from nucleus.payload_constructor import (
construct_annotation_payload,
construct_segmentation_payload,
)
if TYPE_CHECKING:
from . import NucleusClient
def accumulate_dict_values(dicts: Iterable[dict]):
"""
Accumulate a list of dicts into a single dict using summation.
"""
result = {}
for d in dicts:
for key, value in d.items():
if (
key not in result
or key == "dataset_id"
or key == "model_run_id"
):
result[key] = value
else:
result[key] += value
return result
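# Illustrative: per-batch upload responses are merged by summing values, while
# identifier keys such as "dataset_id" are carried through unchanged. The
# "annotations_processed" key below is placeholder data:
#   accumulate_dict_values([
#       {"dataset_id": "d1", "annotations_processed": 1},
#       {"dataset_id": "d1", "annotations_processed": 2},
#   ])
#   -> {"dataset_id": "d1", "annotations_processed": 3}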
class AnnotationUploader:
"""This is a helper class not intended for direct use. Please use dataset.annotate
or dataset.upload_predictions.
This class is purely a helper class for implementing dataset.annotate/dataset.predict.
"""
def __init__(
self, dataset_id: Optional[str], client: "NucleusClient"
): # noqa: F821
self._client = client
self._route = f"dataset/{dataset_id}/annotate"
def upload(
self,
annotations: Iterable[Annotation],
batch_size: int = 5000,
update: bool = False,
remote_files_per_upload_request: int = 20,
local_files_per_upload_request: int = 10,
):
"""For more details on parameters and functionality, see dataset.annotate."""
if local_files_per_upload_request > 10:
raise ValueError("local_files_per_upload_request must be <= 10")
annotations_without_files: List[Annotation] = []
segmentations_with_local_files: List[SegmentationAnnotation] = []
segmentations_with_remote_files: List[SegmentationAnnotation] = []
for annotation in annotations:
if annotation.has_local_files_to_upload():
# Only segmentations have local files currently, and probably for a long
                # time to come.
assert isinstance(annotation, SegmentationAnnotation)
segmentations_with_local_files.append(annotation)
elif isinstance(annotation, SegmentationAnnotation):
segmentations_with_remote_files.append(annotation)
else:
annotations_without_files.append(annotation)
responses = []
if segmentations_with_local_files:
responses.extend(
self.make_batched_file_form_data_requests(
segmentations=segmentations_with_local_files,
update=update,
local_files_per_upload_request=local_files_per_upload_request,
)
)
if segmentations_with_remote_files:
# Segmentations require an upload and must be batched differently since a single
# segmentation will take a lot longer for the server to process than a single
# annotation of any other kind.
responses.extend(
self.make_batched_requests(
segmentations_with_remote_files,
update,
batch_size=remote_files_per_upload_request,
segmentation=True,
)
)
if annotations_without_files:
responses.extend(
self.make_batched_requests(
annotations_without_files,
update,
batch_size=batch_size,
segmentation=False,
)
)
return accumulate_dict_values(responses)
def make_batched_requests(
self,
annotations: Sequence[Annotation],
update: bool,
batch_size: int,
segmentation: bool,
):
batches = [
annotations[i : i + batch_size]
for i in range(0, len(annotations), batch_size)
]
responses = []
progress_bar_name = (
"Segmentation batches" if segmentation else "Annotation batches"
)
for batch in self._client.tqdm_bar(batches, desc=progress_bar_name):
payload = construct_annotation_payload(batch, update)
responses.append(
self._client.make_request(payload, route=self._route)
)
return responses
def make_batched_file_form_data_requests(
self,
segmentations: Sequence[SegmentationAnnotation],
update,
local_files_per_upload_request: int,
):
requests = []
for i in range(0, len(segmentations), local_files_per_upload_request):
batch = segmentations[i : i + local_files_per_upload_request]
request = FormDataContextHandler(
self.get_form_data_and_file_pointers_fn(batch, update)
)
requests.append(request)
progressbar = self._client.tqdm_bar(
total=len(requests),
desc="Local segmentation mask file batches",
)
return make_many_form_data_requests_concurrently(
client=self._client,
requests=requests,
route=self._route,
progressbar=progressbar,
)
def get_form_data_and_file_pointers_fn(
self,
segmentations: Iterable[SegmentationAnnotation],
update: bool,
):
"""Defines a function to be called on each retry.
File pointers are also returned so whoever calls this function can
appropriately close the files. This is intended for use with a
FormDataContextHandler in order to make form data requests.
"""
def fn():
request_json = construct_segmentation_payload(
segmentations, update
)
form_data = [
FileFormField(
name=SERIALIZED_REQUEST_KEY,
filename=None,
value=json.dumps(request_json),
content_type="application/json",
)
]
file_pointers = []
for segmentation in segmentations:
# I don't know of a way to use with, since all files in the request
# need to be opened at the same time.
# pylint: disable=consider-using-with
mask_fp = open(segmentation.mask_url, "rb")
# pylint: enable=consider-using-with
file_type = segmentation.mask_url.split(".")[-1]
if file_type != "png":
raise ValueError(
f"Only png files are supported. Got {file_type} for {segmentation.mask_url}"
)
form_data.append(
FileFormField(
name=MASK_TYPE,
filename=segmentation.mask_url,
value=mask_fp,
content_type="image/png",
)
)
file_pointers.append(mask_fp)
return form_data, file_pointers
return fn
@staticmethod
def check_for_duplicate_ids(annotations: Iterable[Annotation]):
"""Do not allow annotations to have the same (annotation_id, reference_id) tuple"""
# some annotations like CategoryAnnotation do not have annotation_id attribute, and as such, we allow duplicates
tuple_ids = [
(ann.reference_id, ann.annotation_id) # type: ignore
for ann in annotations
if hasattr(ann, "annotation_id")
]
tuple_count = Counter(tuple_ids)
duplicates = {key for key, value in tuple_count.items() if value > 1}
if len(duplicates) > 0:
raise DuplicateIDError(
f"Duplicate annotations with the same (reference_id, annotation_id) properties found.\n"
f"Duplicates: {duplicates}\n"
f"To fix this, avoid duplicate annotations, or specify a different annotation_id attribute "
f"for the failing items."
)
class PredictionUploader(AnnotationUploader):
def __init__(
self,
client: "NucleusClient",
dataset_id: Optional[str] = None,
model_id: Optional[str] = None,
model_run_id: Optional[str] = None,
):
super().__init__(dataset_id, client)
self._client = client
if model_run_id is not None:
            assert (
                model_id is None and dataset_id is None
            ), "Model ID and dataset ID should not be specified when using a model run ID."
self._route = f"modelRun/{model_run_id}/predict"
else:
assert (
model_id is not None and dataset_id is not None
), "Model ID and dataset ID are required if not using model run id."
self._route = (
f"dataset/{dataset_id}/model/{model_id}/uploadPredictions"
)
# ---- end of file: /scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/annotation_uploader.py (pypi) ----
import json
import warnings
from abc import ABC
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from nucleus.constants import (
FRAME_RATE_KEY,
FRAMES_KEY,
IMAGE_LOCATION_KEY,
LENGTH_KEY,
METADATA_KEY,
NUM_SENSORS_KEY,
POINTCLOUD_LOCATION_KEY,
REFERENCE_ID_KEY,
TRACKS_KEY,
UPLOAD_TO_SCALE_KEY,
VIDEO_LOCATION_KEY,
VIDEO_URL_KEY,
)
from nucleus.track import Track
from .annotation import is_local_path
from .dataset_item import (
DatasetItem,
DatasetItemType,
check_for_duplicate_reference_ids,
)
if TYPE_CHECKING:
from . import NucleusClient
class Frame:
"""Collection of sensor data pertaining to a single time step.
For 3D data, each Frame houses a sensor-to-data mapping and must have exactly
one pointcloud with any number of camera images.
Parameters:
**kwargs (Dict[str, :class:`DatasetItem`]): Mappings from sensor name
to dataset item. Each frame of a lidar scene must contain exactly one
pointcloud and any number of images (e.g. from different angles).
Refer to our `guide to uploading 3D data
<https://docs.nucleus.scale.com/docs/uploading-3d-data>`_ for more info!
"""
def __init__(self, **kwargs):
self.items: Dict[str, DatasetItem] = {}
for key, value in kwargs.items():
assert isinstance(key, str), "All keys must be names of sensors"
assert isinstance(
value, DatasetItem
), f"All values must be DatasetItems, instead got type {type(value)}"
self.items[key] = value
check_for_duplicate_reference_ids(list(self.items.values()))
def __repr__(self) -> str:
return f"Frame(items={self.items})"
def __eq__(self, other):
for key, value in self.items.items():
if key not in other.items:
return False
if value != other.items[key]:
return False
return True
def add_item(self, item: DatasetItem, sensor_name: str) -> None:
"""Adds DatasetItem object to frame as sensor data.
Parameters:
item (:class:`DatasetItem`): Pointcloud or camera image item to add.
sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
"""
self.items[sensor_name] = item
def get_item(self, sensor_name: str) -> DatasetItem:
"""Fetches the DatasetItem object associated with the given sensor.
Parameters:
sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
Returns:
:class:`DatasetItem`: DatasetItem object pertaining to the sensor.
"""
if sensor_name not in self.items:
raise ValueError(
f"This frame does not have a {sensor_name} sensor"
)
return self.items[sensor_name]
def get_items(self) -> List[DatasetItem]:
"""Fetches all items in the frame.
Returns:
List[:class:`DatasetItem`]: List of all DatasetItem objects in the frame.
"""
return list(self.items.values())
def get_sensors(self) -> List[str]:
"""Fetches all sensor names of the frame.
Returns:
List of all sensor names of the frame."""
return list(self.items.keys())
@classmethod
def from_json(cls, payload: dict):
"""Instantiates frame object from schematized JSON dict payload."""
items = {
sensor: DatasetItem.from_json(item)
for sensor, item in payload.items()
}
return cls(**items)
def to_payload(self) -> dict:
"""Serializes frame object to schematized JSON dict."""
return {
sensor: dataset_item.to_payload(is_scene=True)
for sensor, dataset_item in self.items.items()
}
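# Hedged usage sketch (not part of the original module): shows how a Frame can be
# assembled from per-sensor DatasetItems and inspected. Assumes DatasetItem accepts
# `pointcloud_location` / `image_location` and `reference_id` keyword arguments, as its
# use elsewhere in this package suggests; bucket paths and sensor names are placeholders.
def _example_frame_usage():
    frame = Frame(
        lidar=DatasetItem(
            pointcloud_location="s3://your-bucket/scene_1/frame_0.json",
            reference_id="scene_1_frame_0_lidar",
        ),
        front_cam=DatasetItem(
            image_location="s3://your-bucket/scene_1/frame_0_front.jpg",
            reference_id="scene_1_frame_0_front",
        ),
    )
    # Every sensor name maps to exactly one DatasetItem.
    return frame.get_sensors(), frame.get_items()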
@dataclass
class Scene(ABC):
reference_id: str
frames: List[Frame] = field(default_factory=list)
metadata: Optional[dict] = field(default_factory=dict)
tracks: List[Track] = field(default_factory=list)
skip_validate: Optional[bool] = False
def __post_init__(self):
self.sensors = set(
flatten([frame.get_sensors() for frame in self.frames])
)
self.frames_dict = dict(enumerate(self.frames))
if self.metadata is None:
self.metadata = {}
if not self.skip_validate:
self.validate()
def __eq__(self, other):
return all(
[
self.reference_id == other.reference_id,
self.frames == other.frames,
self.metadata == other.metadata,
self.tracks == other.tracks,
]
)
@property
def length(self) -> int:
"""Number of frames in the scene."""
return len(self.frames_dict)
@property
def num_sensors(self) -> int:
"""Number of sensors in the scene."""
return len(self.get_sensors())
def validate(self):
# TODO: make private
assert self.length > 0, "Must have at least 1 frame in a scene"
all_items = []
for frame in self.frames_dict.values():
assert isinstance(
frame, Frame
), "Each frame in a scene must be a Frame object"
all_items.extend(frame.get_items())
check_for_duplicate_reference_ids(all_items)
def add_item(
self, index: int, sensor_name: str, item: DatasetItem
) -> None:
"""Adds DatasetItem to the specified frame as sensor data.
Parameters:
index: Serial index of the frame to which to add the item.
item (:class:`DatasetItem`): Pointcloud or camera image item to add.
sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
"""
self.sensors.add(sensor_name)
if index not in self.frames_dict:
new_frame = Frame(**{sensor_name: item})
self.frames_dict[index] = new_frame
else:
self.frames_dict[index].items[sensor_name] = item
def add_frame(
self, frame: Frame, index: int, update: bool = False
) -> None:
"""Adds frame to scene at the specified index.
Parameters:
frame (:class:`Frame`): Frame object to add.
index: Serial index at which to add the frame.
update: Whether to overwrite the frame at the specified index, if it
exists. Default is False.
"""
        if index not in self.frames_dict or update:
self.frames_dict[index] = frame
self.sensors.update(frame.get_sensors())
def get_frame(self, index: int) -> Frame:
"""Fetches the Frame object at the specified index.
Parameters:
index: Serial index for which to retrieve the Frame.
Return:
:class:`Frame`: Frame object at the specified index."""
if index not in self.frames_dict:
raise ValueError(
f"This scene does not have a frame at index {index}"
)
return self.frames_dict[index]
def get_frames(self) -> List[Frame]:
"""Fetches a sorted list of Frames of the scene.
Returns:
List[:class:`Frame`]: List of Frames, sorted by index ascending.
"""
return [
frame
for _, frame in sorted(
self.frames_dict.items(), key=lambda x: x[0]
)
]
def get_sensors(self) -> List[str]:
"""Fetches all sensor names of the scene.
Returns:
List of all sensor names associated with frames in the scene."""
return list(self.sensors)
def get_item(self, index: int, sensor_name: str) -> DatasetItem:
"""Fetches the DatasetItem object of the given frame and sensor.
Parameters:
index: Serial index of the frame from which to fetch the item.
sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
Returns:
:class:`DatasetItem`: DatasetItem object of the frame and sensor.
"""
frame = self.get_frame(index)
return frame.get_item(sensor_name)
def get_items_from_sensor(self, sensor_name: str) -> List[DatasetItem]:
"""Fetches all DatasetItem objects of the given sensor.
Parameters:
sensor_name: Name of the sensor, e.g. "lidar" or "front_cam."
Returns:
List[:class:`DatasetItem`]: List of DatasetItem objects associated
with the specified sensor.
"""
if sensor_name not in self.sensors:
raise ValueError(
f"This scene does not have a {sensor_name} sensor"
)
items_from_sensor = []
for frame in self.frames_dict.values():
try:
sensor_item = frame.get_item(sensor_name)
items_from_sensor.append(sensor_item)
except ValueError:
# This sensor is not present at current frame
items_from_sensor.append(None)
return items_from_sensor
def get_items(self) -> List[DatasetItem]:
"""Fetches all items in the scene.
Returns:
List[:class:`DatasetItem`]: Unordered list of all DatasetItem
objects in the scene.
"""
return flatten([frame.get_items() for frame in self.get_frames()])
def info(self):
"""Fetches information about the scene.
Returns:
Payload containing::
{
"reference_id": str,
"length": int,
"num_sensors": int
}
"""
return {
REFERENCE_ID_KEY: self.reference_id,
LENGTH_KEY: self.length,
NUM_SENSORS_KEY: self.num_sensors,
}
def validate_frames_dict(self):
# TODO: make private
is_continuous = set(list(range(len(self.frames_dict)))) == set(
self.frames_dict.keys()
)
assert (
is_continuous
), "frames must be 0-indexed and continuous (no missing frames)"
@classmethod
def from_json(
cls,
payload: dict,
client: Optional["NucleusClient"] = None,
skip_validate: Optional[bool] = False,
):
"""Instantiates scene object from schematized JSON dict payload."""
frames_payload = payload.get(FRAMES_KEY, [])
frames = [Frame.from_json(frame) for frame in frames_payload]
tracks_payload = payload.get(TRACKS_KEY, [])
tracks = (
[
Track.from_json(track, connection=client.connection)
for track in tracks_payload
]
if client
else []
)
return cls(
reference_id=payload[REFERENCE_ID_KEY],
frames=frames,
metadata=payload.get(METADATA_KEY, {}),
skip_validate=skip_validate,
tracks=tracks,
)
def to_payload(self) -> dict:
"""Serializes scene object to schematized JSON dict."""
self.validate_frames_dict()
ordered_frames = self.get_frames()
frames_payload = [frame.to_payload() for frame in ordered_frames]
payload: Dict[str, Any] = {
REFERENCE_ID_KEY: self.reference_id,
FRAMES_KEY: frames_payload,
}
if self.tracks:
payload[TRACKS_KEY] = [track.to_payload() for track in self.tracks]
if self.metadata:
payload[METADATA_KEY] = self.metadata
return payload
def to_json(self) -> str:
"""Serializes scene object to schematized JSON string."""
return json.dumps(self.to_payload(), allow_nan=False)
@dataclass
class LidarScene(Scene):
"""Sequence of lidar pointcloud and camera images over time.
Nucleus 3D datasets are comprised of LidarScenes, which are sequences of
lidar pointclouds and camera images over time. These sequences are in turn
comprised of :class:`Frames <Frame>`.
By organizing data across multiple sensors over time, LidarScenes make it
easier to interpret pointclouds, allowing you to see objects move over time
by clicking through frames and providing context in the form of corresponding
images.
You can think of scenes and frames as nested groupings of sensor data across
time:
* LidarScene for a given location
* Frame at timestep 0
* DatasetItem of pointcloud
* DatasetItem of front camera image
* DatasetItem of rear camera image
* Frame at timestep 1
* ...
* ...
* LidarScene for another location
* ...
LidarScenes are uploaded to a :class:`Dataset` with any accompanying
metadata. Frames do not accept metadata, but each of its constituent
:class:`DatasetItems <DatasetItem>` does.
Note: Uploads with a different number of frames/items will error out (only
on scenes that now differ). Existing scenes are expected to retain the
same structure, i.e. the same number of frames, and same items per frame.
If a scene definition is changed (for example, additional frames added) the
update operation will be ignored. If you would like to alter the structure
of a scene, please delete the scene and re-upload.
Parameters:
reference_id (str): User-specified identifier to reference the scene.
frames (Optional[List[:class:`Frame`]]): List of frames to be a part of
the scene. A scene can be created before frames or items have been
added to it, but must be non-empty when uploading to a :class:`Dataset`.
metadata (Optional[Dict]):
Optional metadata to include with the scene.
Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
Context Attachments may be provided to display the attachments side by side with the dataset
item in the Detail View by specifying
`{ "context_attachments": [ { "attachment": 'https://example.com/1' }, { "attachment": 'https://example.com/2' }, ... ] }`.
Refer to our `guide to uploading 3D data
<https://docs.nucleus.scale.com/docs/uploading-3d-data>`_ for more info!
"""
def __repr__(self) -> str:
return f"LidarScene(reference_id='{self.reference_id}', frames={self.get_frames()}, metadata={self.metadata})"
def validate(self):
# TODO: make private
super().validate()
lidar_sensors = flatten(
[
[
sensor
for sensor in frame.items.keys()
if frame.items[sensor].type == DatasetItemType.POINTCLOUD
]
for frame in self.frames_dict.values()
]
)
assert (
len(set(lidar_sensors)) == 1
), "Each lidar scene must have exactly one lidar sensor"
for frame in self.frames_dict.values():
num_pointclouds = sum(
[
int(item.type == DatasetItemType.POINTCLOUD)
for item in frame.get_items()
]
)
assert (
num_pointclouds == 1
), "Each frame of a lidar scene must have exactly 1 pointcloud"
def flatten(t):
return [item for sublist in t for item in sublist]
@dataclass
class VideoScene(ABC):
"""Video or sequence of images over time.
Nucleus video datasets are comprised of VideoScenes. These can be
comprised of a single video, or a sequence of :class:`DatasetItems <DatasetItem>`
which are equivalent to frames.
VideoScenes are uploaded to a :class:`Dataset` with any accompanying
metadata. Each of :class:`DatasetItems <DatasetItem>` representing a frame
also accepts metadata.
Note: Updates with different items will error out (only on scenes that
    now differ). Existing videos are expected to retain the same frames, and only
metadata can be updated. If a video definition is changed (for example,
additional frames added) the update operation will be ignored. If you would
like to alter the structure of a video scene, please delete the scene and
re-upload.
Parameters:
reference_id (str): User-specified identifier to reference the scene.
frame_rate (Optional[int]): Required if uploading items. Frame rate of the video.
video_location (Optional[str]): Required if not uploading items. The remote URL
containing the video MP4. Remote formats supported include any URL (``http://``
or ``https://``) or URIs for AWS S3, Azure, or GCS (i.e. ``s3://``, ``gcs://``).
items (Optional[List[:class:`DatasetItem`]]): Required if not uploading video_location.
List of items representing frames, to be a part of the scene. A scene can be created
before items have been added to it, but must be non-empty when uploading to
a :class:`Dataset`. A video scene can contain a maximum of 3000 items.
metadata (Optional[Dict]):
Optional metadata to include with the scene.
Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
Context Attachments may be provided to display the attachments side by side with the dataset
item in the Detail View by specifying
`{ "context_attachments": [ { "attachment": 'https://example.com/1' }, { "attachment": 'https://example.com/2' }, ... ] }`.
        upload_to_scale (Optional[bool]): Set this to false in order to use
            `privacy mode <https://nucleus.scale.com/docs/privacy-mode>`_. If using privacy mode,
            you must upload both a video_location and items to the VideoScene.
            Setting this to false means the actual data within the video scene will not be
            uploaded to Scale, so you can send in links that are only accessible
            to certain users and not to Scale.
Refer to our `guide to uploading video data
<https://nucleus.scale.com/docs/uploading-video-data>`_ for more info!
"""
reference_id: str
frame_rate: Optional[int] = None
video_location: Optional[str] = None
items: List[DatasetItem] = field(default_factory=list)
metadata: Optional[dict] = field(default_factory=dict)
upload_to_scale: Optional[bool] = True
attachment_type: Optional[str] = None
tracks: List[Track] = field(default_factory=list)
def __post_init__(self):
if self.attachment_type:
warnings.warn(
"The attachment_type parameter is no longer required and will be deprecated soon.",
DeprecationWarning,
)
if self.metadata is None:
self.metadata = {}
def __eq__(self, other):
return all(
[
self.reference_id == other.reference_id,
self.items == other.items,
self.video_location == other.video_location,
self.metadata == other.metadata,
self.tracks == other.tracks,
]
)
@property
def length(self) -> int:
"""Gets number of items in the scene for videos uploaded with an array of images."""
assert (
not self.upload_to_scale or not self.video_location
), "Only videos with items have a length"
return len(self.items)
def validate(self):
# TODO: make private
assert (
self.items or self.video_location
), "Please upload either a video_location or an array of dataset items representing frames"
if self.upload_to_scale is False:
            assert (
                self.frame_rate is not None and self.frame_rate > 0
            ), "When using privacy mode frame rate must be at least 1"
assert (
self.items and self.length > 0
), "When using privacy mode scene must have a list of items of length at least 1"
for item in self.items:
assert isinstance(
item, DatasetItem
), "Each item in a scene must be a DatasetItem object"
assert (
item.image_location is not None
), "Each item in a video scene must have an image_location"
assert (
item.upload_to_scale is not False
), "Please specify whether to upload to scale in the VideoScene for videos"
elif self.items:
            assert (
                self.frame_rate is not None and self.frame_rate > 0
            ), "When uploading an array of items frame rate must be at least 1"
assert (
self.length > 0
), "When uploading an array of items scene must have a list of items of length at least 1"
assert (
not self.video_location
), "No video location is accepted when uploading an array of items unless you are using privacy mode"
for item in self.items:
assert isinstance(
item, DatasetItem
), "Each item in a scene must be a DatasetItem object"
assert (
item.image_location is not None
), "Each item in a video scene must have an image_location"
assert (
item.upload_to_scale is not False
), "Please specify whether to upload to scale in the VideoScene for videos"
else:
assert (
not self.frame_rate
), "No frame rate is accepted when uploading a video_location"
assert (
not self.items
), "No list of items is accepted when uploading a video_location unless you are using privacy mode"
def add_item(
        self, item: DatasetItem, index: Optional[int] = None, update: bool = False
) -> None:
"""Adds DatasetItem to the specified index for videos uploaded as an array of images.
Parameters:
item (:class:`DatasetItem`): Video item to add.
index: Serial index at which to add the item.
update: Whether to overwrite the item at the specified index, if it
exists. Default is False.
"""
assert (
not self.upload_to_scale or not self.video_location
), "Cannot add item to a video without items"
if index is None:
index = len(self.items)
assert (
0 <= index <= len(self.items)
), f"Video scenes must be contiguous so index must be at least 0 and at most {len(self.items)}."
if index < len(self.items) and update:
self.items[index] = item
else:
self.items.append(item)
def get_item(self, index: int) -> DatasetItem:
"""Fetches the DatasetItem at the specified index for videos uploaded as an array of images.
Parameters:
index: Serial index for which to retrieve the DatasetItem.
Return:
:class:`DatasetItem`: DatasetItem at the specified index."""
        assert (
            not self.upload_to_scale or not self.video_location
        ), "Cannot get an item from a video without items"
        if index < 0 or index >= len(self.items):
raise ValueError(
f"This scene does not have an item at index {index}"
)
return self.items[index]
def get_items(self) -> List[DatasetItem]:
"""Fetches a sorted list of DatasetItems of the scene for videos uploaded as an array of images.
Returns:
List[:class:`DatasetItem`]: List of DatasetItems, sorted by index ascending.
"""
        assert (
            not self.upload_to_scale or not self.video_location
        ), "Cannot get items from a video without items"
return self.items
def info(self):
"""Fetches information about the video scene.
Returns:
Payload containing::
{
"reference_id": str,
"length": Optional[int],
"frame_rate": int,
"video_url": Optional[str],
}
"""
payload: Dict[str, Any] = {
REFERENCE_ID_KEY: self.reference_id,
}
if self.frame_rate:
payload[FRAME_RATE_KEY] = self.frame_rate
if self.video_location:
payload[VIDEO_URL_KEY] = self.video_location
if self.items:
payload[LENGTH_KEY] = self.length
if self.upload_to_scale:
payload[UPLOAD_TO_SCALE_KEY] = self.upload_to_scale
return payload
@classmethod
def from_json(
cls, payload: dict, client: Optional["NucleusClient"] = None
):
"""Instantiates scene object from schematized JSON dict payload."""
items_payload = payload.get(FRAMES_KEY, [])
items = [DatasetItem.from_json(item) for item in items_payload]
tracks_payload = payload.get(TRACKS_KEY, [])
tracks = (
[
Track.from_json(track, connection=client.connection)
for track in tracks_payload
]
if client
else []
)
return cls(
reference_id=payload[REFERENCE_ID_KEY],
frame_rate=payload.get(FRAME_RATE_KEY, None),
items=items,
metadata=payload.get(METADATA_KEY, {}),
video_location=payload.get(VIDEO_URL_KEY, None),
upload_to_scale=payload.get(UPLOAD_TO_SCALE_KEY, True),
tracks=tracks,
)
def to_payload(self) -> dict:
"""Serializes scene object to schematized JSON dict."""
self.validate()
payload: Dict[str, Any] = {
REFERENCE_ID_KEY: self.reference_id,
}
if self.frame_rate:
payload[FRAME_RATE_KEY] = self.frame_rate
if self.metadata:
payload[METADATA_KEY] = self.metadata
if self.video_location:
payload[VIDEO_URL_KEY] = self.video_location
if self.items:
items_payload = [
item.to_payload(is_scene=True) for item in self.items
]
payload[FRAMES_KEY] = items_payload
if self.upload_to_scale is not None:
payload[UPLOAD_TO_SCALE_KEY] = self.upload_to_scale
if self.tracks:
payload[TRACKS_KEY] = [track.to_payload() for track in self.tracks]
return payload
def to_json(self) -> str:
"""Serializes scene object to schematized JSON string."""
return json.dumps(self.to_payload(), allow_nan=False)
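# Hedged usage sketch (not part of the original module): a VideoScene referencing a
# remote MP4 by URL (no per-frame items), serialized to its upload payload. The bucket
# path and coordinates are placeholders.
def _example_video_scene_payload():
    scene = VideoScene(
        reference_id="video_1",
        video_location="s3://your-bucket/videos/video_1.mp4",
        metadata={"lat": 52.5, "lon": 13.3},
    )
    return scene.to_payload()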
def check_all_scene_paths_remote(
scenes: Union[List[LidarScene], List[VideoScene]]
):
for scene in scenes:
if isinstance(scene, VideoScene) and scene.video_location:
video_location = getattr(scene, VIDEO_LOCATION_KEY)
if video_location and is_local_path(video_location):
raise ValueError(
f"All paths for videos must be remote, but {scene.video_location} is either "
"local, or a remote URL type that is not supported."
)
if isinstance(scene, LidarScene) or scene.items:
for item in scene.get_items():
pointcloud_location = getattr(item, POINTCLOUD_LOCATION_KEY)
if pointcloud_location and is_local_path(pointcloud_location):
raise ValueError(
f"All paths for DatasetItems in a Scene must be remote, but {item.pointcloud_location} is either "
"local, or a remote URL type that is not supported."
)
image_location = getattr(item, IMAGE_LOCATION_KEY)
if image_location and is_local_path(image_location):
raise ValueError(
f"All paths for DatasetItems in a Scene must be remote, but {item.image_location} is either "
"local, or a remote URL type that is not supported."
)
# ---- end of file: /scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/scene.py (pypi) ----
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict
from .annotation import Point3D
from .constants import (
CAMERA_MODEL_KEY,
CX_KEY,
CY_KEY,
FX_KEY,
FY_KEY,
HEADING_KEY,
K1_KEY,
K2_KEY,
K3_KEY,
K4_KEY,
P1_KEY,
P2_KEY,
POSITION_KEY,
)
from .quaternion import Quaternion
REQUIRED_CAMERA_PARAMS_KEYS = {
POSITION_KEY,
HEADING_KEY,
FX_KEY,
FY_KEY,
CX_KEY,
CY_KEY,
}
class CameraModels(str, Enum):
BROWN_CONRADY = "brown_conrady"
FISHEYE = "fisheye"
def __contains__(self, item):
try:
self(item)
except ValueError:
return False
return True
@dataclass
class CameraParams:
"""Camera position/heading used to record the image.
Args:
position (:class:`Point3D`): World-normalized position of the camera
heading (:class:`Quaternion`): Vector4 indicating the quaternion of the
camera direction; note that the z-axis of the camera frame
represents the camera's optical axis. See `Heading Examples
<https://docs.scale.com/reference/data-types-and-the-frame-objects#heading-examples>`_.
fx (float): Focal length in x direction (in pixels).
fy (float): Focal length in y direction (in pixels).
cx (float): Principal point x value.
cy (float): Principal point y value.
"""
position: Point3D
heading: Quaternion
fx: float
fy: float
cx: float
cy: float
camera_model: str
k1: float
k2: float
k3: float
k4: float
p1: float
p2: float
def __post_init__(self):
if self.camera_model is not None:
if self.camera_model not in (k for k in CameraModels):
raise ValueError(
f'Invalid Camera Model, the supported options are "{CameraModels.BROWN_CONRADY}" and "{CameraModels.FISHEYE}"'
)
@classmethod
def from_json(cls, payload: Dict[str, Any]):
"""Instantiates camera params object from schematized JSON dict payload."""
keys = set(payload.keys())
if not keys.issuperset(REQUIRED_CAMERA_PARAMS_KEYS):
raise ValueError(
f"The following fields must be present in the camera_params dictionary: {REQUIRED_CAMERA_PARAMS_KEYS}"
)
return cls(
Point3D.from_json(payload[POSITION_KEY]),
Quaternion.from_json(payload[HEADING_KEY]),
payload[FX_KEY],
payload[FY_KEY],
payload[CX_KEY],
payload[CY_KEY],
payload.get(CAMERA_MODEL_KEY, None),
payload.get(K1_KEY, None),
payload.get(K2_KEY, None),
payload.get(K3_KEY, None),
payload.get(K4_KEY, None),
payload.get(P1_KEY, None),
payload.get(P2_KEY, None),
)
def to_payload(self) -> dict:
"""Serializes camera params object to schematized JSON dict."""
payload = {
POSITION_KEY: self.position.to_payload(),
HEADING_KEY: self.heading.to_payload(),
FX_KEY: self.fx,
FY_KEY: self.fy,
CX_KEY: self.cx,
CY_KEY: self.cy,
}
if self.k1:
payload[K1_KEY] = self.k1
if self.k2:
payload[K2_KEY] = self.k2
if self.k3:
payload[K3_KEY] = self.k3
if self.k4:
payload[K4_KEY] = self.k4
if self.p1:
payload[P1_KEY] = self.p1
if self.p2:
payload[P2_KEY] = self.p2
if self.camera_model:
payload[CAMERA_MODEL_KEY] = self.camera_model
return payload
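# Hedged usage sketch (not part of the original module): round-trips CameraParams
# through from_json/to_payload. Assumes Point3D payloads use lowercase "x"/"y"/"z" keys
# and Quaternion payloads use "x"/"y"/"z"/"w" keys; the intrinsics are placeholders.
def _example_camera_params_roundtrip():
    payload = {
        POSITION_KEY: {"x": 0.0, "y": 0.0, "z": 1.6},
        HEADING_KEY: {"x": 0.0, "y": 0.0, "z": 0.0, "w": 1.0},
        FX_KEY: 1000.0,
        FY_KEY: 1000.0,
        CX_KEY: 960.0,
        CY_KEY: 540.0,
        CAMERA_MODEL_KEY: CameraModels.BROWN_CONRADY.value,
    }
    params = CameraParams.from_json(payload)
    return params.to_payload()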
# ---- end of file: /scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/camera_params.py (pypi) ----
import json
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Sequence, Type, Union
from urllib.parse import urlparse
import numpy as np
from .constants import (
ANNOTATION_ID_KEY,
ANNOTATIONS_KEY,
BOX_TYPE,
CATEGORY_TYPE,
CUBOID_TYPE,
DIMENSIONS_KEY,
EMBEDDING_VECTOR_KEY,
GEOMETRY_KEY,
HEIGHT_KEY,
I_KEY,
INDEX_KEY,
KEYPOINTS_KEY,
KEYPOINTS_NAMES_KEY,
KEYPOINTS_SKELETON_KEY,
KEYPOINTS_TYPE,
LABEL_KEY,
LABELS_KEY,
LINE_TYPE,
MASK_TYPE,
MASK_URL_KEY,
METADATA_KEY,
MULTICATEGORY_TYPE,
POLYGON_TYPE,
POSITION_KEY,
REFERENCE_ID_KEY,
TAXONOMY_NAME_KEY,
TRACK_REFERENCE_ID_KEY,
TYPE_KEY,
VERTICES_KEY,
VISIBLE_KEY,
WIDTH_KEY,
X_KEY,
Y_KEY,
YAW_KEY,
Z_KEY,
)
# TODO: refactor to reduce this file to under 1000 lines.
# pylint: disable=C0302
class Annotation:
"""Internal base class, not to be used directly.
.. todo ::
Inherit common constructor parameters from here
"""
reference_id: str
@classmethod
def from_json(cls, payload: dict):
"""Instantiates annotation object from schematized JSON dict payload."""
type_key_to_type: Dict[str, Type[Annotation]] = {
BOX_TYPE: BoxAnnotation,
LINE_TYPE: LineAnnotation,
POLYGON_TYPE: PolygonAnnotation,
KEYPOINTS_TYPE: KeypointsAnnotation,
CUBOID_TYPE: CuboidAnnotation,
CATEGORY_TYPE: CategoryAnnotation,
MULTICATEGORY_TYPE: MultiCategoryAnnotation,
}
type_key = payload.get(TYPE_KEY, None)
AnnotationCls = type_key_to_type.get(type_key, SegmentationAnnotation)
return AnnotationCls.from_json(payload)
def to_payload(self) -> dict:
"""Serializes annotation object to schematized JSON dict."""
raise NotImplementedError(
"For serialization, use a specific subclass (e.g. SegmentationAnnotation), "
"not the base annotation class."
)
def to_json(self) -> str:
"""Serializes annotation object to schematized JSON string."""
return json.dumps(self.to_payload(), allow_nan=False)
def has_local_files_to_upload(self) -> bool:
"""Returns True if annotation has local files that need to be uploaded.
Nearly all subclasses have no local files, so we default this to just return
false. If the subclass has local files, it should override this method (but
that is not the only thing required to get local upload of files to work.)
"""
return False
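# Hedged usage sketch (not part of the original module): demonstrates the type-key
# dispatch in Annotation.from_json above. A payload tagged with BOX_TYPE deserializes
# into the BoxAnnotation subclass defined below; payloads without a recognized type key
# fall back to SegmentationAnnotation. The payload values are placeholders.
def _example_annotation_dispatch():
    payload = {
        TYPE_KEY: BOX_TYPE,
        LABEL_KEY: "car",
        GEOMETRY_KEY: {X_KEY: 0, Y_KEY: 0, WIDTH_KEY: 10, HEIGHT_KEY: 10},
        REFERENCE_ID_KEY: "image_1",
    }
    annotation = Annotation.from_json(payload)
    assert isinstance(annotation, BoxAnnotation)
    return annotation.to_payload()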
@dataclass # pylint: disable=R0902
class BoxAnnotation(Annotation): # pylint: disable=R0902
"""A bounding box annotation.
::
from nucleus import BoxAnnotation
box = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"},
embedding_vector=[0.1423, 1.432, ..., 3.829],
track_reference_id="car_a",
)
Parameters:
label (str): The label for this annotation.
x (Union[float, int]): The distance, in pixels, between the left border
of the bounding box and the left border of the image.
y (Union[float, int]): The distance, in pixels, between the top border
of the bounding box and the top border of the image.
width (Union[float, int]): The width in pixels of the annotation.
height (Union[float, int]): The height in pixels of the annotation.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
annotation_id (Optional[str]): The annotation ID that uniquely
identifies this annotation within its target dataset item. Upon
ingest, a matching annotation id will be ignored by default, and
overwritten if update=True for dataset.annotate. If no annotation
ID is passed, one will be automatically generated using the label,
x, y, width, and height, so that you can make inserts idempotently
as identical boxes will be ignored.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
embedding_vector: Custom embedding vector for this object annotation.
If any custom object embeddings have been uploaded previously to this dataset,
this vector must match the dimensions of the previously ingested vectors.
track_reference_id: A unique string to identify the annotation as part of a group.
For instance, multiple "car" annotations across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
label: str
x: Union[float, int]
y: Union[float, int]
width: Union[float, int]
height: Union[float, int]
reference_id: str
annotation_id: Optional[str] = None
metadata: Optional[Dict] = None
embedding_vector: Optional[list] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata if self.metadata else {}
if self.annotation_id is None:
self.annotation_id = f"{self.label}-{self.x}-{self.y}-{self.width}-{self.height}-{self.reference_id}"
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
x=geometry.get(X_KEY, 0),
y=geometry.get(Y_KEY, 0),
width=geometry.get(WIDTH_KEY, 0),
height=geometry.get(HEIGHT_KEY, 0),
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
embedding_vector=payload.get(EMBEDDING_VECTOR_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
return {
LABEL_KEY: self.label,
TYPE_KEY: BOX_TYPE,
GEOMETRY_KEY: {
X_KEY: self.x,
Y_KEY: self.y,
WIDTH_KEY: self.width,
HEIGHT_KEY: self.height,
},
REFERENCE_ID_KEY: self.reference_id,
ANNOTATION_ID_KEY: self.annotation_id,
METADATA_KEY: self.metadata,
EMBEDDING_VECTOR_KEY: self.embedding_vector,
TRACK_REFERENCE_ID_KEY: self.track_reference_id,
}
def __eq__(self, other):
return (
self.label == other.label
and self.x == other.x
and self.y == other.y
and self.width == other.width
and self.height == other.height
and self.reference_id == other.reference_id
and self.annotation_id == other.annotation_id
and sorted(self.metadata.items()) == sorted(other.metadata.items())
and self.embedding_vector == other.embedding_vector
and self.track_reference_id == other.track_reference_id
)
@dataclass
class Point:
"""A point in 2D space.
Parameters:
x (float): The x coordinate of the point.
y (float): The y coordinate of the point.
"""
x: float
y: float
@classmethod
def from_json(cls, payload: Dict[str, float]):
return cls(payload[X_KEY], payload[Y_KEY])
def to_payload(self) -> dict:
return {X_KEY: self.x, Y_KEY: self.y}
@dataclass
class LineAnnotation(Annotation):
"""A polyline annotation consisting of an ordered list of 2D points.
A LineAnnotation differs from a PolygonAnnotation by not forming a closed
loop, and by having zero area.
::
from nucleus import LineAnnotation
line = LineAnnotation(
label="face",
vertices=[Point(100, 100), Point(200, 300), Point(300, 200)],
reference_id="person_image_1",
annotation_id="person_image_1_line_1",
metadata={"camera_mode": "portrait"},
track_reference_id="face_human",
)
Parameters:
label (str): The label for this annotation.
vertices (List[:class:`Point`]): The list of points making up the line.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
annotation_id (Optional[str]): The annotation ID that uniquely identifies
this annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
track_reference_id: A unique string to identify the annotation as part of a group.
For instance, multiple "car" annotations across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
label: str
vertices: List[Point]
reference_id: str
annotation_id: Optional[str] = None
metadata: Optional[Dict] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata if self.metadata else {}
if len(self.vertices) > 0:
if not hasattr(self.vertices[0], X_KEY) or not hasattr(
self.vertices[0], "to_payload"
):
try:
self.vertices = [
Point(x=vertex[X_KEY], y=vertex[Y_KEY])
for vertex in self.vertices
]
except KeyError as ke:
raise ValueError(
"Use a point object to pass in vertices. For example, vertices=[nucleus.Point(x=1, y=2)]"
) from ke
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
vertices=[
Point.from_json(_) for _ in geometry.get(VERTICES_KEY, [])
],
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
TYPE_KEY: LINE_TYPE,
GEOMETRY_KEY: {
VERTICES_KEY: [_.to_payload() for _ in self.vertices]
},
REFERENCE_ID_KEY: self.reference_id,
ANNOTATION_ID_KEY: self.annotation_id,
METADATA_KEY: self.metadata,
TRACK_REFERENCE_ID_KEY: self.track_reference_id,
}
return payload
@dataclass
class PolygonAnnotation(Annotation):
"""A polygon annotation consisting of an ordered list of 2D points.
::
from nucleus import PolygonAnnotation
polygon = PolygonAnnotation(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
metadata={"vehicle_color": "yellow"},
embedding_vector=[0.1423, 1.432, ..., 3.829],
track_reference_id="school_bus",
)
Parameters:
label (str): The label for this annotation.
vertices (List[:class:`Point`]): The list of points making up the polygon.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
annotation_id (Optional[str]): The annotation ID that uniquely identifies
this annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
embedding_vector: Custom embedding vector for this object annotation.
If any custom object embeddings have been uploaded previously to this dataset,
this vector must match the dimensions of the previously ingested vectors.
track_reference_id: A unique string to identify the annotation as part of a group.
For instance, multiple "car" annotations across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
label: str
vertices: List[Point]
reference_id: str
annotation_id: Optional[str] = None
metadata: Optional[Dict] = None
embedding_vector: Optional[list] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata if self.metadata else {}
if len(self.vertices) > 0:
if not hasattr(self.vertices[0], X_KEY) or not hasattr(
self.vertices[0], "to_payload"
):
try:
self.vertices = [
Point(x=vertex[X_KEY], y=vertex[Y_KEY])
for vertex in self.vertices
]
except KeyError as ke:
raise ValueError(
"Use a point object to pass in vertices. For example, vertices=[nucleus.Point(x=1, y=2)]"
) from ke
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
vertices=[
Point.from_json(_) for _ in geometry.get(VERTICES_KEY, [])
],
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
embedding_vector=payload.get(EMBEDDING_VECTOR_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
TYPE_KEY: POLYGON_TYPE,
GEOMETRY_KEY: {
VERTICES_KEY: [_.to_payload() for _ in self.vertices]
},
REFERENCE_ID_KEY: self.reference_id,
ANNOTATION_ID_KEY: self.annotation_id,
METADATA_KEY: self.metadata,
EMBEDDING_VECTOR_KEY: self.embedding_vector,
TRACK_REFERENCE_ID_KEY: self.track_reference_id,
}
return payload
@dataclass
class Keypoint:
"""A 2D point that has an additional visibility flag.
Keypoints are intended to be part of a larger collection, and connected
via a pre-defined skeleton. A keypoint in this skeleton may be visible
    or not-visible, and may be unlabeled and not visible. Because of this,
    the x and y coordinates are optional when the keypoint is not visible,
    and such a keypoint is not shown as part of the combined label.
Parameters:
x (Optional[float]): The x coordinate of the point.
y (Optional[float]): The y coordinate of the point.
visible (bool): The visibility of the point.
"""
x: Optional[float] = None
y: Optional[float] = None
visible: bool = True
def __post_init__(self):
if self.visible and (self.x is None or self.y is None):
raise ValueError(
"Visible keypoints must have non-None x and y coordinates"
)
@classmethod
def from_json(cls, payload: Dict[str, Union[float, bool]]):
return cls(
payload.get(X_KEY, None),
payload.get(Y_KEY, None),
bool(payload[VISIBLE_KEY]),
)
def to_payload(self) -> dict:
return {
X_KEY: self.x,
Y_KEY: self.y,
VISIBLE_KEY: self.visible,
}
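# Hedged usage sketch (not part of the original module): illustrates the visibility
# rules enforced in Keypoint.__post_init__ above. Coordinates may be omitted only when
# the keypoint is marked not visible.
def _example_keypoint_visibility():
    labeled = Keypoint(x=10.0, y=20.0)  # visible, coordinates required
    occluded = Keypoint(visible=False)  # not visible, coordinates optional
    try:
        Keypoint(visible=True)  # visible without coordinates raises an error
    except ValueError:
        pass
    return [labeled.to_payload(), occluded.to_payload()]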
@dataclass
class KeypointsAnnotation(Annotation):
"""A keypoints annotation containing a list of keypoints and the structure
of those keypoints: the naming of each point and the skeleton that connects
those keypoints.
::
from nucleus import KeypointsAnnotation
keypoints = KeypointsAnnotation(
label="face",
keypoints=[Keypoint(100, 100), Keypoint(120, 120), Keypoint(visible=False), Keypoint(0, 0)],
names=["point1", "point2", "point3", "point4"],
skeleton=[[0, 1], [1, 2], [1, 3], [2, 3]],
reference_id="image_2",
annotation_id="image_2_face_keypoints_1",
metadata={"face_direction": "forward"},
track_reference_id="face_1",
)
Parameters:
label (str): The label for this annotation.
keypoints (List[:class:`Keypoint`]): The list of keypoints objects.
names (List[str]): A list that corresponds to the names of each keypoint.
skeleton (List[List[int]]): A list of 2-length lists indicating a beginning and ending
index for each line segment in the skeleton of this keypoint label.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
annotation_id (Optional[str]): The annotation ID that uniquely identifies
this annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
track_reference_id: A unique string to identify the annotation as part of a group.
For instance, multiple "car" annotations across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
label: str
keypoints: List[Keypoint]
names: List[str]
skeleton: List[List[int]]
reference_id: str
annotation_id: Optional[str] = None
metadata: Optional[Dict] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata or {}
if len(self.keypoints) != len(self.names):
raise ValueError(
"The list of keypoints must be the same length as the list of names"
)
if len(set(self.names)) != len(self.names):
seen = set()
for name in self.names:
if name in seen:
raise ValueError(
f"The keypoint name '{name}' is repeated in the list of names"
)
seen.add(name)
max_segment_index = len(self.keypoints) - 1
for segment in self.skeleton:
if len(segment) != 2:
raise ValueError(
"The keypoints skeleton must contain a list of line segments with exactly 2 indices"
)
for index in segment:
if index > max_segment_index:
raise ValueError(
f"The skeleton index {index} is not a valid keypoint index"
)
if self.annotation_id is None:
self.annotation_id = f"{self.label}-{self.reference_id}-keypoints"
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
keypoints=[
Keypoint.from_json(_) for _ in geometry.get(KEYPOINTS_KEY, [])
],
names=geometry[KEYPOINTS_NAMES_KEY],
skeleton=geometry[KEYPOINTS_SKELETON_KEY],
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
TYPE_KEY: KEYPOINTS_TYPE,
GEOMETRY_KEY: {
KEYPOINTS_KEY: [_.to_payload() for _ in self.keypoints],
KEYPOINTS_NAMES_KEY: self.names,
KEYPOINTS_SKELETON_KEY: self.skeleton,
},
REFERENCE_ID_KEY: self.reference_id,
ANNOTATION_ID_KEY: self.annotation_id,
METADATA_KEY: self.metadata,
TRACK_REFERENCE_ID_KEY: self.track_reference_id,
}
return payload
@dataclass
class Point3D:
"""A point in 3D space.
Parameters:
x (float): The x coordinate of the point.
y (float): The y coordinate of the point.
z (float): The z coordinate of the point.
"""
x: float
y: float
z: float
@classmethod
def from_json(cls, payload: Dict[str, float]):
return cls(payload[X_KEY], payload[Y_KEY], payload[Z_KEY])
def to_payload(self) -> dict:
return {X_KEY: self.x, Y_KEY: self.y, Z_KEY: self.z}
def to_list(self):
return [self.x, self.y, self.z]
@dataclass
class LidarPoint(Point3D):
"""A Lidar point in 3D space and intensity.
Parameters:
x (float): The x coordinate of the point.
y (float): The y coordinate of the point.
z (float): The z coordinate of the point.
i (float): The intensity value returned by the lidar scan point.
"""
i: float
@classmethod
def from_json(cls, payload: Dict[str, float]):
return cls(
payload[X_KEY], payload[Y_KEY], payload[Z_KEY], payload[I_KEY]
)
def to_payload(self) -> dict:
return {X_KEY: self.x, Y_KEY: self.y, Z_KEY: self.z, I_KEY: self.i}
def to_list(self):
return [self.x, self.y, self.z, self.i]
def to_numpy(self):
return np.array(self.to_list())
@dataclass # pylint: disable=R0902
class CuboidAnnotation(Annotation): # pylint: disable=R0902
"""A 3D Cuboid annotation.
::
from nucleus import CuboidAnnotation
cuboid = CuboidAnnotation(
label="car",
position=Point3D(100, 100, 10),
dimensions=Point3D(5, 10, 5),
yaw=0,
reference_id="pointcloud_1",
annotation_id="pointcloud_1_car_cuboid_1",
metadata={"vehicle_color": "green"},
track_reference_id="red_car",
)
Parameters:
label (str): The label for this annotation.
position (:class:`Point3D`): The point at the center of the cuboid
dimensions (:class:`Point3D`): The length (x), width (y), and height (z) of the cuboid
yaw (float): The rotation, in radians, about the Z axis of the cuboid
reference_id (str): User-defined ID of the image to which to apply this annotation.
annotation_id (Optional[str]): The annotation ID that uniquely identifies this
annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[str]): Arbitrary key/value dictionary of info to attach to this
annotation. Strings, floats and ints are supported best by querying
and insights features within Nucleus. For more details see our `metadata
guide <https://nucleus.scale.com/docs/upload-metadata>`_.
track_reference_id: A unique string to identify the annotation as part of a group.
For instance, multiple "car" annotations across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
label: str
position: Point3D
dimensions: Point3D
yaw: float
reference_id: str
annotation_id: Optional[str] = None
metadata: Optional[Dict] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata if self.metadata else {}
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
position=Point3D.from_json(geometry.get(POSITION_KEY, {})),
dimensions=Point3D.from_json(geometry.get(DIMENSIONS_KEY, {})),
yaw=geometry.get(YAW_KEY, 0),
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
TYPE_KEY: CUBOID_TYPE,
GEOMETRY_KEY: {
POSITION_KEY: self.position.to_payload(),
DIMENSIONS_KEY: self.dimensions.to_payload(),
YAW_KEY: self.yaw,
},
}
payload[REFERENCE_ID_KEY] = self.reference_id
if self.annotation_id:
payload[ANNOTATION_ID_KEY] = self.annotation_id
if self.metadata:
payload[METADATA_KEY] = self.metadata
if self.track_reference_id:
payload[TRACK_REFERENCE_ID_KEY] = self.track_reference_id
return payload
@dataclass
class Segment:
"""Segment represents either a class or an instance depending on the task type.
For semantic segmentation, this object should store the mapping between a single
class index and the string label.
For instance segmentation, you can use this class to store the label of a single
instance, whose extent in the image is represented by the value of ``index``.
In both cases, additional metadata can be attached to the segment.
Parameters:
label (str): The label name of the class for the class or instance
represented by index in the associated mask.
index (int): The integer pixel value in the mask this mapping refers to.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to attach to this segment.
Strings, floats and ints are supported best by querying and insights
features within Nucleus. For more details see our `metadata guide
<https://nucleus.scale.com/docs/upload-metadata>`_.
"""
label: str
index: int
metadata: Optional[dict] = None
@classmethod
def from_json(cls, payload: dict):
return cls(
label=payload.get(LABEL_KEY, ""),
index=payload.get(INDEX_KEY, None),
metadata=payload.get(METADATA_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
INDEX_KEY: self.index,
}
if self.metadata is not None:
payload[METADATA_KEY] = self.metadata
return payload
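# Hedged usage sketch (not part of the original module): the label/index mapping for a
# three-class semantic segmentation mask, where each Segment ties a pixel value in the
# mask to a class name. The labels and metadata are placeholders.
def _example_semantic_segments():
    segments = [
        Segment(label="background", index=0),
        Segment(label="car", index=1),
        Segment(label="pedestrian", index=2, metadata={"difficult": True}),
    ]
    return [segment.to_payload() for segment in segments]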
@dataclass
class SegmentationAnnotation(Annotation):
"""A segmentation mask on a 2D image.
When uploading a mask annotation, Nucleus expects the mask file to be in
PNG format with each pixel being a 0-255 uint8. Currently, Nucleus only
supports uploading masks from URL.
Nucleus automatically enforces the constraint that each DatasetItem can
have at most one ground truth segmentation mask. As a consequence, if
during upload a duplicate mask is detected for a given image, by default it
will be ignored. You can change this behavior by setting ``update = True``,
which will replace the existing segmentation mask with the new mask.
::
from nucleus import SegmentationAnnotation
segmentation = SegmentationAnnotation(
mask_url="s3://your-bucket-name/segmentation-masks/image_2_mask_id_1.png",
annotations=[
Segment(label="grass", index="1"),
Segment(label="road", index="2"),
Segment(label="bus", index="3", metadata={"vehicle_color": "yellow"}),
Segment(label="tree", index="4")
],
reference_id="image_2",
annotation_id="image_2_mask_1",
)
Parameters:
mask_url (str): A URL pointing to the segmentation prediction mask which is
accessible to Scale. This "URL" can also be a path to a local file.
            The mask is an HxW uint8 array saved in PNG format,
with each pixel value ranging from [0, N), where N is the number of
possible classes (for semantic segmentation) or instances (for instance
segmentation).
The height and width of the mask must be the same as the
original image. One example for semantic segmentation: the mask is 0
for pixels where there is background, 1 where there is a car, and 2
where there is a pedestrian.
Another example for instance segmentation: the mask is 0 for one car,
1 for another car, 2 for a motorcycle and 3 for another motorcycle.
The class name for each value in the mask is stored in the list of
Segment objects passed for "annotations"
annotations (List[:class:`Segment`]): The list of mappings between the integer values contained
in mask_url and string class labels. In the semantic segmentation
example above these would map that 0 to background, 1 to car and 2 to
pedestrian. In the instance segmentation example above, 0 and 1 would
both be mapped to car, 2 and 3 would both be mapped to motorcycle
reference_id (str): User-defined ID of the image to which to apply this annotation.
annotation_id (Optional[str]): For segmentation annotations, this value is ignored
because there can only be one segmentation annotation per dataset item.
Therefore regardless of annotation ID, if there is an existing
segmentation on a dataset item, it will be ignored unless update=True
is passed to :meth:`Dataset.annotate`, in which case it will be overwritten.
Storing a custom ID here may be useful in order to tie this annotation
to an external database, and its value will be returned for any export.
"""
mask_url: str
annotations: List[Segment]
reference_id: str
annotation_id: Optional[str] = None
# metadata: Optional[dict] = None # TODO(sc: 422637)
def __post_init__(self):
if not self.mask_url:
raise Exception("You must specify a mask_url.")
@classmethod
def from_json(cls, payload: dict):
if MASK_URL_KEY not in payload:
raise ValueError(f"Missing {MASK_URL_KEY} in json")
return cls(
mask_url=payload[MASK_URL_KEY],
annotations=[
Segment.from_json(ann)
for ann in payload.get(ANNOTATIONS_KEY, [])
],
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
# metadata=payload.get(METADATA_KEY, None), # TODO(sc: 422637)
)
def to_payload(self) -> dict:
payload = {
TYPE_KEY: MASK_TYPE,
MASK_URL_KEY: self.mask_url,
ANNOTATIONS_KEY: [ann.to_payload() for ann in self.annotations],
ANNOTATION_ID_KEY: self.annotation_id,
REFERENCE_ID_KEY: self.reference_id,
# METADATA_KEY: self.metadata, # TODO(sc: 422637)
}
return payload
def has_local_files_to_upload(self) -> bool:
"""Check if the mask url is local and needs to be uploaded."""
if is_local_path(self.mask_url):
if not os.path.isfile(self.mask_url):
raise Exception(f"Mask file {self.mask_url} does not exist.")
return True
return False
    def __eq__(self, other):
        if not isinstance(other, SegmentationAnnotation):
            return False
        # Compare annotations irrespective of ordering, without mutating either object.
        self_annotations = sorted(self.annotations, key=lambda x: x.index)
        other_annotations = sorted(other.annotations, key=lambda x: x.index)
        return (
            (self.annotation_id == other.annotation_id)
            and (self_annotations == other_annotations)
            and (self.mask_url == other.mask_url)
            and (self.reference_id == other.reference_id)
        )
class AnnotationTypes(Enum):
BOX = BOX_TYPE
LINE = LINE_TYPE
POLYGON = POLYGON_TYPE
KEYPOINTS = KEYPOINTS_TYPE
CUBOID = CUBOID_TYPE
CATEGORY = CATEGORY_TYPE
MULTICATEGORY = MULTICATEGORY_TYPE
@dataclass
class CategoryAnnotation(Annotation):
"""A category annotation.
::
from nucleus import CategoryAnnotation
category = CategoryAnnotation(
label="dress",
reference_id="image_1",
taxonomy_name="clothing_type",
metadata={"dress_color": "navy"},
track_reference_id="blue_and_black_dress",
)
Parameters:
label (str): The label for this annotation.
reference_id (str): User-defined ID of the image to which to apply this annotation.
taxonomy_name (Optional[str]): The name of the taxonomy this annotation conforms to.
See :meth:`Dataset.add_taxonomy`.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to attach to this annotation.
Strings, floats and ints are supported best by querying and insights
features within Nucleus. For more details see our `metadata guide
<https://nucleus.scale.com/docs/upload-metadata>`_.
track_reference_id: A unique string to identify the annotation as part of a group.
For instance, multiple "car" annotations across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
label: str
reference_id: str
taxonomy_name: Optional[str] = None
metadata: Optional[Dict] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata if self.metadata else {}
@classmethod
def from_json(cls, payload: dict):
return cls(
label=payload[LABEL_KEY],
reference_id=payload[REFERENCE_ID_KEY],
taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
TYPE_KEY: CATEGORY_TYPE,
GEOMETRY_KEY: {},
REFERENCE_ID_KEY: self.reference_id,
METADATA_KEY: self.metadata,
TRACK_REFERENCE_ID_KEY: self.track_reference_id,
}
if self.taxonomy_name is not None:
payload[TAXONOMY_NAME_KEY] = self.taxonomy_name
return payload
@dataclass
class MultiCategoryAnnotation(Annotation):
"""This class is not yet supported: MultiCategory annotation support coming soon!"""
labels: List[str]
reference_id: str
taxonomy_name: Optional[str] = None
metadata: Optional[Dict] = None
track_reference_id: Optional[str] = None
def __post_init__(self):
self.metadata = self.metadata if self.metadata else {}
@classmethod
def from_json(cls, payload: dict):
return cls(
labels=payload[LABELS_KEY],
reference_id=payload[REFERENCE_ID_KEY],
taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
def to_payload(self) -> dict:
payload = {
LABELS_KEY: self.labels,
TYPE_KEY: MULTICATEGORY_TYPE,
GEOMETRY_KEY: {},
REFERENCE_ID_KEY: self.reference_id,
METADATA_KEY: self.metadata,
TRACK_REFERENCE_ID_KEY: self.track_reference_id,
}
if self.taxonomy_name is not None:
payload[TAXONOMY_NAME_KEY] = self.taxonomy_name
return payload
@dataclass
class SceneCategoryAnnotation(Annotation):
"""A scene category annotation.
::
from nucleus import SceneCategoryAnnotation
category = SceneCategoryAnnotation(
label="running",
reference_id="scene_1",
taxonomy_name="action",
metadata={
"weather": "clear",
},
)
Parameters:
label (str): The label for this annotation.
reference_id (str): User-defined ID of the scene to which to apply this annotation.
taxonomy_name (Optional[str]): The name of the taxonomy this annotation conforms to.
See :meth:`Dataset.add_taxonomy`.
metadata: Arbitrary key/value dictionary of info to attach to this annotation.
Strings, floats and ints are supported best by querying and insights
features within Nucleus. For more details see our `metadata guide
<https://nucleus.scale.com/docs/upload-metadata>`_.
"""
label: str
reference_id: str
taxonomy_name: Optional[str] = None
metadata: Optional[Dict] = field(default_factory=dict)
@classmethod
def from_json(cls, payload: dict):
return cls(
label=payload[LABEL_KEY],
reference_id=payload[REFERENCE_ID_KEY],
taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
)
def to_payload(self) -> dict:
payload = {
LABEL_KEY: self.label,
TYPE_KEY: CATEGORY_TYPE,
GEOMETRY_KEY: {},
REFERENCE_ID_KEY: self.reference_id,
METADATA_KEY: self.metadata,
}
if self.taxonomy_name is not None:
payload[TAXONOMY_NAME_KEY] = self.taxonomy_name
return payload
@dataclass
class AnnotationList:
"""Wrapper class separating a list of annotations by type."""
box_annotations: List[BoxAnnotation] = field(default_factory=list)
line_annotations: List[LineAnnotation] = field(default_factory=list)
polygon_annotations: List[PolygonAnnotation] = field(default_factory=list)
keypoints_annotations: List[KeypointsAnnotation] = field(
default_factory=list
)
cuboid_annotations: List[CuboidAnnotation] = field(default_factory=list)
category_annotations: List[CategoryAnnotation] = field(
default_factory=list
)
multi_category_annotations: List[MultiCategoryAnnotation] = field(
default_factory=list
)
scene_category_annotations: List[SceneCategoryAnnotation] = field(
default_factory=list
)
segmentation_annotations: List[SegmentationAnnotation] = field(
default_factory=list
)
def add_annotations(self, annotations: List[Annotation]):
for annotation in annotations:
assert isinstance(
annotation, Annotation
), "Expected annotation to be of type 'Annotation"
if isinstance(annotation, BoxAnnotation):
self.box_annotations.append(annotation)
elif isinstance(annotation, LineAnnotation):
self.line_annotations.append(annotation)
elif isinstance(annotation, PolygonAnnotation):
self.polygon_annotations.append(annotation)
elif isinstance(annotation, CuboidAnnotation):
self.cuboid_annotations.append(annotation)
elif isinstance(annotation, KeypointsAnnotation):
self.keypoints_annotations.append(annotation)
elif isinstance(annotation, CategoryAnnotation):
self.category_annotations.append(annotation)
elif isinstance(annotation, MultiCategoryAnnotation):
self.multi_category_annotations.append(annotation)
elif isinstance(annotation, SceneCategoryAnnotation):
self.scene_category_annotations.append(annotation)
else:
assert isinstance(
annotation, SegmentationAnnotation
), f"Unexpected annotation type: {type(annotation)}"
self.segmentation_annotations.append(annotation)
def items(self):
return self.__dict__.items()
def __len__(self):
return (
len(self.box_annotations)
+ len(self.line_annotations)
+ len(self.polygon_annotations)
+ len(self.keypoints_annotations)
+ len(self.cuboid_annotations)
+ len(self.category_annotations)
+ len(self.multi_category_annotations)
+ len(self.scene_category_annotations)
+ len(self.segmentation_annotations)
)
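# Illustrative sketch (not part of the library source): how AnnotationList
# buckets a mixed list of annotations by type. The labels and reference IDs
# below are hypothetical placeholders.
def _example_annotation_list_usage():
    annotations = AnnotationList()
    annotations.add_annotations(
        [
            BoxAnnotation(
                label="car", x=0, y=0, width=10, height=10, reference_id="image_1"
            ),
            CategoryAnnotation(
                label="dress", reference_id="image_1", taxonomy_name="clothing_type"
            ),
        ]
    )
    assert len(annotations) == 2
    assert len(annotations.box_annotations) == 1
    assert len(annotations.category_annotations) == 1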
def is_local_path(path: str) -> bool:
return urlparse(path).scheme not in {"https", "http", "s3", "gs"}
def check_all_mask_paths_remote(
annotations: Sequence[Annotation],
):
for annotation in annotations:
if hasattr(annotation, MASK_URL_KEY):
if is_local_path(getattr(annotation, MASK_URL_KEY)):
raise ValueError(
"Found an annotation with a local path, which is not currently"
f"supported for asynchronous upload. Use a remote path instead, or try synchronous upload. {annotation}"
)
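# Illustrative sketch (not part of the library source): is_local_path treats
# anything that is not an http/https/s3/gs URL as local, which is what gates
# the asynchronous-upload check above. Paths below are hypothetical.
def _example_is_local_path():
    assert is_local_path("/tmp/mask.png")
    assert not is_local_path("s3://my-bucket/mask.png")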
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/annotation.py
import asyncio
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, BinaryIO, Callable, Sequence, Tuple
import aiohttp
import nest_asyncio
from tqdm import tqdm
from nucleus.constants import DEFAULT_NETWORK_TIMEOUT_SEC
from nucleus.errors import NucleusAPIError
from nucleus.retry_strategy import RetryStrategy
from .logger import logger
if TYPE_CHECKING:
from . import NucleusClient
@dataclass
class FileFormField:
name: str
filename: str
value: BinaryIO
content_type: str
FileFormData = Sequence[FileFormField]
UPLOAD_SEMAPHORE = asyncio.Semaphore(10)
class FormDataContextHandler:
"""A context handler for file form data that handles closing all files in a request.
Why do I need to wrap my requests in such a funny way?
1. Form data must be regenerated on each request to avoid errors
see https://github.com/Rapptz/discord.py/issues/6531
2. Files must be properly open/closed for each request.
3. We need to be able to do 1/2 above multiple times so that we can implement retries
properly.
Write a function that returns a tuple of form data and file pointers, then pass it to the
constructor of this class, and this class will handle the rest for you.
"""
def __init__(
self,
form_data_and_file_pointers_fn: Callable[
..., Tuple[FileFormData, Sequence[BinaryIO]]
],
):
self._form_data_and_file_pointer_fn = form_data_and_file_pointers_fn
self._file_pointers = None
def __enter__(self):
(
file_form_data,
self._file_pointers,
) = self._form_data_and_file_pointer_fn()
form = aiohttp.FormData()
for field in file_form_data:
form.add_field(
name=field.name,
filename=field.filename,
value=field.value,
content_type=field.content_type,
)
return form
def __exit__(self, exc_type, exc_val, exc_tb):
for file_pointer in self._file_pointers:
file_pointer.close()
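# Illustrative sketch (not part of the library source): building a
# FormDataContextHandler from a generator function, as the docstring above
# suggests. The file path and field names are hypothetical placeholders.
def _example_form_data_handler():
    def _gen():
        file_pointer = open("/tmp/image.png", "rb")  # hypothetical local file
        form_data = [
            FileFormField(
                name="image",
                filename="image.png",
                value=file_pointer,
                content_type="image/png",
            )
        ]
        return form_data, [file_pointer]
    return FormDataContextHandler(_gen)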
def get_event_loop():
try:
loop = asyncio.get_event_loop()
except RuntimeError: # no event loop running:
loop = asyncio.new_event_loop()
else:
nest_asyncio.apply(loop)
return loop
def make_many_form_data_requests_concurrently(
client: "NucleusClient",
requests: Sequence[FormDataContextHandler],
route: str,
progressbar: tqdm,
):
"""
Makes an async post request with form data to a Nucleus endpoint.
Args:
client: The client to use for the request.
        requests: Each request should be a FormDataContextHandler object which will
handle generating form data, and opening/closing files for each request.
route: route for the request.
progressbar: A tqdm progress bar to use for showing progress to the user.
"""
loop = get_event_loop()
return loop.run_until_complete(
form_data_request_helper(client, requests, route, progressbar)
)
async def form_data_request_helper(
client: "NucleusClient",
requests: Sequence[FormDataContextHandler],
route: str,
progressbar: tqdm,
):
"""
Makes an async post request with files to a Nucleus endpoint.
Args:
client: The client to use for the request.
requests: Each request should be a FormDataContextHandler object which will
handle generating form data, and opening/closing files for each request.
route: route for the request.
"""
async with aiohttp.ClientSession() as session:
tasks = [
asyncio.ensure_future(
_post_form_data(
client=client,
request=request,
route=route,
session=session,
progressbar=progressbar,
)
)
for request in requests
]
return await asyncio.gather(*tasks)
async def _post_form_data(
client: "NucleusClient",
request: FormDataContextHandler,
route: str,
session: aiohttp.ClientSession,
progressbar: tqdm,
):
"""
Makes an async post request with files to a Nucleus endpoint.
Args:
client: The client to use for the request.
request: The request to make (See FormDataContextHandler for more details.)
route: route for the request.
session: The session to use for the request.
"""
endpoint = f"{client.endpoint}/{route}"
logger.info("Posting to %s", endpoint)
async with UPLOAD_SEMAPHORE:
for sleep_time in RetryStrategy.sleep_times() + [-1]:
with request as form:
async with session.post(
endpoint,
data=form,
auth=aiohttp.BasicAuth(client.api_key, ""),
timeout=DEFAULT_NETWORK_TIMEOUT_SEC,
) as response:
logger.info(
"API request has response code %s", response.status
)
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
# In case of 404, the server returns text
data = await response.text()
if (
response.status in RetryStrategy.statuses
and sleep_time != -1
):
time.sleep(sleep_time)
continue
if response.status == 503:
raise TimeoutError(
"The request to upload your max is timing out, please lower local_files_per_upload_request in your api call."
)
if not response.ok:
raise NucleusAPIError(
endpoint,
session.post,
aiohttp_response=(
response.status,
response.reason,
data,
),
)
progressbar.update(1)
return data
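# Illustrative sketch (not part of the library source): driving the concurrent
# form-data upload helper with a progress bar. The client, request list, and
# route below are hypothetical placeholders.
def _example_concurrent_upload(client: "NucleusClient", requests_list):
    progressbar = tqdm(total=len(requests_list), desc="Uploading files")
    return make_many_form_data_requests_concurrently(
        client=client,
        requests=requests_list,
        route="dataset/ds_123/uploadFiles",  # hypothetical route
        progressbar=progressbar,
    )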
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/async_utils.py
from typing import List, Optional, Union
import requests
from nucleus.annotation import check_all_mask_paths_remote
from nucleus.annotation_uploader import PredictionUploader
from nucleus.async_job import AsyncJob
from nucleus.utils import (
format_prediction_response,
serialize_and_write_to_presigned_url,
)
from .constants import (
ANNOTATIONS_KEY,
DEFAULT_ANNOTATION_UPDATE_MODE,
REQUEST_ID_KEY,
UPDATE_KEY,
)
from .prediction import (
BoxPrediction,
CuboidPrediction,
PolygonPrediction,
SegmentationPrediction,
from_json,
)
class ModelRun:
"""
This class is deprecated and will be removed from the python client.
"""
def __init__(self, model_run_id: str, dataset_id: str, client):
self.model_run_id = model_run_id
self._client = client
self.dataset_id = dataset_id
def __repr__(self):
return f"ModelRun(model_run_id='{self.model_run_id}', dataset_id='{self.dataset_id}', client={self._client})"
    def __eq__(self, other):
        return (
            self.model_run_id == other.model_run_id
            and self._client == other._client
        )
def info(self) -> dict:
"""
provides information about the Model Run:
model_id -- Model Id corresponding to the run
name -- A human-readable name of the model project.
status -- Status of the Model Run.
metadata -- An arbitrary metadata blob specified for the run.
:return:
{
"model_id": str,
"name": str,
"status": str,
"metadata": Dict[str, Any],
}
"""
return self._client.model_run_info(self.model_run_id)
def commit(self, payload: Optional[dict] = None) -> dict:
"""
Commits the model run. Starts matching algorithm defined by payload.
class_agnostic -- A flag to specify if matching algorithm should be class-agnostic or not.
Default value: True
allowed_label_matches -- An optional list of AllowedMatch objects to specify allowed matches
for ground truth and model predictions.
If specified, 'class_agnostic' flag is assumed to be False
Type 'AllowedMatch':
{
ground_truth_label: string, # A label for ground truth annotation.
model_prediction_label: string, # A label for model prediction that can be matched with
# corresponding ground truth label.
}
payload:
{
"class_agnostic": boolean,
"allowed_label_matches": List[AllowedMatch],
}
:return: {"model_run_id": str}
"""
if payload is None:
payload = {}
return self._client.commit_model_run(self.model_run_id, payload)
def predict(
self,
annotations: List[
Union[
BoxPrediction,
PolygonPrediction,
CuboidPrediction,
SegmentationPrediction,
]
],
update: bool = DEFAULT_ANNOTATION_UPDATE_MODE,
asynchronous: bool = False,
batch_size: int = 5000,
remote_files_per_upload_request: int = 20,
local_files_per_upload_request: int = 10,
) -> Union[dict, AsyncJob]:
"""
Uploads model outputs as predictions for a model_run. Returns info about the upload.
Args:
annotations: Predictions to upload for this model run,
update: If True, existing predictions for the same (reference_id, annotation_id)
will be overwritten. If False, existing predictions will be skipped.
asynchronous: Whether or not to process the upload asynchronously (and
return an :class:`AsyncJob` object). Default is False.
batch_size: Number of predictions processed in each concurrent batch.
Default is 5000. If you get timeouts when uploading geometric annotations,
you can try lowering this batch size. This is only relevant for
asynchronous=False.
remote_files_per_upload_request: Number of remote files to upload in each
request. Segmentations have either local or remote files, if you are
getting timeouts while uploading segmentations with remote urls, you
should lower this value from its default of 20. This is only relevant for
asynchronous=False
local_files_per_upload_request: Number of local files to upload in each
request. Segmentations have either local or remote files, if you are
getting timeouts while uploading segmentations with local files, you
should lower this value from its default of 10. The maximum is 10.
This is only relevant for asynchronous=False
:return:
{
"model_run_id": str,
"predictions_processed": int,
"predictions_ignored": int,
}
"""
uploader = PredictionUploader(
model_run_id=self.model_run_id, client=self._client
)
uploader.check_for_duplicate_ids(annotations)
if asynchronous:
check_all_mask_paths_remote(annotations)
request_id = serialize_and_write_to_presigned_url(
annotations, self.dataset_id, self._client
)
response = self._client.make_request(
payload={REQUEST_ID_KEY: request_id, UPDATE_KEY: update},
route=f"modelRun/{self.model_run_id}/predict?async=1",
)
return AsyncJob.from_json(response, self._client)
return uploader.upload(
annotations=annotations,
update=update,
batch_size=batch_size,
remote_files_per_upload_request=remote_files_per_upload_request,
local_files_per_upload_request=local_files_per_upload_request,
)
def iloc(self, i: int):
"""
        Returns model run info for a dataset item by its absolute index.
        :param i: absolute index of the dataset item within the dataset corresponding to the model run.
        :return: List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction, SegmentationPrediction]]
"""
response = self._client.predictions_iloc(self.model_run_id, i)
return format_prediction_response(response)
def refloc(self, reference_id: str):
"""
Returns Model Run Info For Dataset Item by its reference_id.
:param reference_id: reference_id of a dataset item.
:return: List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction, SegmentationPrediction]],
"""
response = self._client.get(
f"modelRun/{self.model_run_id}/refloc/{reference_id}"
)
return format_prediction_response(response)
def loc(self, dataset_item_id: str):
"""
Returns Model Run Info For Dataset Item by its id.
:param dataset_item_id: internally controlled id for dataset item.
:return:
{
"annotations": List[Box2DPrediction],
}
"""
response = self._client.predictions_loc(
self.model_run_id, dataset_item_id
)
return format_prediction_response(response)
def prediction_loc(self, reference_id: str, annotation_id: str):
"""
Returns info for single Prediction by its reference id and annotation id.
:param reference_id: the user specified id for the image
:param annotation_id: the user specified id for the prediction, or if one was not provided, the Scale internally generated id for the prediction
:return:
BoxPrediction | PolygonPrediction | CuboidPrediction
"""
response = self._client.make_request(
{},
f"modelRun/{self.model_run_id}/prediction/loc/{reference_id}/{annotation_id}",
requests.get,
)
return from_json(response)
def ungrouped_export(self):
json_response = self._client.make_request(
payload={},
route=f"modelRun/{self.model_run_id}/ungrouped",
requests_command=requests.get,
)
return format_prediction_response({ANNOTATIONS_KEY: json_response})
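# Illustrative sketch (not part of the library source): uploading a single box
# prediction through a (deprecated) ModelRun handle. The IDs and client below
# are hypothetical placeholders.
def _example_model_run_predict(client):
    run = ModelRun(model_run_id="run_123", dataset_id="ds_123", client=client)
    return run.predict(
        annotations=[
            BoxPrediction(
                label="car",
                x=0,
                y=0,
                width=10,
                height=10,
                reference_id="image_1",
                confidence=0.9,
            )
        ],
        asynchronous=False,
    )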
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/model_run.py
import io
import json
import uuid
from collections import defaultdict
from typing import IO, TYPE_CHECKING, Dict, List, Sequence, Type, Union
import requests
from requests.models import HTTPError
from nucleus.annotation import (
Annotation,
BoxAnnotation,
CategoryAnnotation,
CuboidAnnotation,
KeypointsAnnotation,
LineAnnotation,
MultiCategoryAnnotation,
PolygonAnnotation,
SegmentationAnnotation,
)
from nucleus.errors import NucleusAPIError
from .constants import (
ANNOTATION_TYPES,
ANNOTATIONS_KEY,
BOX_TYPE,
CATEGORY_TYPE,
CUBOID_TYPE,
EXPORTED_SCALE_TASK_INFO_ROWS,
ITEM_KEY,
KEYPOINTS_TYPE,
LINE_TYPE,
MAX_PAYLOAD_SIZE,
MULTICATEGORY_TYPE,
NEXT_TOKEN_KEY,
PAGE_SIZE_KEY,
PAGE_TOKEN_KEY,
POLYGON_TYPE,
PREDICTIONS_KEY,
REFERENCE_ID_KEY,
SCALE_TASK_INFO_KEY,
SCENE_KEY,
SEGMENTATION_TYPE,
)
from .dataset_item import DatasetItem
from .prediction import (
BoxPrediction,
CategoryPrediction,
CuboidPrediction,
KeypointsPrediction,
LinePrediction,
PolygonPrediction,
SceneCategoryPrediction,
SegmentationPrediction,
)
from .scene import LidarScene, VideoScene
STRING_REPLACEMENTS = {
"\\\\n": "\n",
"\\\\t": "\t",
'\\\\"': '"',
}
if TYPE_CHECKING:
from . import NucleusClient
class KeyErrorDict(dict):
"""Wrapper for response dicts with deprecated keys.
Parameters:
**kwargs: Mapping from the deprecated key to a warning message.
"""
def __init__(self, **kwargs):
self._deprecated = {}
for key, msg in kwargs.items():
if not isinstance(key, str):
raise TypeError(
f"All keys must be strings! Received non-string '{key}'"
)
if not isinstance(msg, str):
raise TypeError(
f"All warning messages must be strings! Received non-string '{msg}'"
)
self._deprecated[key] = msg
super().__init__()
def __missing__(self, key):
"""Raises KeyError for deprecated keys, otherwise uses base dict logic."""
if key in self._deprecated:
raise KeyError(self._deprecated[key])
try:
super().__missing__(key)
except AttributeError as e:
raise KeyError(key) from e
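# Illustrative sketch (not part of the library source): KeyErrorDict raises a
# descriptive KeyError when a deprecated key is accessed. The key and message
# below are hypothetical placeholders.
def _example_key_error_dict():
    response = KeyErrorDict(
        dataset_id="dataset_id is deprecated; use reference_id instead."
    )
    try:
        response["dataset_id"]
    except KeyError as e:
        return str(e)
    return None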
def format_prediction_response(
response: dict,
) -> Union[
dict,
List[
Union[
BoxPrediction,
PolygonPrediction,
LinePrediction,
KeypointsPrediction,
CuboidPrediction,
CategoryPrediction,
SceneCategoryPrediction,
SegmentationPrediction,
]
],
]:
"""Helper function to convert JSON response from endpoints to python objects
Args:
response: JSON dictionary response from REST endpoint.
Returns:
annotation_response: Dictionary containing a list of annotations for each type,
keyed by the type name.
"""
annotation_payload = response.get(ANNOTATIONS_KEY, None)
if not annotation_payload:
# An error occurred
return response
annotation_response = {}
type_key_to_class: Dict[
str,
Union[
Type[BoxPrediction],
Type[PolygonPrediction],
Type[LinePrediction],
Type[CuboidPrediction],
Type[CategoryPrediction],
Type[KeypointsPrediction],
Type[SceneCategoryPrediction],
Type[SegmentationPrediction],
],
] = {
BOX_TYPE: BoxPrediction,
LINE_TYPE: LinePrediction,
POLYGON_TYPE: PolygonPrediction,
CUBOID_TYPE: CuboidPrediction,
CATEGORY_TYPE: CategoryPrediction,
KEYPOINTS_TYPE: KeypointsPrediction,
SEGMENTATION_TYPE: SegmentationPrediction,
}
for type_key in annotation_payload:
type_class = type_key_to_class[type_key]
annotation_response[type_key] = [
type_class.from_json(annotation)
for annotation in annotation_payload[type_key]
]
return annotation_response
def format_dataset_item_response(response: dict) -> dict:
"""Format the raw client response into api objects.
Args:
response: JSON dictionary response from REST endpoint
Returns:
item_dict: A dictionary with two entries, one for the dataset item, and another
for all of the associated annotations.
"""
if ANNOTATIONS_KEY not in response:
raise ValueError(
f"Server response was missing the annotation key: {response}"
)
if ITEM_KEY not in response:
raise ValueError(
f"Server response was missing the item key: {response}"
)
item = response[ITEM_KEY]
annotation_payload = response[ANNOTATIONS_KEY]
annotation_response = {}
for annotation_type in ANNOTATION_TYPES:
if annotation_type in annotation_payload:
annotation_response[annotation_type] = [
Annotation.from_json(ann)
for ann in annotation_payload[annotation_type]
]
return {
ITEM_KEY: DatasetItem.from_json(item),
ANNOTATIONS_KEY: annotation_response,
}
def format_scale_task_info_response(response: dict) -> Union[Dict, List[Dict]]:
"""Format the raw client response into api objects.
Args:
response: JSON dictionary response from REST endpoint
Returns:
A dictionary with two entries, one for the dataset item, and another
for all of the associated Scale tasks.
"""
if EXPORTED_SCALE_TASK_INFO_ROWS not in response:
# Payload is empty so an error occurred
return response
ret = []
for row in response[EXPORTED_SCALE_TASK_INFO_ROWS]:
if ITEM_KEY in row:
ret.append(
{
ITEM_KEY: DatasetItem.from_json(row[ITEM_KEY]),
SCALE_TASK_INFO_KEY: row[SCALE_TASK_INFO_KEY],
}
)
elif SCENE_KEY in row:
ret.append(row)
return ret
def convert_export_payload(api_payload, has_predictions: bool = False):
"""Helper function to convert raw JSON to API objects
Args:
api_payload: JSON dictionary response from REST endpoint
Returns:
return_payload: A list of dictionaries for each dataset item. Each dictionary
is in the same format as format_dataset_item_response: one key for the
dataset item, another for the annotations.
"""
return_payload = []
for row in api_payload:
return_payload_row = {}
return_payload_row[ITEM_KEY] = DatasetItem.from_json(row[ITEM_KEY])
annotations = defaultdict(list)
if row.get(SEGMENTATION_TYPE) is not None:
segmentation = row[SEGMENTATION_TYPE]
segmentation[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[SEGMENTATION_TYPE] = SegmentationAnnotation.from_json(
segmentation
)
for polygon in row[POLYGON_TYPE]:
polygon[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[POLYGON_TYPE].append(
PolygonAnnotation.from_json(polygon)
)
for line in row[LINE_TYPE]:
line[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[LINE_TYPE].append(LineAnnotation.from_json(line))
for keypoints in row[KEYPOINTS_TYPE]:
keypoints[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[KEYPOINTS_TYPE].append(
KeypointsAnnotation.from_json(keypoints)
)
for box in row[BOX_TYPE]:
box[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[BOX_TYPE].append(BoxAnnotation.from_json(box))
for cuboid in row[CUBOID_TYPE]:
cuboid[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[CUBOID_TYPE].append(CuboidAnnotation.from_json(cuboid))
for category in row[CATEGORY_TYPE]:
category[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[CATEGORY_TYPE].append(
CategoryAnnotation.from_json(category)
)
for multicategory in row[MULTICATEGORY_TYPE]:
multicategory[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
annotations[MULTICATEGORY_TYPE].append(
MultiCategoryAnnotation.from_json(multicategory)
)
return_payload_row[
ANNOTATIONS_KEY if not has_predictions else PREDICTIONS_KEY
] = annotations
return_payload.append(return_payload_row)
return return_payload
def serialize_and_write(
upload_units: Sequence[
Union[DatasetItem, Annotation, LidarScene, VideoScene]
],
file_pointer,
):
"""Helper function serialize and write payload to file
Args:
upload_units: Sequence of items, annotations or scenes
file_pointer: Pointer of the file to write to
"""
bytes_written = 0
if len(upload_units) == 0:
raise ValueError(
"Expecting at least one object when serializing objects to upload, but got zero. Please try again."
)
for unit in upload_units:
try:
if isinstance(
unit, (DatasetItem, Annotation, LidarScene, VideoScene)
):
bytes_written += file_pointer.write(unit.to_json() + "\n")
else:
bytes_written += file_pointer.write(json.dumps(unit) + "\n")
except TypeError as e:
type_name = type(unit).__name__
message = (
f"The following {type_name} could not be serialized: {unit}\n"
)
message += (
"This is usually an issue with a custom python object being "
"present in the metadata. Please inspect this error and adjust the "
"metadata so it is json-serializable: only python primitives such as "
"strings, ints, floats, lists, and dicts. For example, you must "
"convert numpy arrays into list or lists of lists.\n"
)
message += f"The specific error was {e}"
raise ValueError(message) from e
if bytes_written > MAX_PAYLOAD_SIZE:
raise ValueError(
f"Payload of {bytes_written} bytes exceed maximum payload size of {MAX_PAYLOAD_SIZE} bytes. Please reduce payload size and try again."
)
def upload_to_presigned_url(presigned_url: str, file_pointer: IO):
# TODO optimize this further to deal with truly huge files and flaky internet connection.
upload_response = requests.put(presigned_url, file_pointer)
if not upload_response.ok:
raise HTTPError(
f"Tried to put a file to url, but failed with status {upload_response.status_code}. The detailed error was: {upload_response.text}"
)
def serialize_and_write_to_presigned_url(
upload_units: Sequence[
Union[DatasetItem, Annotation, LidarScene, VideoScene]
],
dataset_id: str,
client,
):
"""This helper function can be used to serialize a list of API objects to NDJSON."""
request_id = uuid.uuid4().hex
response = client.make_request(
payload={},
route=f"dataset/{dataset_id}/signedUrl/{request_id}",
requests_command=requests.get,
)
strio = io.StringIO()
serialize_and_write(upload_units, strio)
strio.seek(0)
upload_to_presigned_url(response["signed_url"], strio)
return request_id
def replace_double_slashes(s: str) -> str:
for key, val in STRING_REPLACEMENTS.items():
s = s.replace(key, val)
return s
def paginate_generator(
client: "NucleusClient",
endpoint: str,
result_key: str,
page_size: int = 100000,
**kwargs,
):
next_token = None
while True:
try:
response = client.make_request(
{
PAGE_TOKEN_KEY: next_token,
PAGE_SIZE_KEY: page_size,
**kwargs,
},
endpoint,
requests.post,
)
except NucleusAPIError as e:
if e.status_code == 503:
e.message += f"/n Your request timed out while trying to get a page size of {page_size}. Try lowering the page_size."
raise e
next_token = response[NEXT_TOKEN_KEY]
for json_value in response[result_key]:
yield json_value
if not next_token:
break
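# Illustrative sketch (not part of the library source): consuming
# paginate_generator lazily. The endpoint and result key are hypothetical
# placeholders; real routes may differ.
def _example_paginate(client: "NucleusClient"):
    for item_json in paginate_generator(
        client,
        endpoint="dataset/ds_123/itemsPage",
        result_key=ITEM_KEY,
        page_size=1000,
    ):
        yield item_json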
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/utils.py
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Type, Union
from .annotation import (
BoxAnnotation,
CategoryAnnotation,
CuboidAnnotation,
Keypoint,
KeypointsAnnotation,
LineAnnotation,
Point,
Point3D,
PolygonAnnotation,
SceneCategoryAnnotation,
Segment,
SegmentationAnnotation,
)
from .constants import (
ANNOTATION_ID_KEY,
ANNOTATIONS_KEY,
BOX_TYPE,
CATEGORY_TYPE,
CLASS_PDF_KEY,
CONFIDENCE_KEY,
CUBOID_TYPE,
DIMENSIONS_KEY,
EMBEDDING_VECTOR_KEY,
GEOMETRY_KEY,
HEIGHT_KEY,
KEYPOINTS_KEY,
KEYPOINTS_NAMES_KEY,
KEYPOINTS_SKELETON_KEY,
KEYPOINTS_TYPE,
LABEL_KEY,
LINE_TYPE,
MASK_URL_KEY,
METADATA_KEY,
POLYGON_TYPE,
POSITION_KEY,
REFERENCE_ID_KEY,
TAXONOMY_NAME_KEY,
TRACK_REFERENCE_ID_KEY,
TYPE_KEY,
VERTICES_KEY,
WIDTH_KEY,
X_KEY,
Y_KEY,
YAW_KEY,
)
def from_json(payload: dict):
"""Instantiates prediction object from schematized JSON dict payload."""
type_key_to_type: Dict[str, Type[Prediction]] = {
BOX_TYPE: BoxPrediction,
LINE_TYPE: LinePrediction,
POLYGON_TYPE: PolygonPrediction,
KEYPOINTS_TYPE: KeypointsPrediction,
CUBOID_TYPE: CuboidPrediction,
CATEGORY_TYPE: CategoryPrediction,
}
type_key = payload.get(TYPE_KEY, None)
PredictionCls = type_key_to_type.get(type_key, SegmentationPrediction)
return PredictionCls.from_json(payload)
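# Illustrative sketch (not part of the library source): the dispatcher above
# selects the prediction class from the payload's type field and falls back to
# SegmentationPrediction when no known type is present.
def _example_from_json_dispatch():
    payload = {
        TYPE_KEY: BOX_TYPE,
        LABEL_KEY: "car",
        GEOMETRY_KEY: {X_KEY: 0, Y_KEY: 0, WIDTH_KEY: 10, HEIGHT_KEY: 10},
        REFERENCE_ID_KEY: "image_1",
    }
    prediction = from_json(payload)
    assert isinstance(prediction, BoxPrediction)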
class SegmentationPrediction(SegmentationAnnotation):
"""Predicted segmentation mask on a 2D image.
::
from nucleus import SegmentationPrediction
segmentation = SegmentationPrediction(
mask_url="s3://your-bucket-name/pred-seg-masks/image_2_pred_mask_id_1.png",
annotations=[
Segment(label="grass", index="1"),
Segment(label="road", index="2"),
Segment(label="bus", index="3", metadata={"vehicle_color": "yellow"}),
Segment(label="tree", index="4")
],
reference_id="image_2",
annotation_id="image_2_pred_mask_1",
)
Parameters:
mask_url (str): A URL pointing to the segmentation prediction mask which is
accessible to Scale. This "URL" can also be a path to a local file.
The mask is an HxW int8 array saved in PNG format,
with each pixel value ranging from [0, N), where N is the number of
possible classes (for semantic segmentation) or instances (for instance
segmentation).
The height and width of the mask must be the same as the
original image. One example for semantic segmentation: the mask is 0
for pixels where there is background, 1 where there is a car, and 2
where there is a pedestrian.
Another example for instance segmentation: the mask is 0 for one car,
1 for another car, 2 for a motorcycle and 3 for another motorcycle.
The class name for each value in the mask is stored in the list of
Segment objects passed for "annotations"
annotations (List[:class:`Segment`]): The list of mappings between the integer values contained
in mask_url and string class labels. In the semantic segmentation
example above these would map that 0 to background, 1 to car and 2 to
pedestrian. In the instance segmentation example above, 0 and 1 would
both be mapped to car, 2 and 3 would both be mapped to motorcycle
reference_id (str): User-defined ID of the image to which to apply this annotation.
annotation_id (Optional[str]): For segmentation predictions, this value is ignored
because there can only be one segmentation prediction per dataset item.
Therefore regardless of annotation ID, if there is an existing
segmentation on a dataset item, it will be ignored unless update=True
is passed to :meth:`Dataset.annotate`, in which case it will be overwritten.
Storing a custom ID here may be useful in order to tie this annotation
to an external database, and its value will be returned for any export.
"""
@classmethod
def from_json(cls, payload: dict):
return cls(
mask_url=payload[MASK_URL_KEY],
annotations=[
Segment.from_json(ann)
for ann in payload.get(ANNOTATIONS_KEY, [])
],
reference_id=payload[REFERENCE_ID_KEY],
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
# metadata=payload.get(METADATA_KEY, None), # TODO(sc: 422637)
)
class BoxPrediction(BoxAnnotation):
"""Prediction of a bounding box.
Parameters:
label (str): The label for this annotation (e.g. car, pedestrian, bicycle)
x (Union[float, int]): The distance, in pixels, between the left border
of the bounding box and the left border of the image.
y (Union[float, int]): The distance, in pixels, between the top border
of the bounding box and the top border of the image.
width (Union[float, int]): The width in pixels of the annotation.
height (Union[float, int]): The height in pixels of the annotation.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
confidence: 0-1 indicating the confidence of the prediction.
annotation_id (Optional[str]): The annotation ID that uniquely
identifies this annotation within its target dataset item. Upon ingest,
a matching annotation id will be ignored by default, and updated if
update=True for dataset.annotate. If no annotation ID is passed, one
will be automatically generated using the label, x, y, width, and
height, so that you can make inserts idempotently and identical boxes
will be ignored.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
Coordinate metadata may be provided to enable the Map Chart in the Nucleus Dataset charts page.
These values can be specified as `{ "lat": 52.5, "lon": 13.3, ... }`.
class_pdf: An optional complete class probability distribution on this
annotation. Each value should be between 0 and 1 (inclusive), and sum up to
1 as a complete distribution. This can be useful for computing entropy to
surface places where the model is most uncertain.
embedding_vector (Optional[List]): Custom embedding vector for this object annotation.
If any custom object embeddings have been uploaded previously to this dataset,
this vector must match the dimensions of the previously ingested vectors.
track_reference_id: A unique string to identify the prediction as part of a group.
For instance, multiple "car" predictions across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
def __init__(
self,
label: str,
x: Union[float, int],
y: Union[float, int],
width: Union[float, int],
height: Union[float, int],
reference_id: str,
confidence: Optional[float] = None,
annotation_id: Optional[str] = None,
metadata: Optional[Dict] = None,
class_pdf: Optional[Dict] = None,
embedding_vector: Optional[list] = None,
track_reference_id: Optional[str] = None,
):
super().__init__(
label=label,
x=x,
y=y,
width=width,
height=height,
reference_id=reference_id,
annotation_id=annotation_id,
metadata=metadata,
embedding_vector=embedding_vector,
track_reference_id=track_reference_id,
)
self.confidence = confidence
self.class_pdf = class_pdf
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
if self.class_pdf is not None:
payload[CLASS_PDF_KEY] = self.class_pdf
return payload
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
x=geometry.get(X_KEY, 0),
y=geometry.get(Y_KEY, 0),
width=geometry.get(WIDTH_KEY, 0),
height=geometry.get(HEIGHT_KEY, 0),
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
class_pdf=payload.get(CLASS_PDF_KEY, None),
embedding_vector=payload.get(EMBEDDING_VECTOR_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
class LinePrediction(LineAnnotation):
"""Prediction of a line.
Parameters:
label (str): The label for this prediction (e.g. car, pedestrian, bicycle).
vertices (List[:class:`Point`]): The list of points making up the line.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
confidence: 0-1 indicating the confidence of the prediction.
annotation_id (Optional[str]): The annotation ID that uniquely identifies
this annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this prediction. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
class_pdf: An optional complete class probability distribution on this
annotation. Each value should be between 0 and 1 (inclusive), and sum up to
1 as a complete distribution. This can be useful for computing entropy to
surface places where the model is most uncertain.
track_reference_id: A unique string to identify the prediction as part of a group.
For instance, multiple "car" predictions across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
def __init__(
self,
label: str,
vertices: List[Point],
reference_id: str,
confidence: Optional[float] = None,
annotation_id: Optional[str] = None,
metadata: Optional[Dict] = None,
class_pdf: Optional[Dict] = None,
track_reference_id: Optional[str] = None,
):
super().__init__(
label=label,
vertices=vertices,
reference_id=reference_id,
annotation_id=annotation_id,
metadata=metadata,
track_reference_id=track_reference_id,
)
self.confidence = confidence
self.class_pdf = class_pdf
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
if self.class_pdf is not None:
payload[CLASS_PDF_KEY] = self.class_pdf
return payload
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
vertices=[
Point.from_json(_) for _ in geometry.get(VERTICES_KEY, [])
],
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
class_pdf=payload.get(CLASS_PDF_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
class PolygonPrediction(PolygonAnnotation):
"""Prediction of a polygon.
Parameters:
label (str): The label for this annotation (e.g. car, pedestrian, bicycle).
vertices List[:class:`Point`]: The list of points making up the polygon.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
confidence: 0-1 indicating the confidence of the prediction.
annotation_id (Optional[str]): The annotation ID that uniquely identifies
this annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
class_pdf: An optional complete class probability distribution on this
annotation. Each value should be between 0 and 1 (inclusive), and sum up to
1 as a complete distribution. This can be useful for computing entropy to
surface places where the model is most uncertain.
embedding_vector: Custom embedding vector for this object annotation.
If any custom object embeddings have been uploaded previously to this dataset,
this vector must match the dimensions of the previously ingested vectors.
track_reference_id: A unique string to identify the prediction as part of a group.
For instance, multiple "car" predictions across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
def __init__(
self,
label: str,
vertices: List[Point],
reference_id: str,
confidence: Optional[float] = None,
annotation_id: Optional[str] = None,
metadata: Optional[Dict] = None,
class_pdf: Optional[Dict] = None,
embedding_vector: Optional[list] = None,
track_reference_id: Optional[str] = None,
):
super().__init__(
label=label,
vertices=vertices,
reference_id=reference_id,
annotation_id=annotation_id,
metadata=metadata,
embedding_vector=embedding_vector,
track_reference_id=track_reference_id,
)
self.confidence = confidence
self.class_pdf = class_pdf
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
if self.class_pdf is not None:
payload[CLASS_PDF_KEY] = self.class_pdf
return payload
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
vertices=[
Point.from_json(_) for _ in geometry.get(VERTICES_KEY, [])
],
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
class_pdf=payload.get(CLASS_PDF_KEY, None),
embedding_vector=payload.get(EMBEDDING_VECTOR_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
class KeypointsPrediction(KeypointsAnnotation):
"""Prediction of keypoints.
Parameters:
label (str): The label for this annotation (e.g. car, pedestrian, bicycle).
keypoints (List[:class:`Keypoint`]): The list of keypoints objects.
names (List[str]): A list that corresponds to the names of each keypoint.
skeleton (List[List[int]]): A list of 2-length lists indicating a beginning
and ending index for each line segment in the skeleton of this keypoint label.
reference_id (str): User-defined ID of the image to which to apply this
annotation.
confidence: 0-1 indicating the confidence of the prediction.
annotation_id (Optional[str]): The annotation ID that uniquely identifies
this annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[Dict]): Arbitrary key/value dictionary of info to
attach to this annotation. Strings, floats and ints are supported best
by querying and insights features within Nucleus. For more details see
our `metadata guide <https://nucleus.scale.com/docs/upload-metadata>`_.
class_pdf: An optional complete class probability distribution on this
annotation. Each value should be between 0 and 1 (inclusive), and sum up to
1 as a complete distribution. This can be useful for computing entropy to
surface places where the model is most uncertain.
track_reference_id: A unique string to identify the prediction as part of a group.
For instance, multiple "car" predictions across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
def __init__(
self,
label: str,
keypoints: List[Keypoint],
names: List[str],
skeleton: List[List[int]],
reference_id: str,
confidence: Optional[float] = None,
annotation_id: Optional[str] = None,
metadata: Optional[Dict] = None,
class_pdf: Optional[Dict] = None,
track_reference_id: Optional[str] = None,
):
super().__init__(
label=label,
keypoints=keypoints,
names=names,
skeleton=skeleton,
reference_id=reference_id,
annotation_id=annotation_id,
metadata=metadata,
track_reference_id=track_reference_id,
)
self.confidence = confidence
self.class_pdf = class_pdf
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
if self.class_pdf is not None:
payload[CLASS_PDF_KEY] = self.class_pdf
return payload
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
keypoints=[
Keypoint.from_json(_) for _ in geometry.get(KEYPOINTS_KEY, [])
],
names=geometry[KEYPOINTS_NAMES_KEY],
skeleton=geometry[KEYPOINTS_SKELETON_KEY],
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
class_pdf=payload.get(CLASS_PDF_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
class CuboidPrediction(CuboidAnnotation):
"""A prediction of 3D cuboid.
Parameters:
label (str): The label for this annotation (e.g. car, pedestrian, bicycle)
position (:class:`Point3D`): The point at the center of the cuboid
dimensions (:class:`Point3D`): The length (x), width (y), and height (z) of the cuboid
yaw (float): The rotation, in radians, about the Z axis of the cuboid
reference_id (str): User-defined ID of the image to which to apply this annotation.
confidence: 0-1 indicating the confidence of the prediction.
annotation_id (Optional[str]): The annotation ID that uniquely identifies this
annotation within its target dataset item. Upon ingest, a matching
annotation id will be ignored by default, and updated if update=True
for dataset.annotate.
metadata (Optional[str]): Arbitrary key/value dictionary of info to attach to this
annotation. Strings, floats and ints are supported best by querying
and insights features within Nucleus. For more details see our `metadata
guide <https://nucleus.scale.com/docs/upload-metadata>`_.
class_pdf: An optional complete class probability distribution on this
annotation. Each value should be between 0 and 1 (inclusive), and sum up to
1 as a complete distribution. This can be useful for computing entropy to
surface places where the model is most uncertain.
track_reference_id: A unique string to identify the prediction as part of a group.
For instance, multiple "car" predictions across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
def __init__(
self,
label: str,
position: Point3D,
dimensions: Point3D,
yaw: float,
reference_id: str,
confidence: Optional[float] = None,
annotation_id: Optional[str] = None,
metadata: Optional[Dict] = None,
class_pdf: Optional[Dict] = None,
track_reference_id: Optional[str] = None,
):
super().__init__(
label=label,
position=position,
dimensions=dimensions,
yaw=yaw,
reference_id=reference_id,
annotation_id=annotation_id,
metadata=metadata,
track_reference_id=track_reference_id,
)
self.confidence = confidence
self.class_pdf = class_pdf
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
if self.class_pdf is not None:
payload[CLASS_PDF_KEY] = self.class_pdf
return payload
@classmethod
def from_json(cls, payload: dict):
geometry = payload.get(GEOMETRY_KEY, {})
return cls(
label=payload.get(LABEL_KEY, 0),
position=Point3D.from_json(geometry.get(POSITION_KEY, {})),
dimensions=Point3D.from_json(geometry.get(DIMENSIONS_KEY, {})),
yaw=geometry.get(YAW_KEY, 0),
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
annotation_id=payload.get(ANNOTATION_ID_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
class_pdf=payload.get(CLASS_PDF_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
class CategoryPrediction(CategoryAnnotation):
"""A prediction of a category.
Parameters:
label: The label for this annotation (e.g. car, pedestrian, bicycle).
reference_id: The reference ID of the image you wish to apply this annotation to.
taxonomy_name: The name of the taxonomy this annotation conforms to.
See :meth:`Dataset.add_taxonomy`.
confidence: 0-1 indicating the confidence of the prediction.
class_pdf: An optional complete class probability distribution on this
prediction. Each value should be between 0 and 1 (inclusive), and sum up to
1 as a complete distribution. This can be useful for computing entropy to
surface places where the model is most uncertain.
metadata: Arbitrary key/value dictionary of info to attach to this annotation.
Strings, floats and ints are supported best by querying and insights
features within Nucleus. For more details see our `metadata guide
<https://nucleus.scale.com/docs/upload-metadata>`_.
track_reference_id: A unique string to identify the prediction as part of a group.
For instance, multiple "car" predictions across several dataset items may have
the same `track_reference_id` such as "red_car".
"""
def __init__(
self,
label: str,
reference_id: str,
taxonomy_name: Optional[str] = None,
confidence: Optional[float] = None,
metadata: Optional[Dict] = None,
class_pdf: Optional[Dict] = None,
track_reference_id: Optional[str] = None,
):
super().__init__(
label=label,
taxonomy_name=taxonomy_name,
reference_id=reference_id,
metadata=metadata,
track_reference_id=track_reference_id,
)
self.confidence = confidence
self.class_pdf = class_pdf
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
if self.class_pdf is not None:
payload[CLASS_PDF_KEY] = self.class_pdf
return payload
@classmethod
def from_json(cls, payload: dict):
return cls(
label=payload.get(LABEL_KEY, 0),
taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
class_pdf=payload.get(CLASS_PDF_KEY, None),
track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
)
class SceneCategoryPrediction(SceneCategoryAnnotation):
"""A prediction of a category for a scene.
::
from nucleus import SceneCategoryPrediction
category = SceneCategoryPrediction(
label="running",
reference_id="scene_1",
taxonomy_name="action",
confidence=0.83,
metadata={
"weather": "clear",
},
)
Parameters:
label: The label for this annotation (e.g. action, subject, scenario).
reference_id: The reference ID of the scene you wish to apply this annotation to.
taxonomy_name: The name of the taxonomy this annotation conforms to.
See :meth:`Dataset.add_taxonomy`.
confidence: 0-1 indicating the confidence of the prediction.
metadata: Arbitrary key/value dictionary of info to attach to this annotation.
Strings, floats and ints are supported best by querying and insights
features within Nucleus. For more details see our `metadata guide
<https://nucleus.scale.com/docs/upload-metadata>`_.
"""
def __init__(
self,
label: str,
reference_id: str,
taxonomy_name: Optional[str] = None,
confidence: Optional[float] = None,
metadata: Optional[Dict] = None,
):
super().__init__(
label=label,
taxonomy_name=taxonomy_name,
reference_id=reference_id,
metadata=metadata,
)
self.confidence = confidence
def to_payload(self) -> dict:
payload = super().to_payload()
if self.confidence is not None:
payload[CONFIDENCE_KEY] = self.confidence
return payload
@classmethod
def from_json(cls, payload: dict):
return cls(
label=payload.get(LABEL_KEY, 0),
taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
reference_id=payload[REFERENCE_ID_KEY],
confidence=payload.get(CONFIDENCE_KEY, None),
metadata=payload.get(METADATA_KEY, {}),
)
Prediction = Union[
BoxPrediction,
LinePrediction,
PolygonPrediction,
KeypointsPrediction,
CuboidPrediction,
CategoryPrediction,
SceneCategoryPrediction,
SegmentationPrediction,
]
@dataclass
class PredictionList:
"""Wrapper class separating a list of predictions by type."""
box_predictions: List[BoxPrediction] = field(default_factory=list)
line_predictions: List[LinePrediction] = field(default_factory=list)
polygon_predictions: List[PolygonPrediction] = field(default_factory=list)
keypoints_predictions: List[KeypointsPrediction] = field(
default_factory=list
)
cuboid_predictions: List[CuboidPrediction] = field(default_factory=list)
category_predictions: List[CategoryPrediction] = field(
default_factory=list
)
scene_category_predictions: List[SceneCategoryPrediction] = field(
default_factory=list
)
segmentation_predictions: List[SegmentationPrediction] = field(
default_factory=list
)
def items(self):
return self.__dict__.items()
def add_predictions(self, predictions: List[Prediction]):
for prediction in predictions:
if isinstance(prediction, BoxPrediction):
self.box_predictions.append(prediction)
elif isinstance(prediction, LinePrediction):
self.line_predictions.append(prediction)
elif isinstance(prediction, PolygonPrediction):
self.polygon_predictions.append(prediction)
elif isinstance(prediction, KeypointsPrediction):
self.keypoints_predictions.append(prediction)
elif isinstance(prediction, CuboidPrediction):
self.cuboid_predictions.append(prediction)
elif isinstance(prediction, CategoryPrediction):
self.category_predictions.append(prediction)
elif isinstance(prediction, SceneCategoryPrediction):
self.scene_category_predictions.append(prediction)
else:
assert isinstance(
prediction, SegmentationPrediction
), f"Unexpected prediction type: {type(prediction)}"
self.segmentation_predictions.append(prediction)
def __len__(self):
return (
len(self.box_predictions)
+ len(self.line_predictions)
+ len(self.polygon_predictions)
+ len(self.keypoints_predictions)
+ len(self.cuboid_predictions)
+ len(self.category_predictions)
+ len(self.scene_category_predictions)
+ len(self.segmentation_predictions)
)
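# Illustrative sketch (not part of the library source): PredictionList buckets
# mixed prediction types, mirroring AnnotationList. Labels and reference IDs
# below are hypothetical placeholders.
def _example_prediction_list_usage():
    predictions = PredictionList()
    predictions.add_predictions(
        [
            BoxPrediction(
                label="car",
                x=0,
                y=0,
                width=10,
                height=10,
                reference_id="image_1",
                confidence=0.9,
            ),
            CategoryPrediction(label="sedan", reference_id="image_1"),
        ]
    )
    assert len(predictions) == 2
    assert len(predictions.box_predictions) == 1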
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/prediction.py
from typing import Any, Dict, List, Optional, Union
from .annotation import (
BoxAnnotation,
CategoryAnnotation,
CuboidAnnotation,
MultiCategoryAnnotation,
PolygonAnnotation,
SceneCategoryAnnotation,
SegmentationAnnotation,
)
from .constants import (
ANNOTATION_METADATA_SCHEMA_KEY,
ANNOTATION_UPDATE_KEY,
ANNOTATIONS_KEY,
ITEMS_KEY,
LABELS_KEY,
METADATA_KEY,
MODEL_BUNDLE_NAME_KEY,
MODEL_ID_KEY,
MODEL_TAGS_KEY,
NAME_KEY,
REFERENCE_ID_KEY,
SCENES_KEY,
SEGMENTATIONS_KEY,
TAXONOMY_NAME_KEY,
TYPE_KEY,
UPDATE_KEY,
)
from .dataset_item import DatasetItem
from .prediction import (
BoxPrediction,
CategoryPrediction,
CuboidPrediction,
PolygonPrediction,
SceneCategoryPrediction,
SegmentationPrediction,
)
from .scene import LidarScene, VideoScene
def construct_append_payload(
dataset_items: List[DatasetItem], force: bool = False
) -> dict:
items = []
for item in dataset_items:
items.append(item.to_payload())
return (
{ITEMS_KEY: items}
if not force
else {ITEMS_KEY: items, UPDATE_KEY: True}
)
def construct_append_scenes_payload(
scene_list: Union[List[LidarScene], List[VideoScene]],
update: Optional[bool] = False,
) -> dict:
scenes = []
for scene in scene_list:
scenes.append(scene.to_payload())
return {SCENES_KEY: scenes, UPDATE_KEY: update}
def construct_annotation_payload(
annotation_items: List[
Union[
BoxAnnotation,
PolygonAnnotation,
CuboidAnnotation,
CategoryAnnotation,
MultiCategoryAnnotation,
SceneCategoryAnnotation,
SegmentationAnnotation,
]
],
update: bool,
) -> dict:
annotations = [
annotation.to_payload()
for annotation in annotation_items
if not isinstance(annotation, SegmentationAnnotation)
]
segmentations = [
annotation.to_payload()
for annotation in annotation_items
if isinstance(annotation, SegmentationAnnotation)
]
payload: Dict[str, Any] = {ANNOTATION_UPDATE_KEY: update}
if annotations:
payload[ANNOTATIONS_KEY] = annotations
if segmentations:
payload[SEGMENTATIONS_KEY] = segmentations
return payload
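# Illustrative sketch (not part of the library source): construct_annotation_payload
# routes segmentations and all other annotation types to separate payload keys.
# The annotation below is a hypothetical placeholder.
def _example_annotation_payload():
    payload = construct_annotation_payload(
        [
            BoxAnnotation(
                label="car", x=0, y=0, width=10, height=10, reference_id="image_1"
            )
        ],
        update=False,
    )
    assert ANNOTATIONS_KEY in payload
    assert SEGMENTATIONS_KEY not in payload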
def construct_segmentation_payload(
annotation_items: Union[
List[SegmentationAnnotation], List[SegmentationPrediction]
],
update: bool,
) -> dict:
annotations = []
for annotation_item in annotation_items:
annotations.append(annotation_item.to_payload())
return {SEGMENTATIONS_KEY: annotations, ANNOTATION_UPDATE_KEY: update}
def construct_box_predictions_payload(
box_predictions: List[
Union[
BoxPrediction,
PolygonPrediction,
CuboidPrediction,
CategoryPrediction,
SceneCategoryPrediction,
]
],
update: bool,
) -> dict:
predictions = []
for prediction in box_predictions:
predictions.append(prediction.to_payload())
return {ANNOTATIONS_KEY: predictions, ANNOTATION_UPDATE_KEY: update}
def construct_model_creation_payload(
name: str,
reference_id: str,
metadata: Optional[Dict],
bundle_name: Optional[str],
tags: Optional[List[str]],
) -> dict:
payload = {
NAME_KEY: name,
REFERENCE_ID_KEY: reference_id,
METADATA_KEY: metadata if metadata else {},
}
if bundle_name:
payload[MODEL_BUNDLE_NAME_KEY] = bundle_name
if tags:
payload[MODEL_TAGS_KEY] = tags
return payload
def construct_model_run_creation_payload(
name: str,
reference_id: Optional[str],
model_id: Optional[str],
metadata: Optional[Dict],
annotation_metadata_schema: Optional[Dict] = None,
) -> dict:
    payload = {
        NAME_KEY: name,
        METADATA_KEY: metadata if metadata else {},
        ANNOTATION_METADATA_SCHEMA_KEY: annotation_metadata_schema,
    }
    if reference_id:
        payload[REFERENCE_ID_KEY] = reference_id
    if model_id:
        payload[MODEL_ID_KEY] = model_id
    return payload
def construct_taxonomy_payload(
taxonomy_name: str, taxonomy_type: str, labels: List[str], update: bool
) -> dict:
return {
TAXONOMY_NAME_KEY: taxonomy_name,
TYPE_KEY: taxonomy_type,
LABELS_KEY: labels,
UPDATE_KEY: update,
}
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/payload_constructor.py
import time
from dataclasses import dataclass
from typing import Dict, List
import requests
from nucleus.constants import (
JOB_CREATION_TIME_KEY,
JOB_ID_KEY,
JOB_LAST_KNOWN_STATUS_KEY,
JOB_TYPE_KEY,
STATUS_KEY,
)
from nucleus.utils import replace_double_slashes
JOB_POLLING_INTERVAL = 5
@dataclass
class AsyncJob:
"""Object used to check the status or errors of a long running asynchronous operation.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
dataset = client.get_dataset("ds_bwkezj6g5c4g05gqp1eg")
# When kicking off an asynchronous job, store the return value as a variable
job = dataset.append(items=YOUR_DATASET_ITEMS, asynchronous=True)
# Poll for status or errors
print(job.status())
print(job.errors())
# Block until job finishes
job.sleep_until_complete()
"""
job_id: str
job_last_known_status: str
job_type: str
job_creation_time: str
client: "NucleusClient" # type: ignore # noqa: F821
def status(self) -> Dict[str, str]:
"""Fetches status of the job and an informative message on job progress.
Returns:
A dict of the job ID, status (one of Running, Completed, or Errored),
an informative message on the job progress, and number of both completed
and total steps.
::
{
"job_id": "job_c19xcf9mkws46gah0000",
"status": "Completed",
"message": "Job completed successfully.",
"job_progress": "0.33",
"completed_steps": "1",
"total_steps:": "3",
}
"""
response = self.client.make_request(
payload={},
route=f"job/{self.job_id}",
requests_command=requests.get,
)
self.job_last_known_status = response[STATUS_KEY]
return response
def errors(self) -> List[str]:
"""Fetches a list of the latest errors generated by the asynchronous job.
Useful for debugging failed or partially successful jobs.
Returns:
A list of strings containing the 10,000 most recently generated errors.
::
[
'{"annotation":{"label":"car","type":"box","geometry":{"x":50,"y":60,"width":70,"height":80},"referenceId":"bad_ref_id","annotationId":"attempted_annot_upload","metadata":{}},"error":"Item with id bad_ref_id does not exist."}'
]
"""
errors = self.client.make_request(
payload={},
route=f"job/{self.job_id}/errors",
requests_command=requests.get,
)
return [replace_double_slashes(error) for error in errors]
def sleep_until_complete(self, verbose_std_out=True):
"""Blocks until the job completes or errors.
Parameters:
verbose_std_out (Optional[bool]): Whether or not to verbosely log while
sleeping. Defaults to True.
"""
start_time = time.perf_counter()
        while True:
status = self.status()
time.sleep(JOB_POLLING_INTERVAL)
if verbose_std_out:
print(
f"Status at {time.perf_counter() - start_time} s: {status}"
)
if status["status"] == "Running":
continue
break
if verbose_std_out:
print(
f"Finished at {time.perf_counter() - start_time} s: {status}"
)
final_status = status
if final_status["status"] == "Errored":
raise JobError(final_status, self)
@classmethod
def from_json(cls, payload: dict, client):
# TODO: make private
return cls(
job_id=payload[JOB_ID_KEY],
job_last_known_status=payload[JOB_LAST_KNOWN_STATUS_KEY],
job_type=payload[JOB_TYPE_KEY],
job_creation_time=payload[JOB_CREATION_TIME_KEY],
client=client,
)
class JobError(Exception):
def __init__(self, job_status: Dict[str, str], job: AsyncJob):
final_status_message = job_status["message"]
final_status = job_status["status"]
message = (
f"The job reported a final status of {final_status} "
"This could, however, mean a partial success with some successes and some failures. "
f"The final status message was: {final_status_message} \n"
f"For more detailed error messages you can call {str(job)}.errors()"
)
message = replace_double_slashes(message)
super().__init__(message)
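# Hedged usage sketch (illustrative only): wrapping the polling helpers above
# with error handling. Assumes `dataset` and `items` exist as in the AsyncJob
# docstring; the names are placeholders, not part of this module.
#
#     job = dataset.append(items=items, asynchronous=True)
#     try:
#         job.sleep_until_complete(verbose_std_out=False)
#     except JobError:
#         # A final status of "Errored" may still mean partial success;
#         # inspect the per-item errors to see what failed.
#         for err in job.errors():
#             print(err)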
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/async_job.py
| 0.640748 | 0.228565 |
async_job.py
|
pypi
|
from typing import Dict, List, Optional, Union
import requests
from .async_job import AsyncJob
from .constants import METADATA_KEY, MODEL_TAGS_KEY, NAME_KEY, REFERENCE_ID_KEY
from .dataset import Dataset
from .model_run import ModelRun
from .prediction import (
BoxPrediction,
CuboidPrediction,
PolygonPrediction,
SegmentationPrediction,
)
class Model:
"""A model that can be used to upload predictions to a dataset.
By uploading model predictions to Nucleus, you can compare your predictions
to ground truth annotations and discover problems with your Models or
:class:`Dataset`.
You can also upload predictions for unannotated images, letting you query
them based on model predictions. This can help you prioritize which
unlabeled data to label next.
Within Nucleus, Models work in the following way:
1. You first :meth:`create a Model<NucleusClient.create_model>`. You can do this
just once and reuse the model on multiple datasets.
2. You then :meth:`upload predictions <Dataset.upload_predictions>` to a dataset.
3. Trigger :meth:`calculation of metrics <Dataset.calculate_evaluation_metrics>`
in order to view model debugging insights.
    The steps above allow you to visualize model performance within Nucleus,
    or to compare multiple models that have been run on the same Dataset.
Note that you can always add more predictions to a dataset, but then you
will need to re-run the calculation of metrics in order to have them be
correct.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
dataset = client.get_dataset(YOUR_DATASET_ID)
prediction_1 = nucleus.BoxPrediction(
label="label",
x=0,
y=0,
width=10,
height=10,
reference_id="1",
confidence=0.9,
class_pdf={"label": 0.9, "other_label": 0.1},
)
prediction_2 = nucleus.BoxPrediction(
label="label",
x=0,
y=0,
width=10,
height=10,
reference_id="2",
confidence=0.2,
class_pdf={"label": 0.2, "other_label": 0.8},
)
model = client.create_model(
name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"}
)
# For small ingestions, we recommend synchronous ingestion
response = dataset.upload_predictions(model, [prediction_1, prediction_2])
# For large ingestions, we recommend asynchronous ingestion
job = dataset.upload_predictions(
model, [prediction_1, prediction_2], asynchronous=True
)
# Check current status
job.status()
# Sleep until ingestion is done
job.sleep_until_complete()
# Check errors
job.errors()
dataset.calculate_evaluation_metrics(model)
Models cannot be instantiated directly and instead must be created via API
endpoint, using :meth:`NucleusClient.create_model`.
"""
def __init__(
self,
model_id,
name,
reference_id,
metadata,
client,
bundle_name=None,
tags=None,
):
self.id = model_id
self.name = name
self.reference_id = reference_id
self.metadata = metadata
self.bundle_name = bundle_name
self.tags = tags if tags else []
self._client = client
def __repr__(self):
return f"Model(model_id='{self.id}', name='{self.name}', reference_id='{self.reference_id}', metadata={self.metadata}, bundle_name={self.bundle_name}, tags={self.tags}, client={self._client})"
def __eq__(self, other):
return (
(self.id == other.id)
and (self.name == other.name)
and (self.metadata == other.metadata)
and (self._client == other._client)
and (self.bundle_name == other.bundle_name)
)
def __hash__(self):
return hash(self.id)
@classmethod
def from_json(cls, payload: dict, client):
"""Instantiates model object from schematized JSON dict payload."""
return cls(
model_id=payload["id"],
name=payload["name"],
reference_id=payload["ref_id"],
metadata=payload["metadata"] or None,
client=client,
)
def create_run(
self,
name: str,
dataset: Dataset,
predictions: List[
Union[
BoxPrediction,
PolygonPrediction,
CuboidPrediction,
SegmentationPrediction,
]
],
metadata: Optional[Dict] = None,
asynchronous: bool = False,
) -> ModelRun:
# This method, as well as model runs in general are now deprecated.
# Instead models will automatically generate a model run when applied to
# a dataset using dataset.upload_predictions(model, predictions). Therefore
# there is no longer any need to create a model run, since you can upload
# predictions without needing to explicitly create a model run.
# When uploading to a dataset twice using the same model, the same model
# run will be reused by Nucleus.
payload: dict = {
NAME_KEY: name,
REFERENCE_ID_KEY: self.reference_id,
}
if metadata:
payload[METADATA_KEY] = metadata
model_run: ModelRun = self._client.create_model_run(
dataset.id, payload
)
model_run.predict(predictions, asynchronous=asynchronous)
return model_run
def evaluate(self, scenario_test_names: List[str]) -> AsyncJob:
"""Evaluates this on the specified Unit Tests. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
model = client.list_models()[0]
scenario_test = client.validate.create_scenario_test(
"sample_scenario_test", "YOUR_SLICE_ID"
)
model.evaluate(["sample_scenario_test"])
Args:
            scenario_test_names: list of scenario test names to evaluate
Returns:
AsyncJob object of evaluation job
"""
response = self._client.make_request(
{"test_names": scenario_test_names},
f"validate/{self.id}/evaluate",
requests_command=requests.post,
)
return AsyncJob.from_json(response, self._client)
def run(self, dataset_id: str, slice_id: Optional[str]) -> str:
"""Runs inference on the bundle associated with the model on the dataset. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
model = client.list_models()[0]
model.run("ds_123456")
Args:
dataset_id: The ID of the dataset to run inference on.
job_id: The ID of the :class:`AsyncJob` used to track job progress.
slice_id: The ID of the slice of the dataset to run inference on.
"""
response = self._client.make_request(
{"dataset_id": dataset_id, "slice_id": slice_id},
f"model/run/{self.id}/",
requests_command=requests.post,
)
return response
def add_tags(self, tags: List[str]):
"""Tag the model with custom tag names. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
model = client.list_models()[0]
model.add_tags(["tag_A", "tag_B"])
Args:
tags: list of tag names
"""
response: requests.Response = self._client.make_request(
{MODEL_TAGS_KEY: tags},
f"model/{self.id}/tag",
requests_command=requests.post,
return_raw_response=True,
)
if response.ok:
self.tags.extend(tags)
return response.json()
def remove_tags(self, tags: List[str]):
"""Remove tag(s) from the model. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
model = client.list_models()[0]
model.remove_tags(["tag_x"])
Args:
tags: list of tag names to remove
"""
response: requests.Response = self._client.make_request(
{MODEL_TAGS_KEY: tags},
f"model/{self.id}/tag",
requests_command=requests.delete,
return_raw_response=True,
)
if response.ok:
self.tags = list(filter(lambda t: t not in tags, self.tags))
return response.json()
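# Brief illustrative sketch of the tag helpers above (assumes an authenticated
# client with at least one model; the tag name is arbitrary):
#
#     model = client.list_models()[0]
#     model.add_tags(["experiment-42"])
#     assert "experiment-42" in model.tags
#     model.remove_tags(["experiment-42"])
#     assert "experiment-42" not in model.tags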
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/model.py
| 0.944542 | 0.513059 |
model.py
|
pypi
|
import json
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional
import requests
from .constants import (
DATASET_ID_KEY,
METADATA_KEY,
OVERWRITE_KEY,
REFERENCE_ID_KEY,
)
if TYPE_CHECKING:
from . import Connection
@dataclass # pylint: disable=R0902
class Track: # pylint: disable=R0902
"""A track is a class of objects (annotation or prediction) that forms a one-to-many relationship
with objects, wherein an object is an instance of a track.
Args:
reference_id (str): A user-specified name of the track that describes the class of objects it represents.
metadata: Arbitrary key/value dictionary of info to attach to this track.
"""
_connection: "Connection"
dataset_id: str
reference_id: str
metadata: Optional[dict] = None
def __repr__(self):
return f"Track(dataset_id='{self.dataset_id}', reference_id='{self.reference_id}', metadata={self.metadata})"
def __eq__(self, other):
return (
(self.dataset_id == other.dataset_id)
and (self.reference_id == other.reference_id)
and (self.metadata == other.metadata)
)
@classmethod
def from_json(cls, payload: dict, connection: "Connection"):
"""Instantiates track object from schematized JSON dict payload."""
return cls(
_connection=connection,
reference_id=str(payload[REFERENCE_ID_KEY]),
dataset_id=str(payload[DATASET_ID_KEY]),
metadata=payload.get(METADATA_KEY, None),
)
def to_payload(self) -> dict:
"""Serializes track object to schematized JSON dict."""
payload: Dict[str, Any] = {
REFERENCE_ID_KEY: self.reference_id,
DATASET_ID_KEY: self.dataset_id,
METADATA_KEY: self.metadata,
}
return payload
def to_json(self) -> str:
"""Serializes track object to schematized JSON string."""
return json.dumps(self.to_payload(), allow_nan=False)
def update(
self,
metadata: Optional[dict] = None,
overwrite_metadata: bool = False,
) -> None:
"""
Updates the Track's metadata.
Parameters:
metadata (Optional[dict]): An arbitrary dictionary of additional data about this track that can be stored
and retrieved.
overwrite_metadata (Optional[bool]): If metadata is provided and overwrite_metadata = True, then the track's
entire metadata object will be overwritten. Otherwise, only the keys in metadata will be overwritten.
"""
self._connection.make_request(
payload={
REFERENCE_ID_KEY: self.reference_id,
METADATA_KEY: metadata,
OVERWRITE_KEY: overwrite_metadata,
},
route=f"dataset/{self.dataset_id}/tracks",
requests_command=requests.post,
)
self.metadata = (
metadata
if overwrite_metadata
else (
{**self.metadata, **metadata}
if self.metadata is not None and metadata is not None
else metadata
)
)
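# To make the metadata semantics of Track.update concrete, a small sketch
# (assumes `track` is an existing Track whose metadata is {"split": "train"}):
#
#     track.update(metadata={"camera": "front"})
#     # merged locally: {"split": "train", "camera": "front"}
#     track.update(metadata={"camera": "rear"}, overwrite_metadata=True)
#     # replaced: {"camera": "rear"}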
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/track.py
| 0.930411 | 0.160661 |
track.py
|
pypi
|
from typing import Set
from .constants import (
DATASET_ID_KEY,
ERROR_CODES,
ERROR_ITEMS,
ERROR_PAYLOAD,
IGNORED_ITEMS,
NEW_ITEMS,
UPDATED_ITEMS,
)
from .dataset_item import DatasetItem
def json_list_to_dataset_item(item_list):
return [DatasetItem.from_json(item) for item in item_list]
class UploadResponse:
"""Response for long upload job. For internal use only!
Parameters:
json: Payload from which to construct the UploadResponse.
Attributes:
dataset_id: The scale-generated id for the dataset that was uploaded to
new_items: How many items are new in the upload
updated_items: How many items were updated
ignored_items: How many items were ignored
        upload_errors: Number of errors encountered during upload
error_codes: A set of all the error codes encountered during upload
error_payload: The detailed error payload returned from the endpoint.
"""
def __init__(self, json: dict):
dataset_id = json.get(DATASET_ID_KEY)
new_items = json.get(NEW_ITEMS, 0)
updated_items = json.get(UPDATED_ITEMS, 0)
ignored_items = json.get(IGNORED_ITEMS, 0)
upload_errors = json.get(ERROR_ITEMS, 0)
upload_error_payload = json_list_to_dataset_item(
json.get(ERROR_PAYLOAD, [])
)
self.dataset_id = dataset_id
self.new_items = new_items
self.updated_items = updated_items
self.ignored_items = ignored_items
self.upload_errors = upload_errors
self.error_codes: Set[str] = set()
self.error_payload = upload_error_payload
def __repr__(self):
return f"UploadResponse(json={self.json()})"
def __eq__(self, other):
return self.json() == other.json()
def update_response(self, json):
"""
:param json: { new_items: int, updated_items: int, ignored_items: int, upload_errors: int, }
"""
assert self.dataset_id == json.get(DATASET_ID_KEY)
self.new_items += json.get(NEW_ITEMS, 0)
self.updated_items += json.get(UPDATED_ITEMS, 0)
self.ignored_items += json.get(IGNORED_ITEMS, 0)
self.upload_errors += json.get(ERROR_ITEMS, 0)
if json.get(ERROR_PAYLOAD, None):
self.error_payload.extend(json.get(ERROR_PAYLOAD, None))
def record_error(self, response, num_uploads):
"""
        :param response: HTTP response whose status code is recorded as an error code
        :param num_uploads: number of items attempted in the failed upload
"""
status = response.status_code
self.error_codes.add(status)
self.upload_errors += num_uploads
def json(self) -> dict:
"""
return: { new_items: int, updated_items: int, ignored_items: int, upload_errors: int, }
"""
result = {
DATASET_ID_KEY: self.dataset_id,
NEW_ITEMS: self.new_items,
UPDATED_ITEMS: self.updated_items,
IGNORED_ITEMS: self.ignored_items,
ERROR_ITEMS: self.upload_errors,
}
if self.error_payload:
result[ERROR_PAYLOAD] = self.error_payload
if self.error_codes:
result[ERROR_ITEMS] = self.upload_errors
result[ERROR_CODES] = self.error_codes
return result
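# Illustrative only: folding per-chunk responses from a chunked upload into a
# single UploadResponse. `chunk_payloads` is a hypothetical list of response
# dicts that all share the same DATASET_ID_KEY.
#
#     aggregate = UploadResponse(chunk_payloads[0])
#     for payload in chunk_payloads[1:]:
#         aggregate.update_response(payload)
#     print(aggregate.json())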
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/upload_response.py
| 0.856498 | 0.166777 |
upload_response.py
|
pypi
|
from typing import List
from nucleus.async_job import AsyncJob
from nucleus.connection import Connection
from .constants import EVAL_FUNCTION_KEY, SCENARIO_TEST_ID_KEY, EntityLevel
from .data_transfer_objects.eval_function import (
CreateEvalFunction,
EvalFunctionEntry,
GetEvalFunctions,
)
from .data_transfer_objects.scenario_test import (
CreateScenarioTestRequest,
EvalFunctionListEntry,
)
from .errors import CreateScenarioTestError
from .eval_functions.available_eval_functions import AvailableEvalFunctions
from .eval_functions.base_eval_function import EvalFunctionConfig
from .scenario_test import ScenarioTest
SUCCESS_KEY = "success"
EVAL_FUNCTIONS_KEY = "eval_functions"
class Validate:
"""Model CI Python Client extension."""
def __init__(self, api_key: str, endpoint: str):
self.connection = Connection(api_key, endpoint)
def __repr__(self):
return f"Validate(connection='{self.connection}')"
def __eq__(self, other):
return self.connection == other.connection
@property
def eval_functions(self) -> AvailableEvalFunctions:
"""List all available evaluation functions which can be used to set up evaluation criteria.::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
# Creates an EvaluationCriterion by using a comparison op
scenario_test_criterion = client.validate.eval_functions.bbox_iou() > 0.5
Returns:
:class:`AvailableEvalFunctions`: A container for all the available eval functions
"""
response = self.connection.get(
"validate/eval_fn",
)
payload = GetEvalFunctions.parse_obj(response)
return AvailableEvalFunctions(payload.eval_functions)
def create_scenario_test(
self,
name: str,
slice_id: str,
evaluation_functions: List[EvalFunctionConfig],
) -> ScenarioTest:
"""Creates a new Scenario Test from an existing Nucleus :class:`Slice`:. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
scenario_test = client.validate.create_scenario_test(
name="sample_scenario_test",
slice_id="YOUR_SLICE_ID",
evaluation_functions=[client.validate.eval_functions.bbox_iou()]
)
Args:
name: unique name of test
slice_id: id of (pre-defined) slice of items to evaluate test on.
evaluation_functions: :class:`EvalFunctionEntry` defines an evaluation metric for the test.
Created with an element from the list of available eval functions. See :class:`eval_functions`.
Returns:
Created ScenarioTest object.
"""
if not evaluation_functions:
raise CreateScenarioTestError(
"Must pass an evaluation_function to the scenario test! I.e. "
"evaluation_functions=[client.validate.eval_functions.bbox_iou()]"
)
external_fns = [
f.eval_func_entry.is_external_function
for f in evaluation_functions
]
if any(external_fns):
assert all(
external_fns
), "Cannot create scenario tests with mixed placeholder and non-placeholder evaluation functions"
response = self.connection.post(
CreateScenarioTestRequest(
name=name,
slice_id=slice_id,
evaluation_functions=[
EvalFunctionListEntry(
id=ef.id, eval_func_arguments=ef.eval_func_arguments
)
for ef in evaluation_functions
],
).dict(),
"validate/scenario_test",
)
return ScenarioTest.from_id(
response[SCENARIO_TEST_ID_KEY], self.connection
)
    def get_scenario_test(self, scenario_test_id: str) -> ScenarioTest:
        """Fetches a single Scenario Test by its ID."""
response = self.connection.get(
f"validate/scenario_test/{scenario_test_id}",
)
return ScenarioTest.from_id(
response["unit_test"]["id"], self.connection
)
@property
def scenario_tests(self) -> List[ScenarioTest]:
"""Lists all Scenario Tests of the current user. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
scenario_test = client.validate.create_scenario_test(
"sample_scenario_test", "slc_bx86ea222a6g057x4380"
)
client.validate.scenario_tests
Returns:
A list of ScenarioTest objects.
"""
response = self.connection.get(
"validate/scenario_test/details",
)
tests = [
ScenarioTest.from_response(payload, self.connection)
for payload in response
]
return tests
def delete_scenario_test(self, scenario_test_id: str) -> bool:
"""Deletes a Scenario Test. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
scenario_test = client.validate.scenario_tests[0]
success = client.validate.delete_scenario_test(scenario_test.id)
Args:
scenario_test_id: unique ID of scenario test
Returns:
Whether deletion was successful.
"""
response = self.connection.delete(
f"validate/scenario_test/{scenario_test_id}",
)
return response[SUCCESS_KEY]
def evaluate_model_on_scenario_tests(
self, model_id: str, scenario_test_names: List[str]
) -> AsyncJob:
"""Evaluates the given model on the specified Scenario Tests. ::
import nucleus
client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
model = client.list_models()[0]
scenario_test = client.validate.create_scenario_test(
"sample_scenario_test", "slc_bx86ea222a6g057x4380"
)
job = client.validate.evaluate_model_on_scenario_tests(
model_id=model.id,
scenario_test_names=["sample_scenario_test"],
)
job.sleep_until_complete() # Not required. Will block and update on status of the job.
Args:
model_id: ID of model to evaluate
scenario_test_names: list of scenario test names of test to evaluate
Returns:
AsyncJob object of evaluation job
"""
response = self.connection.post(
{"test_names": scenario_test_names},
f"validate/{model_id}/evaluate",
)
return AsyncJob.from_json(response, self.connection)
    def metrics(self, model_id: str):
        """Requests metrics computation for the given model and returns the resulting :class:`AsyncJob` objects."""
response = self.connection.post(
{},
f"validate/{model_id}/metrics",
)
jobs = [AsyncJob.from_json(job, self.connection) for job in response]
return jobs
def create_external_eval_function(
self,
name: str,
level: EntityLevel = EntityLevel.ITEM,
) -> EvalFunctionEntry:
"""Creates a new external evaluation function. This external function can be used to upload evaluation
results with functions defined and computed by the customer, without having to share the source code of the
respective function.
Args:
name: unique name of evaluation function
level: level at which the eval function is run, defaults to EntityLevel.ITEM.
Raises:
- NucleusAPIError if the creation of the function fails on the server side
- ValidationError if the evaluation name is not well defined
Returns:
            Created EvalFunctionEntry object.
"""
response = self.connection.post(
CreateEvalFunction(
name=name,
is_external_function=True,
serialized_fn=None,
raw_source=None,
level=level,
).dict(),
"validate/eval_fn",
)
return EvalFunctionEntry.parse_obj(response[EVAL_FUNCTION_KEY])
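# Hedged end-to-end sketch combining the client methods above (IDs and names
# are placeholders):
#
#     validate = client.validate
#     test = validate.create_scenario_test(
#         name="regression_suite",
#         slice_id="slc_<your_slice>",
#         evaluation_functions=[validate.eval_functions.bbox_iou()],
#     )
#     job = validate.evaluate_model_on_scenario_tests(
#         model_id="YOUR_MODEL_ID",
#         scenario_test_names=["regression_suite"],
#     )
#     job.sleep_until_complete()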
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/validate/client.py
| 0.938343 | 0.370425 |
client.py
|
pypi
|
from typing import Any, Dict, List, Optional
from pydantic import validator
from ...pydantic_base import ImmutableModel
from ..constants import ThresholdComparison
class EvaluationCriterion(ImmutableModel):
"""
An Evaluation Criterion is defined as an evaluation function, threshold, and comparator.
It describes how to apply an evaluation function
Notes:
        To define the evaluation criteria for a scenario test, we provide some syntactic sugar so that the definition
        reads like an actual function call, and we hide implementation details of our data model that would otherwise
        make for a confusing user experience.
Instead of defining criteria like this::
from nucleus.validate.data_transfer_objects.eval_function import (
EvaluationCriterion,
ThresholdComparison,
)
criteria = [
EvaluationCriterion(
eval_function_id="ef_c6m1khygqk400918ays0", # bbox_recall
threshold_comparison=ThresholdComparison.GREATER_THAN,
threshold=0.5,
),
]
we define it like this::
bbox_recall = client.validate.eval_functions.bbox_recall
criteria = [
bbox_recall() > 0.5
]
The chosen method allows us to document the available evaluation functions in an IDE friendly fashion and hides away
details like internal IDs (`"ef_...."`).
The actual `EvaluationCriterion` is created by overloading the comparison operators for the base class of an evaluation
function. Instead of the comparison returning a bool, we've made it create an `EvaluationCriterion` with the correct
signature to send over the wire to our API.
Parameters:
eval_function_id (str): ID of evaluation function
threshold_comparison (:class:`ThresholdComparison`): comparator for evaluation. i.e. threshold=0.5 and threshold_comparator > implies that a test only passes if score > 0.5.
threshold (float): numerical threshold that together with threshold comparison, defines success criteria for test evaluation.
eval_func_arguments: Arguments to pass to the eval function constructor
"""
# TODO: Having only eval_function_id hurts readability -> Add function name
eval_function_id: str
threshold_comparison: ThresholdComparison
threshold: float
eval_func_arguments: Dict[str, Any]
@validator("eval_function_id")
def valid_eval_function_id(cls, v): # pylint: disable=no-self-argument
if not v.startswith("ef_"):
raise ValueError(f"Expected field to start with 'ef_', got '{v}'")
return v
class EvalFunctionEntry(ImmutableModel):
"""Encapsulates information about an evaluation function for Model CI."""
id: str
name: str
is_public: bool
is_external_function: bool = False
user_id: str
serialized_fn: Optional[str] = None
raw_source: Optional[str] = None
class GetEvalFunctions(ImmutableModel):
"""Expected format from GET validate/eval_fn"""
eval_functions: List[EvalFunctionEntry]
class CreateEvalFunction(ImmutableModel):
"""Expected payload to POST validate/eval_fn"""
name: str
is_external_function: bool
serialized_fn: Optional[str] = None
raw_source: Optional[str] = None
level: Optional[str] = None
@validator("name")
def name_is_valid(cls, v): # pylint: disable=no-self-argument
if " " in v:
raise ValueError(
f"No spaces allowed in an evaluation function name, got '{v}'"
)
if len(v) == 0 or len(v) > 255:
raise ValueError(
"Name of evaluation function must be between 1-255 characters long"
)
return v
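# Quick illustrative check of the validators above (a sketch; pydantic wraps
# the raised ValueError in a ValidationError):
#
#     EvaluationCriterion(
#         eval_function_id="not_prefixed",   # rejected: must start with "ef_"
#         threshold_comparison=ThresholdComparison.GREATER_THAN,
#         threshold=0.5,
#         eval_func_arguments={},
#     )
#     CreateEvalFunction(name="has spaces", is_external_function=True)  # rejected: no spaces allowed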
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/validate/data_transfer_objects/eval_function.py
| 0.825555 | 0.547101 |
eval_function.py
|
pypi
|
import abc
from typing import Any, Dict
from ..constants import ThresholdComparison
from ..data_transfer_objects.eval_function import (
EvalFunctionEntry,
EvaluationCriterion,
)
class EvalFunctionConfig(abc.ABC):
"""Abstract base class for concrete implementations of EvalFunctionsConfigs
Operating on this class with comparison operators produces an EvaluationCriterion
"""
def __init__(self, eval_func_entry: EvalFunctionEntry):
self.eval_func_entry = eval_func_entry
self.id = eval_func_entry.id
self.name = eval_func_entry.name
self.eval_func_arguments: Dict[str, Any] = {}
def __repr__(self):
return f"<EvalFunction: name={self.name}, id={self.id}>"
@classmethod
@abc.abstractmethod
def expected_name(cls) -> str:
"""Name to look for in the EvalFunctionDefinitions"""
def __call__(self, **kwargs) -> "EvalFunctionConfig":
"""Adding call to prepare for being able to pass parameters to function
Notes:
Technically now you could do something like eval_function > 0.5 but we want it
to look like eval_function() > 0.5 to support eval_function(parameters) > 0.5
in the future
"""
self.eval_func_arguments.update(**kwargs)
return self
def __gt__(self, other) -> EvaluationCriterion:
return self._op_to_test_metric(ThresholdComparison.GREATER_THAN, other)
def __ge__(self, other) -> EvaluationCriterion:
return self._op_to_test_metric(
ThresholdComparison.GREATER_THAN_EQUAL_TO, other
)
def __lt__(self, other) -> EvaluationCriterion:
return self._op_to_test_metric(ThresholdComparison.LESS_THAN, other)
def __le__(self, other) -> EvaluationCriterion:
return self._op_to_test_metric(
ThresholdComparison.LESS_THAN_EQUAL_TO, other
)
def _op_to_test_metric(self, comparison: ThresholdComparison, value):
return EvaluationCriterion(
eval_function_id=self.eval_func_entry.id,
threshold_comparison=comparison,
threshold=value,
eval_func_arguments=self.eval_func_arguments,
)
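# Because the comparison operators above return EvaluationCriterion objects
# rather than booleans, a configured eval function turns directly into a test
# criterion. Sketch (bbox_iou stands in for any concrete EvalFunctionConfig):
#
#     criterion = bbox_iou(confidence_threshold=0.8) >= 0.5
#     assert criterion.threshold == 0.5
#     assert criterion.threshold_comparison == ThresholdComparison.GREATER_THAN_EQUAL_TO
#     assert criterion.eval_func_arguments == {"confidence_threshold": 0.8}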
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/validate/eval_functions/base_eval_function.py
| 0.857141 | 0.251452 |
base_eval_function.py
|
pypi
|
import itertools
from typing import Callable, Dict, List, Optional, Union
from nucleus.validate.eval_functions.base_eval_function import (
EvalFunctionConfig,
)
from ...metrics.filtering import ListOfAndFilters, ListOfOrAndFilters
from ..data_transfer_objects.eval_function import EvalFunctionEntry
from ..errors import EvalFunctionNotAvailableError
from .config_classes.segmentation import (
SegmentationFWAVACCConfig,
SegmentationIOUConfig,
SegmentationMAPConfig,
SegmentationPrecisionConfig,
SegmentationRecallConfig,
)
# TODO(gunnar) split up into modules
# pylint: disable=too-many-lines
class PolygonIOUConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonIOU` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
            poly_iou = client.validate.eval_functions.poly_iou
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_iou(confidence_threshold=0.8) > 0.5]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "poly_iou"
class PolygonMAPConfig(EvalFunctionConfig):
def __call__(
self,
iou_threshold: float = 0.5,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonMAP` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
            poly_map = client.validate.eval_functions.poly_map
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_map(iou_threshold=0.6) > 0.8]
)
Args:
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
iou_threshold=iou_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "poly_map"
class PolygonRecallConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonRecall` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
            poly_recall = client.validate.eval_functions.poly_recall
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_recall(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "poly_recall"
class PolygonPrecisionConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonPrecision` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
            poly_precision = client.validate.eval_functions.poly_precision
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_precision(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "poly_precision"
class SegmentationToPolyIOUConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonIOU` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
poly_iou: BoundingBoxIOU = client.validate.eval_functions.poly_iou
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_iou(confidence_threshold=0.8) > 0.5]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "segmentation_to_poly_iou"
class SegmentationToPolyMAPConfig(EvalFunctionConfig):
def __call__(
self,
iou_thresholds: Union[List[float], str] = "coco",
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonMAP` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
poly_map: BoundingBoxMeanAveragePrecision= client.validate.eval_functions.poly_map
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_map(iou_threshold=0.6) > 0.8]
)
Args:
            iou_thresholds: IOU thresholds to average over, either a list of values in [0, 1] or "coco" to use the standard COCO thresholds. Default "coco"
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
iou_thresholds=iou_thresholds,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "segmentation_to_poly_map"
class SegmentationToPolyRecallConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonRecall` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
poly_recall: BoundingBoxMeanAveragePrecision= client.validate.eval_functions.poly_recall
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_recall(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "segmentation_to_poly_recall"
class SegmentationToPolyPrecisionConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`PolygonPrecision` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
poly_precision: BoundingBoxMeanAveragePrecision= client.validate.eval_functions.poly_precision
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[poly_precision(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "segmentation_to_poly_precision"
class SegmentationToPolyAveragePrecisionConfig(EvalFunctionConfig):
def __call__(
self,
label: str = "label",
iou_threshold: float = 0.5,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Initializes SegmentationToPolyAveragePrecision object.
Args:
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
label=label,
iou_threshold=iou_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "segmentation_to_poly_ap"
class BoundingBoxIOUConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`BoundingBoxIOU` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
bbox_iou: BoundingBoxIOU = client.validate.eval_functions.bbox_iou
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[bbox_iou(confidence_threshold=0.8) > 0.5]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "bbox_iou"
class BoundingBoxMAPConfig(EvalFunctionConfig):
def __call__(
self,
iou_threshold: float = 0.5,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`BoundingBoxMAP` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
            bbox_map = client.validate.eval_functions.bbox_map
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[bbox_map(iou_threshold=0.6) > 0.8]
)
Args:
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
iou_threshold=iou_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "bbox_map"
class BoundingBoxRecallConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`BoundingBoxRecall` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
            bbox_recall = client.validate.eval_functions.bbox_recall
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[bbox_recall(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "bbox_recall"
class BoundingBoxPrecisionConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`BoundingBoxPrecision` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
bbox_precision: BoundingBoxPrecision = client.validate.eval_functions.bbox_precision
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[bbox_precision(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "bbox_precision"
class CuboidIOU2DConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configure a call to CuboidIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
iou_2d=True,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "cuboid_iou_2d"
class CuboidIOU3DConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configure a call to CuboidIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
iou_2d=False,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "cuboid_iou_3d"
class CuboidPrecisionConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configure a call to CuboidPrecision object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "cuboid_precision"
class CuboidRecallConfig(EvalFunctionConfig):
def __call__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configure a call to a CuboidRecall object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
enforce_label_match=enforce_label_match,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "cuboid_recall"
class CategorizationF1Config(EvalFunctionConfig):
def __call__(
self,
confidence_threshold: Optional[float] = None,
f1_method: Optional[str] = None,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
""" Configure an evaluation of :class:`CategorizationF1`.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
cat_f1: CategorizationF1 = client.validate.eval_functions.cat_f1
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[cat_f1(confidence_threshold=0.6, f1_method="weighted") > 0.7]
)
Args:
confidence_threshold: minimum confidence threshold for predictions to be taken into account for evaluation.
Must be in [0, 1]. Defaults to None.
f1_method: {'micro', 'macro', 'samples', 'weighted', 'binary'}, \
default='macro'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
confidence_threshold=confidence_threshold,
f1_method=f1_method,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
@classmethod
def expected_name(cls) -> str:
return "cat_f1"
class CustomEvalFunction(EvalFunctionConfig):
@classmethod
def expected_name(cls) -> str:
raise NotImplementedError(
"Custom evaluation functions are coming soon"
) # Placeholder: See super().eval_func_entry for actual name
class ExternalEvalFunction(EvalFunctionConfig):
def __call__(self, **kwargs):
raise NotImplementedError("Cannot call an external function")
@classmethod
def expected_name(cls) -> str:
return "external_function"
class StandardEvalFunction(EvalFunctionConfig):
"""Class for standard Model CI eval functions that have not been added as attributes on
AvailableEvalFunctions yet.
"""
@classmethod
def expected_name(cls) -> str:
return "public_function" # Placeholder: See super().eval_func_entry for actual name
class EvalFunctionNotAvailable(EvalFunctionConfig):
def __init__(
self, not_available_name: str
): # pylint: disable=super-init-not-called
self.not_available_name = not_available_name
def __call__(self, *args, **kwargs):
self._raise_error()
def _op_to_test_metric(self, *args, **kwargs):
self._raise_error()
def _raise_error(self):
raise EvalFunctionNotAvailableError(
f"Eval function '{self.not_available_name}' is not available to the current user. "
f"Is Model CI enabled for the user?"
)
@classmethod
def expected_name(cls) -> str:
return "public_function" # Placeholder: See super().eval_func_entry for actual name
EvalFunction = Union[
BoundingBoxIOUConfig,
BoundingBoxMAPConfig,
BoundingBoxPrecisionConfig,
BoundingBoxRecallConfig,
CuboidRecallConfig,
CuboidIOU2DConfig,
CuboidIOU3DConfig,
CuboidPrecisionConfig,
CategorizationF1Config,
CustomEvalFunction,
ExternalEvalFunction,
EvalFunctionNotAvailable,
StandardEvalFunction,
PolygonMAPConfig,
PolygonIOUConfig,
PolygonRecallConfig,
PolygonPrecisionConfig,
SegmentationToPolyRecallConfig,
SegmentationToPolyIOUConfig,
SegmentationToPolyMAPConfig,
SegmentationToPolyPrecisionConfig,
SegmentationToPolyAveragePrecisionConfig,
SegmentationFWAVACCConfig,
SegmentationIOUConfig,
SegmentationPrecisionConfig,
SegmentationRecallConfig,
SegmentationMAPConfig,
]
class AvailableEvalFunctions:
"""Collection class that acts as a common entrypoint to access evaluation functions. Standard evaluation functions
provided by Scale are attributes of this class.
The available evaluation functions are listed in the sample below::
e = client.validate.eval_functions
unit_test_criteria = [
e.bbox_iou() > 5,
e.bbox_map() > 0.95,
e.bbox_precision() > 0.8,
e.bbox_recall() > 0.5,
]
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, available_functions: List[EvalFunctionEntry]):
assert (
available_functions
), "Passed no available functions for current user. Is the feature flag enabled?"
self._public_func_entries: Dict[str, EvalFunctionEntry] = {
f.name: f for f in available_functions if f.is_public
}
# NOTE: Public functions are assigned as attributes below via _assign_eval_function_if_defined
self._public_to_function: Dict[str, EvalFunctionConfig] = {}
self._custom_to_function: Dict[str, CustomEvalFunction] = {
f.name: CustomEvalFunction(f)
for f in available_functions
if not f.is_public and not f.is_external_function
}
self._external_to_function: Dict[str, ExternalEvalFunction] = {
f.name: ExternalEvalFunction(f)
for f in available_functions
if f.is_external_function
}
self.bbox_iou: BoundingBoxIOUConfig = (
self._assign_eval_function_if_defined(BoundingBoxIOUConfig)
) # type: ignore
self.bbox_precision: BoundingBoxPrecisionConfig = self._assign_eval_function_if_defined(
BoundingBoxPrecisionConfig # type: ignore
)
self.bbox_recall: BoundingBoxRecallConfig = self._assign_eval_function_if_defined(
BoundingBoxRecallConfig # type: ignore
)
self.bbox_map: BoundingBoxMAPConfig = self._assign_eval_function_if_defined(
BoundingBoxMAPConfig # type: ignore
)
self.cat_f1: CategorizationF1Config = self._assign_eval_function_if_defined(
CategorizationF1Config # type: ignore
)
self.cuboid_iou_2d: CuboidIOU2DConfig = self._assign_eval_function_if_defined(CuboidIOU2DConfig) # type: ignore
self.cuboid_iou_3d: CuboidIOU3DConfig = self._assign_eval_function_if_defined(CuboidIOU3DConfig) # type: ignore
self.cuboid_precision: CuboidPrecisionConfig = (
self._assign_eval_function_if_defined(CuboidPrecisionConfig)
) # type: ignore
self.cuboid_recall: CuboidRecallConfig = (
self._assign_eval_function_if_defined(CuboidRecallConfig)
) # type: ignore
self.poly_iou: PolygonIOUConfig = self._assign_eval_function_if_defined(PolygonIOUConfig) # type: ignore
self.poly_precision: PolygonPrecisionConfig = self._assign_eval_function_if_defined(
PolygonPrecisionConfig # type: ignore
)
self.poly_recall: PolygonRecallConfig = self._assign_eval_function_if_defined(
PolygonRecallConfig # type: ignore
)
self.poly_map: PolygonMAPConfig = self._assign_eval_function_if_defined(
PolygonMAPConfig # type: ignore
)
self.segmentation_to_poly_iou: SegmentationToPolyIOUConfig = (
self._assign_eval_function_if_defined(SegmentationToPolyIOUConfig)
) # type: ignore
self.segmentation_to_poly_precision: SegmentationToPolyPrecisionConfig = self._assign_eval_function_if_defined(
SegmentationToPolyPrecisionConfig # type: ignore
)
self.segmentation_to_poly_recall: SegmentationToPolyRecallConfig = self._assign_eval_function_if_defined(
SegmentationToPolyRecallConfig # type: ignore
)
self.segmentation_to_poly_map: SegmentationToPolyMAPConfig = self._assign_eval_function_if_defined(
SegmentationToPolyMAPConfig # type: ignore
)
self.segmentation_to_poly_ap: SegmentationToPolyAveragePrecisionConfig = self._assign_eval_function_if_defined(
SegmentationToPolyAveragePrecisionConfig # type: ignore
)
self.seg_iou: SegmentationIOUConfig = self._assign_eval_function_if_defined(
SegmentationIOUConfig # type: ignore
)
self.seg_recall: SegmentationRecallConfig = self._assign_eval_function_if_defined(
SegmentationRecallConfig # type: ignore
)
self.seg_map: SegmentationMAPConfig = self._assign_eval_function_if_defined(
SegmentationMAPConfig # type: ignore
)
self.seg_precision: SegmentationPrecisionConfig = self._assign_eval_function_if_defined(
SegmentationPrecisionConfig # type: ignore
)
self.seg_fwavacc: SegmentationFWAVACCConfig = self._assign_eval_function_if_defined(
SegmentationFWAVACCConfig # type: ignore
)
# Add public entries that have not been implemented as an attribute on this class
for func_entry in self._public_func_entries.values():
if func_entry.name not in self._public_to_function:
self._public_to_function[
func_entry.name
] = StandardEvalFunction(func_entry)
def __repr__(self):
"""Standard functions are ones Scale provides and custom ones customer defined"""
# NOTE: setting to lower to be consistent with attribute names
functions_lower = [
str(name).lower() for name in self._public_func_entries.keys()
]
return (
f"<AvailableEvaluationFunctions: public: {functions_lower} "
f"private: {list(self._custom_to_function.keys())} "
f"external: {list(self._external_to_function.keys())}"
)
@property
def public_functions(self) -> Dict[str, EvalFunctionConfig]:
"""Standard functions provided by Model CI.
Notes:
These functions are also available as attributes on :class:`AvailableEvalFunctions`
Returns:
Dict of function name to :class:`BaseEvalFunction`.
"""
return self._public_to_function
@property
def private_functions(self) -> Dict[str, CustomEvalFunction]:
"""Private functions uploaded to Model CI
Returns:
Dict of function name to :class:`CustomEvalFunction`.
"""
return self._custom_to_function
@property
def external_functions(self) -> Dict[str, ExternalEvalFunction]:
"""External functions uploaded to Model CI
Returns:
Dict of function name to :class:`ExternalEvalFunction`.
"""
return self._external_to_function
def _assign_eval_function_if_defined(
self,
eval_function_constructor: Callable[[EvalFunctionEntry], EvalFunction],
):
"""Helper function for book-keeping and assignment of standard Scale provided functions that are accessible
via attribute access
"""
# TODO(gunnar): Too convoluted .. simplify
expected_name = eval_function_constructor.expected_name() # type: ignore
if expected_name in self._public_func_entries:
definition = self._public_func_entries[expected_name]
eval_function = eval_function_constructor(definition)
self._public_to_function[expected_name] = eval_function # type: ignore
return eval_function
else:
return EvalFunctionNotAvailable(expected_name)
def from_id(self, eval_function_id: str):
for eval_func in itertools.chain(
self._public_to_function.values(),
self._custom_to_function.values(),
self._external_to_function.values(),
):
if eval_func.id == eval_function_id:
return eval_func
raise EvalFunctionNotAvailableError(
f"Could not find Eval Function with id {eval_function_id}"
)
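# Illustrative sketch (not part of the original module): composing evaluation criteria from
# the attributes assigned in __init__ above and looking an eval function up by id. The
# client object and the id string are hypothetical placeholders.
def _example_available_eval_functions(client):
    e = client.validate.eval_functions
    criteria = [
        e.bbox_recall(iou_threshold=0.5, confidence_threshold=0.4) > 0.8,
        e.bbox_precision(iou_threshold=0.5) > 0.7,
    ]
    # from_id searches the public, private and external registries in order.
    fn = e.from_id("ef_hypothetical_id")
    return criteria, fn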
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/validate/eval_functions/available_eval_functions.py
from typing import Optional, Union
from nucleus.validate.eval_functions.base_eval_function import (
EvalFunctionConfig,
)
from ....metrics.filtering import ListOfAndFilters, ListOfOrAndFilters
class SegmentationIOUConfig(EvalFunctionConfig):
def __call__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`SegmentationIOU` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
seg_iou: SegmentationIOU = client.validate.eval_functions.seg_iou
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[seg_iou(confidence_threshold=0.8) > 0.5]
)
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "seg_iou"
class SegmentationMAPConfig(EvalFunctionConfig):
def __call__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`SegmentationMAP` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
seg_map: SegmentationMAP = client.validate.eval_functions.seg_map
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[seg_map(iou_threshold=0.6) > 0.8]
)
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "seg_map"
class SegmentationRecallConfig(EvalFunctionConfig):
def __call__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`SegmentationRecall` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
seg_recall = client.validate.eval_functions.seg_recall
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[seg_recall(confidence_threshold=0.4) > 0.9]
)
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "seg_recall"
class SegmentationPrecisionConfig(EvalFunctionConfig):
def __call__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Configures a call to :class:`SegmentationPrecision` object.
::
import nucleus
client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
seg_precision: SegmentationPrecision = client.validate.eval_functions.seg_precision
slice_id = "slc_<your_slice>"
scenario_test = client.validate.create_scenario_test(
"Example test",
slice_id=slice_id,
evaluation_criteria=[seg_precision(iou_threshold=0.6, confidence_threshold=0.4) > 0.9]
)
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "seg_precision"
class SegmentationFWAVACCConfig(EvalFunctionConfig):
def __call__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
**kwargs,
):
"""Initializes SegmentationFWAVACC object.
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
return super().__call__(
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
**kwargs,
)
@classmethod
def expected_name(cls) -> str:
return "seg_fwavacc"
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/validate/eval_functions/config_classes/segmentation.py
import abc
from typing import List, Optional, Set, Tuple, Union
import numpy as np
from nucleus.annotation import AnnotationList, Segment, SegmentationAnnotation
from nucleus.metrics.base import MetricResult
from nucleus.metrics.filtering import ListOfAndFilters, ListOfOrAndFilters
from nucleus.prediction import PredictionList, SegmentationPrediction
from .base import Metric, ScalarResult
from .segmentation_loader import DummyLoader, SegmentationMaskLoader
from .segmentation_utils import (
FALSE_POSITIVES,
convert_to_instance_seg_confusion,
fast_confusion_matrix,
non_max_suppress_confusion,
setup_iou_thresholds,
)
# pylint: disable=useless-super-delegation
class SegmentationMaskMetric(Metric):
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
iou_threshold: float = 0.5,
):
"""Initializes PolygonMetric abstract object.
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
# TODO -> add custom filtering to Segmentation(Annotation|Prediction).annotations.(metadata|label)
super().__init__(annotation_filters, prediction_filters)
self.loader: SegmentationMaskLoader = DummyLoader()
self.iou_threshold = iou_threshold
def call_metric(
self, annotations: AnnotationList, predictions: PredictionList
) -> ScalarResult:
assert (
len(annotations.segmentation_annotations) <= 1
), f"Expected only one segmentation mask, got {annotations.segmentation_annotations}"
assert (
len(predictions.segmentation_predictions) <= 1
), f"Expected only one segmentation mask, got {predictions.segmentation_predictions}"
annotation = (
annotations.segmentation_annotations[0]
if annotations.segmentation_annotations
else None
)
prediction = (
predictions.segmentation_predictions[0]
if predictions.segmentation_predictions
else None
)
if (
annotation
and prediction
and annotation.annotations
and prediction.annotations
):
annotation_img = self.get_mask_channel(annotation)
pred_img = self.get_mask_channel(prediction)
return self._metric_impl(
np.asarray(annotation_img, dtype=np.int32),
np.asarray(pred_img, dtype=np.int32),
annotation,
prediction,
)
else:
return ScalarResult(0, weight=0)
def get_mask_channel(self, ann_or_pred):
"""Some annotations are stored as RGB instead of L (single-channel).
We expect the image to be faux-single-channel with all the channels repeating so we choose the first one.
"""
img = self.loader.fetch(ann_or_pred.mask_url)
if len(img.shape) > 2:
# TODO: Do we have to do anything more advanced? Currently expect all channels to have same data
min_dim = np.argmin(img.shape)
if min_dim == 0:
img = img[0, :, :]
elif min_dim == 1:
img = img[:, 0, :]
else:
img = img[:, :, 0]
return img
@abc.abstractmethod
def _metric_impl(
self,
annotation_img: "np.ndarray",
prediction_img: "np.ndarray",
annotation: SegmentationAnnotation,
prediction: SegmentationPrediction,
):
pass
def _calculate_confusion_matrix(
self,
annotation,
annotation_img,
prediction,
prediction_img,
iou_threshold,
) -> Tuple["np.ndarray", Set[int]]:
"""This calculates a confusion matrix with ground_truth_index X predicted_index summary
Notes:
If filtering has been applied we filter out missing segments from the confusion matrix.
Returns:
Class-based confusion matrix and a set of indexes that are not considered a part of the taxonomy (and are
only considered for FPs not as a part of mean calculations)
TODO(gunnar): Allow pre-seeding confusion matrix (all of the metrics calculate the same confusion matrix ->
we can calculate it once and then use it for all other metrics in the chain)
"""
# NOTE: This creates a max(class_index) * max(class_index) MAT. If we have np.int32 this could become
# huge. We could probably use a sparse matrix instead or change the logic to only create count(index) ** 2
# matrix (we only need to keep track of available indexes)
num_classes = (
max(
max((a.index for a in annotation.annotations)),
max((a.index for a in prediction.annotations)),
)
+ 1 # to include 0
)
confusion = fast_confusion_matrix(
annotation_img, prediction_img, num_classes
)
confusion = self._filter_confusion_matrix(
confusion, annotation, prediction
)
confusion = non_max_suppress_confusion(confusion, iou_threshold)
false_positive = Segment(FALSE_POSITIVES, index=confusion.shape[0] - 1)
if annotation.annotations[-1].label != FALSE_POSITIVES:
annotation.annotations.append(false_positive)
if annotation.annotations is not prediction.annotations:
# Probably likely that this structure is re-used -> check if same list instance and only append once
# TODO(gunnar): Should this uniqueness be handled by the base class?
prediction.annotations.append(false_positive)
# TODO(gunnar): Detect non_taxonomy classes for segmentation as well as instance segmentation
non_taxonomy_classes = set()
if self._is_instance_segmentation(annotation, prediction):
(
confusion,
_,
non_taxonomy_classes,
) = convert_to_instance_seg_confusion(
confusion, annotation, prediction
)
else:
ann_labels = list(
dict.fromkeys(s.label for s in annotation.annotations)
)
pred_labels = list(
dict.fromkeys(s.label for s in prediction.annotations)
)
missing_or_filtered_labels = set(ann_labels) - set(pred_labels)
non_taxonomy_classes = {
segment.index
for segment in annotation.annotations
if segment.label in missing_or_filtered_labels
}
return confusion, non_taxonomy_classes
def _is_instance_segmentation(self, annotation, prediction):
"""Guesses that we're dealing with instance segmentation if we have multiple segments with same label.
Degenerate case is same as semseg so fine to misclassify in that case.
"""
# This is a trick to get ordered sets
ann_labels = list(
dict.fromkeys(s.label for s in annotation.annotations)
)
pred_labels = list(
dict.fromkeys(s.label for s in prediction.annotations)
)
# NOTE: We assume instance segmentation if labels are duplicated in annotations or predictions
is_instance_segmentation = len(ann_labels) != len(
annotation.annotations
) or len(pred_labels) != len(prediction.annotations)
return is_instance_segmentation
def _filter_confusion_matrix(self, confusion, annotation, prediction):
if self.annotation_filters or self.prediction_filters:
new_confusion = np.zeros_like(confusion)
# we mask the confusion matrix instead of the images
if self.annotation_filters:
annotation_indexes = {
segment.index for segment in annotation.annotations
}
for row in annotation_indexes:
new_confusion[row, :] = confusion[row, :]
if self.prediction_filters:
prediction_indexes = {
segment.index for segment in prediction.annotations
}
for col in prediction_indexes:
new_confusion[:, col] = confusion[:, col]
confusion = new_confusion
return confusion
class SegmentationIOU(SegmentationMaskMetric):
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
iou_threshold: float = 0.5,
):
"""Initializes PolygonIOU object.
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(
annotation_filters,
prediction_filters,
iou_threshold,
)
def _metric_impl(
self,
annotation_img: "np.ndarray",
prediction_img: "np.ndarray",
annotation: SegmentationAnnotation,
prediction: SegmentationPrediction,
) -> ScalarResult:
confusion, non_taxonomy_classes = self._calculate_confusion_matrix(
annotation,
annotation_img,
prediction,
prediction_img,
self.iou_threshold,
)
with np.errstate(divide="ignore", invalid="ignore"):
tp = confusion[:-1, :-1]
fp = confusion[:, -1]
iou = np.diag(tp) / (
tp.sum(axis=1) + tp.sum(axis=0) + fp.sum() - np.diag(tp)
)
non_taxonomy_classes = non_taxonomy_classes - {
confusion.shape[1] - 1
}
iou.put(list(non_taxonomy_classes), np.nan)
mean_iou = np.nanmean(iou)
return ScalarResult(value=mean_iou, weight=annotation_img.size) # type: ignore
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
return ScalarResult.aggregate(results) # type: ignore
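# Illustrative sketch (not part of the original module): per-class IoU, precision and recall
# derived from a toy class-confusion matrix, mirroring the arithmetic in SegmentationIOU
# above and in SegmentationPrecision / SegmentationRecall below. The extra false-positive
# row/column handled by the real metrics is omitted here for simplicity.
def _example_confusion_matrix_metrics():
    import numpy as np

    # rows = ground-truth class, columns = predicted class
    confusion = np.array(
        [
            [50.0, 5.0],   # class 0: 50 pixels correct, 5 confused with class 1
            [10.0, 35.0],  # class 1: 10 confused with class 0, 35 correct
        ]
    )
    tp = np.diag(confusion)
    fp = confusion.sum(axis=0) - tp
    fn = confusion.sum(axis=1) - tp
    iou = tp / (tp + fp + fn)        # per-class Jaccard index
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return iou, precision, recall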
class SegmentationPrecision(SegmentationMaskMetric):
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
iou_threshold: float = 0.5,
):
"""Calculates mean per-class precision
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(
annotation_filters,
prediction_filters,
iou_threshold,
)
def _metric_impl(
self,
annotation_img: "np.ndarray",
prediction_img: "np.ndarray",
annotation: SegmentationAnnotation,
prediction: SegmentationPrediction,
) -> ScalarResult:
confusion, non_taxonomy_classes = self._calculate_confusion_matrix(
annotation,
annotation_img,
prediction,
prediction_img,
self.iou_threshold,
)
with np.errstate(divide="ignore", invalid="ignore"):
# TODO(gunnar): Logic can be simplified
confused = confusion[:-1, :-1]
tp = confused.diagonal()
fp = confusion[:, -1][:-1] + confused.sum(axis=0) - tp
tp_and_fp = tp + fp
precision = tp / tp_and_fp
non_taxonomy_classes = non_taxonomy_classes - {
confusion.shape[1] - 1
}
precision.put(list(non_taxonomy_classes), np.nan)
avg_precision = np.nanmean(precision)
return ScalarResult(value=np.nan_to_num(avg_precision), weight=confusion.sum()) # type: ignore
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
return ScalarResult.aggregate(results) # type: ignore
class SegmentationRecall(SegmentationMaskMetric):
"""Calculates the recall for a segmentation mask"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
iou_threshold: float = 0.5,
):
"""Initializes PolygonRecall object.
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(
annotation_filters,
prediction_filters,
iou_threshold,
)
def _metric_impl(
self,
annotation_img: "np.ndarray",
prediction_img: "np.ndarray",
annotation: SegmentationAnnotation,
prediction: SegmentationPrediction,
) -> ScalarResult:
confusion, non_taxonomy_classes = self._calculate_confusion_matrix(
annotation,
annotation_img,
prediction,
prediction_img,
self.iou_threshold,
)
with np.errstate(divide="ignore", invalid="ignore"):
recall = confusion.diagonal() / confusion.sum(axis=1)
recall.put(
list(non_taxonomy_classes), np.nan
) # We don't consider non taxonomy classes, i.e. FPs and background
mean_recall = np.nanmean(recall)
return ScalarResult(value=np.nan_to_num(mean_recall), weight=annotation_img.size) # type: ignore
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
return ScalarResult.aggregate(results) # type: ignore
class SegmentationMAP(SegmentationMaskMetric):
"""Calculates the mean average precision per class for segmentation masks
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonMAP
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonMAP()
metric(annotations, predictions)
"""
iou_setups = {"coco"}
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
iou_thresholds: Union[List[float], str] = "coco",
):
"""Initializes PolygonRecall object.
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
iou_thresholds: List of IoU thresholds to compute over, or the literal "coco".
"""
super().__init__(
annotation_filters,
prediction_filters,
)
self.iou_thresholds = setup_iou_thresholds(iou_thresholds)
def _metric_impl(
self,
annotation_img: "np.ndarray",
prediction_img: "np.ndarray",
annotation: SegmentationAnnotation,
prediction: SegmentationPrediction,
) -> ScalarResult:
ap_per_threshold = []
weight = 0
for iou_threshold in self.iou_thresholds:
ap = SegmentationPrecision(
self.annotation_filters, self.prediction_filters, iou_threshold
)
ap.loader = self.loader
ap_result = ap(
AnnotationList(segmentation_annotations=[annotation]),
PredictionList(segmentation_predictions=[prediction]),
)
ap_per_threshold.append(ap_result.value) # type: ignore
weight += ap_result.weight # type: ignore
thresholds = np.concatenate([[0], self.iou_thresholds, [1]])
steps = np.diff(thresholds)
mean_ap = (
np.array(ap_per_threshold + [ap_per_threshold[-1]]) * steps
).sum()
return ScalarResult(mean_ap, weight=weight)
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
return ScalarResult.aggregate(results) # type: ignore
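# Illustrative sketch (not part of the original module): the threshold-weighted averaging
# performed in SegmentationMAP._metric_impl above. The AP values are made up, and the
# "coco"-style IoU grid of 0.5:0.95 in steps of 0.05 is an assumption.
def _example_map_threshold_average():
    import numpy as np

    iou_thresholds = np.arange(0.5, 1.0, 0.05)
    ap_per_threshold = np.linspace(0.8, 0.4, len(iou_thresholds))  # hypothetical AP values

    # Treat AP as a step function of the IoU threshold and integrate it over [0, 1].
    thresholds = np.concatenate([[0.0], iou_thresholds, [1.0]])
    steps = np.diff(thresholds)
    mean_ap = (np.append(ap_per_threshold, ap_per_threshold[-1]) * steps).sum()
    return mean_ap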
class SegmentationFWAVACC(SegmentationMaskMetric):
"""Calculates the frequency weighted average of the class-wise Jaccard index
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonRecall
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonRecall()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
iou_threshold: float = 0.5,
):
"""Initializes SegmentationFWAVACC object.
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(
annotation_filters,
prediction_filters,
iou_threshold,
)
def _metric_impl(
self,
annotation_img: "np.ndarray",
prediction_img: "np.ndarray",
annotation: SegmentationAnnotation,
prediction: SegmentationPrediction,
) -> ScalarResult:
confusion, non_taxonomy_classes = self._calculate_confusion_matrix(
annotation,
annotation_img,
prediction,
prediction_img,
self.iou_threshold,
)
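        # Per-class IoU (Jaccard index) from the confusion matrix; classes outside
        # the taxonomy are masked with NaN and the remaining IoUs are weighted by
        # the frequency of their predicted pixels.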
with np.errstate(divide="ignore", invalid="ignore"):
iu = np.diag(confusion) / (
confusion.sum(axis=1)
+ confusion.sum(axis=0)
- np.diag(confusion)
)
predicted_counts = confusion.sum(axis=0).astype(np.float_)
predicted_counts.put(list(non_taxonomy_classes), np.nan)
freq = predicted_counts / np.nansum(predicted_counts)
iu.put(list(non_taxonomy_classes), np.nan)
fwavacc = (
np.nan_to_num(freq[freq > 0]) * np.nan_to_num(iu[freq > 0])
).sum()
mean_fwavacc = np.nanmean(fwavacc)
return ScalarResult(value=np.nan_to_num(mean_fwavacc), weight=confusion.sum()) # type: ignore
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
return ScalarResult.aggregate(results) # type: ignore
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metrics/segmentation_metrics.py
| 0.73678 | 0.54583 |
segmentation_metrics.py
|
pypi
|
import sys
from abc import abstractmethod
from typing import List, Optional, Union
import numpy as np
from nucleus.annotation import AnnotationList, BoxAnnotation, PolygonAnnotation
from nucleus.prediction import BoxPrediction, PolygonPrediction, PredictionList
from .base import Metric, ScalarResult
from .custom_types import BoxOrPolygonAnnotation, BoxOrPolygonPrediction
from .filtering import ListOfAndFilters, ListOfOrAndFilters
from .filters import confidence_filter, polygon_label_filter
from .metric_utils import compute_average_precision
from .polygon_utils import (
get_true_false_positives_confidences,
group_boxes_or_polygons_by_label,
iou_assignments,
label_match_wrapper,
num_true_positives,
)
class PolygonMetric(Metric):
"""Abstract class for metrics of box and polygons.
The PolygonMetric class automatically filters incoming annotations and
predictions for only box and polygon annotations. It also filters
predictions whose confidence is less than the provided confidence_threshold.
Finally, it provides support for enforcing matching labels. If
`enforce_label_match` is set to True, then annotations and predictions will
only be matched if they have the same label.
To create a new concrete PolygonMetric, override the `eval` function
with logic to define a metric between box/polygon annotations and predictions.
::
from typing import List
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import ScalarResult, PolygonMetric
from nucleus.metrics.polygon_utils import BoxOrPolygonAnnotation, BoxOrPolygonPrediction
class MyPolygonMetric(PolygonMetric):
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
value = (len(annotations) - len(predictions)) ** 2
weight = len(annotations)
return ScalarResult(value, weight)
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = MyPolygonMetric()
metric(annotations, predictions)
"""
def __init__(
self,
enforce_label_match: bool = False,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes PolygonMetric abstract object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Default False
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(annotation_filters, prediction_filters)
self.enforce_label_match = enforce_label_match
assert 0 <= confidence_threshold <= 1
self.confidence_threshold = confidence_threshold
@abstractmethod
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
# Main evaluation function that subclasses must override.
pass
def aggregate_score(self, results: List[ScalarResult]) -> ScalarResult: # type: ignore[override]
return ScalarResult.aggregate(results)
def call_metric(
self, annotations: AnnotationList, predictions: PredictionList
) -> ScalarResult:
if self.confidence_threshold > 0:
predictions = confidence_filter(
predictions, self.confidence_threshold
)
polygon_annotations: List[Union[BoxAnnotation, PolygonAnnotation]] = []
polygon_annotations.extend(annotations.box_annotations)
polygon_annotations.extend(annotations.polygon_annotations)
polygon_predictions: List[Union[BoxPrediction, PolygonPrediction]] = []
polygon_predictions.extend(predictions.box_predictions)
polygon_predictions.extend(predictions.polygon_predictions)
eval_fn = label_match_wrapper(self.eval)
result = eval_fn(
polygon_annotations,
polygon_predictions,
enforce_label_match=self.enforce_label_match,
)
return result
class PolygonIOU(PolygonMetric):
"""Calculates the average IOU between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonIOU
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonIOU()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes PolygonIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match,
confidence_threshold,
annotation_filters,
prediction_filters,
)
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
iou_assigns = iou_assignments(
annotations, predictions, self.iou_threshold
)
weight = max(len(annotations), len(predictions))
avg_iou = iou_assigns.sum() / max(weight, sys.float_info.epsilon)
return ScalarResult(avg_iou, weight)
class PolygonPrecision(PolygonMetric):
"""Calculates the precision between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonPrecision
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonPrecision()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes PolygonPrecision object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match,
confidence_threshold,
annotation_filters,
prediction_filters,
)
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
true_positives = num_true_positives(
annotations, predictions, self.iou_threshold
)
weight = len(predictions)
return ScalarResult(
true_positives / max(weight, sys.float_info.epsilon), weight
)
class PolygonRecall(PolygonMetric):
"""Calculates the recall between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonRecall
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonRecall()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes PolygonRecall object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match,
confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
true_positives = num_true_positives(
annotations, predictions, self.iou_threshold
)
weight = len(annotations) + sys.float_info.epsilon
return ScalarResult(
true_positives / max(weight, sys.float_info.epsilon), weight
)
class PolygonAveragePrecision(PolygonMetric):
"""Calculates the average precision between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonAveragePrecision
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonAveragePrecision(label="car")
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
label,
iou_threshold: float = 0.5,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes PolygonRecall object.
Args:
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
self.label = label
super().__init__(
enforce_label_match=False,
confidence_threshold=0,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
annotations_filtered = polygon_label_filter(annotations, self.label)
predictions_filtered = polygon_label_filter(predictions, self.label)
(
true_false_positives,
confidences,
) = get_true_false_positives_confidences(
annotations_filtered, predictions_filtered, self.iou_threshold
)
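        # Sort predictions by descending confidence (only when every prediction
        # carries a non-zero confidence) so precision/recall are accumulated in
        # ranked order before computing average precision.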
if np.all(confidences):
idxes = np.argsort(-confidences)
true_false_positives_sorted = true_false_positives[idxes]
else:
true_false_positives_sorted = true_false_positives
cumulative_true_positives = np.cumsum(true_false_positives_sorted)
total_predictions = np.arange(1, len(true_false_positives) + 1)
precisions = cumulative_true_positives / total_predictions
recalls = cumulative_true_positives / len(annotations)
average_precision = compute_average_precision(precisions, recalls)
weight = 1
return ScalarResult(average_precision, weight)
class PolygonMAP(PolygonMetric):
"""Calculates the mean average precision between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonMAP
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonMAP()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
iou_threshold: float = 0.5,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes PolygonRecall object.
Args:
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match=False,
confidence_threshold=0,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
def eval(
self,
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> ScalarResult:
grouped_inputs = group_boxes_or_polygons_by_label(
annotations, predictions
)
results: List[ScalarResult] = []
for label, group in grouped_inputs.items():
annotations_group, predictions_group = group
metric = PolygonAveragePrecision(label)
result = metric.eval(annotations_group, predictions_group)
results.append(result)
average_result = ScalarResult.aggregate(results)
return average_result
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metrics/polygon_metrics.py
| 0.862279 | 0.474692 |
polygon_metrics.py
|
pypi
|
import logging
import sys
from functools import wraps
from typing import TYPE_CHECKING, Dict, List, Tuple
from nucleus.annotation import BoxAnnotation, PolygonAnnotation
from .base import ScalarResult
from .custom_types import BoxOrPolygonAnnotation, BoxOrPolygonPrediction
from .errors import PolygonAnnotationTypeError
if TYPE_CHECKING:
try:
from shapely.geometry import Polygon
except (ModuleNotFoundError, OSError):
from ..package_not_installed import PackageNotInstalled
Polygon = PackageNotInstalled
import numpy as np
def polygon_annotation_to_shape(
annotation: BoxOrPolygonAnnotation,
) -> "Polygon":
try:
from shapely.geometry import ( # pylint: disable=redefined-outer-name
Polygon,
)
except (ModuleNotFoundError, OSError):
from ..package_not_installed import ( # pylint: disable=redefined-outer-name
PackageNotInstalled,
)
Polygon = PackageNotInstalled
if isinstance(annotation, BoxAnnotation):
xmin = annotation.x - annotation.width / 2
xmax = annotation.x + annotation.width / 2
ymin = annotation.y - annotation.height / 2
ymax = annotation.y + annotation.height / 2
return Polygon(
[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
)
elif isinstance(annotation, PolygonAnnotation):
return Polygon([(point.x, point.y) for point in annotation.vertices])
else:
raise PolygonAnnotationTypeError()
def _iou(annotation: "Polygon", prediction: "Polygon") -> float:
intersection = annotation.intersection(prediction).area
union = annotation.area + prediction.area - intersection
return intersection / max(union, sys.float_info.epsilon)
def _iou_matrix(
annotations: List["Polygon"], predictions: List["Polygon"]
) -> "np.ndarray":
import numpy as np
iou_matrix = np.empty((len(predictions), len(annotations)))
for i, prediction in enumerate(predictions):
for j, annotation in enumerate(annotations):
iou_matrix[i, j] = _iou(annotation, prediction)
return iou_matrix
def _iou_assignments_for_same_reference_id(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
iou_threshold: float,
) -> Tuple["np.ndarray", "np.ndarray", "np.ndarray"]:
# NOTE: Late imports to speed up CLI invocation
import numpy as np
from scipy.optimize import linear_sum_assignment
    # Matches annotations and predictions of the same reference ID.
# Returns a tuple of the list of all IoU values of valid assignments, a
# list of the indices of predictions matched to annotations (-1 if
# unmatched), and a list of all indices of annotations matched to
# predictions.
# Check that all annotations and predictions have same reference ID.
reference_ids = set(annotation.reference_id for annotation in annotations)
reference_ids |= set(prediction.reference_id for prediction in predictions)
assert (
len(reference_ids) <= 1
), "Expected annotations and predictions to have same reference ID."
# Convert annotation and predictions to shapely.geometry.Polygon objects
polygon_annotations = list(map(polygon_annotation_to_shape, annotations))
polygon_predictions = list(map(polygon_annotation_to_shape, predictions))
invalid_anns = [
ann
for ann, poly in zip(annotations, polygon_annotations)
if not poly.is_valid
]
invalid_preds = [
pred
for pred, poly in zip(predictions, polygon_predictions)
if not poly.is_valid
]
if invalid_anns or invalid_preds:
# Filter out invalid polys
polygon_annotations = [
poly
for ann, poly in zip(annotations, polygon_annotations)
if poly.is_valid
]
polygon_predictions = [
poly
for pred, poly in zip(predictions, polygon_predictions)
if poly.is_valid
]
invalid_dataset_ids = set(
ann.reference_id for ann in invalid_anns
).union(set(pred.reference_id for pred in invalid_preds))
        # TODO(gunnar): change to .id once field is surfaced
logging.warning(
"Invalid polygons for dataset items: %s Annotations:%s, predictions: %s",
invalid_dataset_ids,
[a.annotation_id for a in invalid_anns],
[p.annotation_id for p in invalid_preds],
)
# Compute IoU matrix and set IoU values below the threshold to 0.
iou_matrix = _iou_matrix(polygon_annotations, polygon_predictions)
iou_matrix[iou_matrix < iou_threshold] = 0
# Match annotations and predictions using linear sum assignment and filter out
# values below the threshold.
matched_0, matched_1 = linear_sum_assignment(-iou_matrix)
iou_assigns = iou_matrix[matched_0, matched_1]
valid_idxes = iou_assigns >= iou_threshold
iou_assigns = iou_assigns[valid_idxes]
matched_0 = matched_0[valid_idxes]
matched_1 = matched_1[valid_idxes]
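    # Build index maps in both directions; -1 marks an unmatched annotation or
    # prediction.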
anno_to_pred = -np.ones(len(annotations))
pred_to_anno = -np.ones(len(predictions))
anno_to_pred[matched_1] = matched_0
pred_to_anno[matched_0] = matched_1
return iou_assigns, anno_to_pred, pred_to_anno
def group_boxes_or_polygons_by_reference_id(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> Dict[
str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
]:
"""Groups input annotations and predictions by reference_id.
Args:
annotations: list of input annotations
predictions: list of input predictions
Returns:
Mapping from each reference_id to (annotations, predictions) tuple.
"""
reference_ids = set(annotation.reference_id for annotation in annotations)
reference_ids |= set(prediction.reference_id for prediction in predictions)
grouped: Dict[
str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
] = {reference_id: ([], []) for reference_id in reference_ids}
for annotation in annotations:
grouped[annotation.reference_id][0].append(annotation)
for prediction in predictions:
grouped[prediction.reference_id][1].append(prediction)
return grouped
def group_boxes_or_polygons_by_label(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
) -> Dict[
str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
]:
"""Groups input annotations and predictions by label.
Args:
annotations: list of input box or polygon annotations
predictions: list of input box or polygon predictions
Returns:
Mapping from each label to (annotations, predictions) tuple
"""
labels = set(annotation.label for annotation in annotations)
labels |= set(prediction.label for prediction in predictions)
grouped: Dict[
str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
] = {label: ([], []) for label in labels}
for annotation in annotations:
grouped[annotation.label][0].append(annotation)
for prediction in predictions:
grouped[prediction.label][1].append(prediction)
return grouped
def iou_assignments(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
iou_threshold: float,
) -> "np.ndarray":
"""Matches annotations and predictions based on linear sum cost and returns the
intersection-over-union values of the matched annotation-prediction pairs, subject
to the specified IoU threshold. Note that annotations and predictions from
different reference_ids will not be matched with one another.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
Args:
annotations: list of box or polygon annotations
predictions: list of box or polygon predictions
iou_threshold: the intersection-over-union threshold for an
annotation-prediction pair to be considered a match.
Returns:
1D numpy array that contains the IoU values of the matched pairs.
"""
import numpy as np
grouped_inputs = group_boxes_or_polygons_by_reference_id(
annotations, predictions
)
iou_assigns = []
for grouped_annotations, grouped_predictions in grouped_inputs.values():
result_per_reference_id, _, _ = _iou_assignments_for_same_reference_id(
grouped_annotations, grouped_predictions, iou_threshold
)
iou_assigns.append(result_per_reference_id)
return np.concatenate(iou_assigns)
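# Illustrative usage sketch (not part of the library): given box annotations and
# predictions that share reference IDs, e.g.
#   ious = iou_assignments(annotations, predictions, iou_threshold=0.5)
#   mean_iou = ious.mean() if len(ious) else 0.0
# the returned array holds one IoU value per matched annotation-prediction pair.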
def get_true_false_positives_confidences(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
iou_threshold: float,
) -> Tuple["np.ndarray", "np.ndarray"]:
"""Matches annotations and predictions based on linear sum cost and returns the
intersection-over-union values of the matched annotation-prediction pairs, subject
to the specified IoU threshold. Note that annotations and predictions from
different reference_ids will not be matched with one another.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
Args:
annotations: list of box or polygon annotations
predictions: list of box or polygon predictions
iou_threshold: the intersection-over-union threshold for an
annotation-prediction pair to be considered a match.
Returns:
        1D numpy array that contains 1 for each true-positive prediction and 0 for
        each false-positive prediction.
1D numpy array of confidence values.
"""
import numpy as np
grouped_inputs = group_boxes_or_polygons_by_reference_id(
annotations, predictions
)
true_false_positives = []
confidences = []
for grouped_annotations, grouped_predictions in grouped_inputs.values():
_, _, pred_to_anno = _iou_assignments_for_same_reference_id(
grouped_annotations, grouped_predictions, iou_threshold
)
true_false_positives.append(pred_to_anno > -1)
confidences.extend([pred.confidence for pred in grouped_predictions])
return np.concatenate(true_false_positives), np.array(confidences)
def num_true_positives(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
iou_threshold: float,
) -> int:
"""Counts the number of annotations with a matching prediction.
A prediction is considered a match for an annotation if it has not yet been
matched to another annotation, its reference_id is the same as the
annotation, and its IoU with the annotation is at least the iou_threshold.
Args:
annotations: list of box or polygon annotations
predictions: list of box or polygon predictions
iou_threshold: the intersection-over-union threshold for an
annotation-prediction pair to be considered a match.
Returns:
The number of true positives (predictions that are matched to annotations).
"""
iou_assigns = iou_assignments(annotations, predictions, iou_threshold)
true_positives = len(iou_assigns)
return true_positives
def label_match_wrapper(metric_fn):
"""Decorator to add the ability to only apply metric to annotations and
predictions with matching labels.
Args:
metric_fn: Metric function that takes a list of annotations, a list
of predictions, and optional args and kwargs.
Returns:
Metric function which can optionally enforce matching labels.
"""
@wraps(metric_fn)
def wrapper(
annotations: List[BoxOrPolygonAnnotation],
predictions: List[BoxOrPolygonPrediction],
*args,
enforce_label_match: bool = False,
**kwargs,
) -> ScalarResult:
# Simply return the metric if we are not enforcing label matches.
if not enforce_label_match:
return metric_fn(annotations, predictions, *args, **kwargs)
# For each bin of annotations/predictions, compute the metric applied
# only to that bin. Then aggregate results across all bins.
grouped_inputs = group_boxes_or_polygons_by_label(
annotations, predictions
)
metric_results = []
for binned_annotations, binned_predictions in grouped_inputs.values():
metric_result = metric_fn(
binned_annotations, binned_predictions, *args, **kwargs
)
metric_results.append(metric_result)
assert all(
isinstance(r, ScalarResult) for r in metric_results
), "Expected every result to be a ScalarResult"
return ScalarResult.aggregate(metric_results)
return wrapper
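# Illustrative usage sketch (not part of the library): any metric function with the
# signature (annotations, predictions, *args, **kwargs) -> ScalarResult can be
# wrapped, e.g.
#   wrapped = label_match_wrapper(my_metric_fn)  # my_metric_fn is hypothetical
#   result = wrapped(annotations, predictions, enforce_label_match=True)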
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metrics/polygon_utils.py
| 0.655557 | 0.553385 |
polygon_utils.py
|
pypi
|
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Iterable, List, Optional, Union
from nucleus.annotation import AnnotationList
from nucleus.metrics.errors import EverythingFilteredError
from nucleus.metrics.filtering import (
ListOfAndFilters,
ListOfOrAndFilters,
compose_helpful_filtering_error,
filter_annotation_list,
filter_prediction_list,
)
from nucleus.prediction import PredictionList
class MetricResult(ABC):
"""Base MetricResult class"""
@dataclass
class ScalarResult(MetricResult):
"""A scalar result contains the value of an evaluation, as well as its weight.
The weight is useful when aggregating metrics where each dataset item may hold a
different relative weight. For example, when calculating precision over a dataset,
the denominator of the precision is the number of annotations, and therefore the weight
can be set as the number of annotations.
Attributes:
value (float): The value of the evaluation result
weight (float): The weight of the evaluation result.
"""
value: float
weight: float = 1.0
@staticmethod
def aggregate(results: Iterable["ScalarResult"]) -> "ScalarResult":
"""Aggregates results using a weighted average."""
results = list(filter(lambda x: x.weight != 0, results))
total_weight = sum([result.weight for result in results])
total_value = sum([result.value * result.weight for result in results])
value = total_value / max(total_weight, sys.float_info.epsilon)
return ScalarResult(value, total_weight)
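# Illustrative note (not part of the library): aggregation is a weighted average,
# e.g. ScalarResult.aggregate([ScalarResult(1.0, weight=1), ScalarResult(0.0, weight=3)])
# yields ScalarResult(value=0.25, weight=4).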
class Metric(ABC):
"""Abstract class for defining a metric, which takes a list of annotations
and predictions and returns a scalar.
To create a new concrete Metric, override the `__call__` function
with logic to define a metric between annotations and predictions. ::
from nucleus import BoxAnnotation, CuboidPrediction, Point3D
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import Metric, MetricResult
from nucleus.metrics.polygon_utils import BoxOrPolygonAnnotation, BoxOrPolygonPrediction
class MyMetric(Metric):
def __call__(
self, annotations: AnnotationList, predictions: PredictionList
) -> MetricResult:
value = (len(annotations) - len(predictions)) ** 2
weight = len(annotations)
return MetricResult(value, weight)
box = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
cuboid = CuboidPrediction(
label="car",
position=Point3D(100, 100, 10),
dimensions=Point3D(5, 10, 5),
yaw=0,
reference_id="pointcloud_1",
confidence=0.8,
annotation_id="pointcloud_1_car_cuboid_1",
metadata={"vehicle_color": "green"}
)
metric = MyMetric()
annotations = AnnotationList(box_annotations=[box])
predictions = PredictionList(cuboid_predictions=[cuboid])
metric(annotations, predictions)
"""
def __init__(
self,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""
Args:
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
self.annotation_filters = annotation_filters
self.prediction_filters = prediction_filters
@abstractmethod
def call_metric(
self, annotations: AnnotationList, predictions: PredictionList
) -> MetricResult:
"""A metric must override this method and return a metric result, given annotations and predictions."""
def __call__(
self, annotations: AnnotationList, predictions: PredictionList
) -> MetricResult:
filtered_anns = filter_annotation_list(
annotations, self.annotation_filters
)
filtered_preds = filter_prediction_list(
predictions, self.prediction_filters
)
self._raise_if_everything_filtered(
annotations, filtered_anns, predictions, filtered_preds
)
return self.call_metric(filtered_anns, filtered_preds)
@abstractmethod
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
"""A metric must define how to aggregate results from single items to a single ScalarResult.
E.g. to calculate a R2 score with sklearn you could define a custom metric class ::
class R2Result(MetricResult):
y_true: float
y_pred: float
And then define an aggregate_score ::
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
y_trues = []
y_preds = []
for result in results:
                    y_trues.append(result.y_true)
y_preds.append(result.y_pred)
r2_score = sklearn.metrics.r2_score(y_trues, y_preds)
return ScalarResult(r2_score)
"""
def _raise_if_everything_filtered(
self,
annotations: AnnotationList,
filtered_annotations: AnnotationList,
predictions: PredictionList,
filtered_predictions: PredictionList,
):
msg = []
if len(filtered_annotations) == 0:
msg.extend(
compose_helpful_filtering_error(
annotations, self.annotation_filters
)
)
if len(filtered_predictions) == 0:
msg.extend(
compose_helpful_filtering_error(
predictions, self.prediction_filters
)
)
if msg:
raise EverythingFilteredError("\n".join(msg))
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metrics/base.py
| 0.861989 | 0.519034 |
base.py
|
pypi
|
from functools import wraps
from typing import Dict, List, Tuple
import numpy as np
try:
from shapely.geometry import Polygon
except (ModuleNotFoundError, OSError):
from ..package_not_installed import PackageNotInstalled
Polygon = PackageNotInstalled
from nucleus.annotation import CuboidAnnotation
from nucleus.prediction import CuboidPrediction
from .base import ScalarResult
def group_cuboids_by_label(
annotations: List[CuboidAnnotation],
predictions: List[CuboidPrediction],
) -> Dict[str, Tuple[List[CuboidAnnotation], List[CuboidPrediction]]]:
"""Groups input annotations and predictions by label.
Args:
annotations: list of input cuboid annotations
predictions: list of input cuboid predictions
Returns:
Mapping from each label to (annotations, predictions) tuple
"""
labels = set(annotation.label for annotation in annotations)
labels |= set(prediction.label for prediction in predictions)
grouped: Dict[
str, Tuple[List[CuboidAnnotation], List[CuboidPrediction]]
] = {label: ([], []) for label in labels}
for annotation in annotations:
grouped[annotation.label][0].append(annotation)
for prediction in predictions:
grouped[prediction.label][1].append(prediction)
return grouped
def label_match_wrapper(metric_fn):
"""Decorator to add the ability to only apply metric to annotations and
predictions with matching labels.
Args:
metric_fn: Metric function that takes a list of annotations, a list
of predictions, and optional args and kwargs.
Returns:
Metric function which can optionally enforce matching labels.
"""
@wraps(metric_fn)
def wrapper(
annotations: List[CuboidAnnotation],
predictions: List[CuboidPrediction],
*args,
enforce_label_match: bool = False,
**kwargs,
) -> ScalarResult:
# Simply return the metric if we are not enforcing label matches.
if not enforce_label_match:
return metric_fn(annotations, predictions, *args, **kwargs)
# For each bin of annotations/predictions, compute the metric applied
# only to that bin. Then aggregate results across all bins.
grouped_inputs = group_cuboids_by_label(annotations, predictions)
metric_results = []
for binned_annotations, binned_predictions in grouped_inputs.values():
metric_result = metric_fn(
binned_annotations, binned_predictions, *args, **kwargs
)
metric_results.append(metric_result)
assert all(
isinstance(r, ScalarResult) for r in metric_results
), "Expected every result to be a ScalarResult"
return ScalarResult.aggregate(metric_results)
return wrapper
def process_dataitem(dataitem):
processed_item = {}
processed_item["xyz"] = np.array(
[[ann.position.x, ann.position.y, ann.position.z] for ann in dataitem]
)
processed_item["wlh"] = np.array(
[
[ann.dimensions.x, ann.dimensions.y, ann.dimensions.z]
for ann in dataitem
]
)
processed_item["yaw"] = np.array([ann.yaw for ann in dataitem])
return processed_item
def compute_outer_iou(
xyz_0: "np.ndarray",
wlh_0: "np.ndarray",
yaw_0: "np.ndarray",
xyz_1: "np.ndarray",
wlh_1: "np.ndarray",
yaw_1: "np.ndarray",
scale_convention: bool = True,
distance_threshold=25,
) -> Tuple["np.ndarray", "np.ndarray"]:
"""
Computes outer 3D and 2D IoU
:param xyz_0: (n, 3)
:param wlh_0: (n, 3)
:param yaw_0: (n,)
:param xyz_1: (m, 3)
:param wlh_1: (m, 3)
:param yaw_1: (m,)
    :param scale_convention: flag whether the internal Scale convention is used (yaw has to be adjusted by pi/2)
:param distance_threshold: computes iou only within this distance (~3x speedup)
:return: (n, m) 3D IoU, (n, m) 2D IoU
"""
bottom_z = np.maximum.outer(
xyz_0[:, 2] - (wlh_0[:, 2] / 2), xyz_1[:, 2] - (wlh_1[:, 2] / 2)
)
top_z = np.minimum.outer(
xyz_0[:, 2] + (wlh_0[:, 2] / 2), xyz_1[:, 2] + (wlh_1[:, 2] / 2)
)
height_intersection = np.maximum(0, top_z - bottom_z)
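    # For upright (yaw-only) cuboids, the 3D intersection factors into the vertical
    # overlap (height_intersection) times the overlap area of the 2D footprints,
    # which is computed below from shapely polygons of each footprint.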
cuboid_corners_0 = get_batch_cuboid_corners(
xyz_0, wlh_0, yaw_0, scale_convention=scale_convention
)
cuboid_corners_1 = get_batch_cuboid_corners(
xyz_1, wlh_1, yaw_1, scale_convention=scale_convention
)
polygons_1 = [
Polygon(corners_1[[1, 0, 4, 5, 1], :2])
for corners_1 in cuboid_corners_1
]
area_intersection = np.zeros(
(cuboid_corners_0.shape[0], cuboid_corners_1.shape[0]),
dtype=np.float32,
)
if cuboid_corners_0.shape[0] != 0 and cuboid_corners_1.shape[0] != 0:
distance_mask = (
np.linalg.norm(
xyz_0[:, np.newaxis, :] - xyz_1[np.newaxis, :, :], axis=2
)
< distance_threshold
)
for i, corners_0 in enumerate(cuboid_corners_0):
for j, polygon_1 in enumerate(polygons_1):
if distance_mask[i, j]:
area_intersection[i, j] = (
Polygon(corners_0[[1, 0, 4, 5, 1], :2])
.intersection(polygon_1)
.area
)
intersection = height_intersection * area_intersection
area_0 = wlh_0[:, 0] * wlh_0[:, 1]
area_1 = wlh_1[:, 0] * wlh_1[:, 1]
union_2d = np.add.outer(area_0, area_1) - area_intersection
volume_0 = area_0 * wlh_0[:, 2]
volume_1 = area_1 * wlh_1[:, 2]
union = np.add.outer(volume_0, volume_1) - intersection
return intersection / union, area_intersection / union_2d
def get_batch_cuboid_corners(
xyz: "np.ndarray",
wlh: "np.ndarray",
yaw: "np.ndarray",
pitch: "np.ndarray" = None,
roll: "np.ndarray" = None,
scale_convention: bool = True,
) -> "np.ndarray":
"""
Vectorized batch version of get_cuboid_corners
:param xyz: (n, 3)
:param wlh: (n, 3)
:param yaw: (n,)
:param pitch: (n,)
:param roll: (n,)
    :param scale_convention: flag whether the internal Scale convention is used (yaw has to be adjusted by pi/2)
:return: (n, 8, 3)
"""
if scale_convention:
yaw = yaw.copy() + np.pi / 2
w, l, h = wlh[:, 0, None], wlh[:, 1, None], wlh[:, 2, None]
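    # Corner offsets in the box frame: length along x, width along y, height along z;
    # the fixed sign patterns enumerate the 8 corners in a consistent order.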
x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
corners = np.stack((x_corners, y_corners, z_corners), axis=1)
rot_mats = get_batch_rotation_matrices(yaw, pitch, roll)
corners = np.matmul(rot_mats, corners)
x, y, z = xyz[:, 0, None], xyz[:, 1, None], xyz[:, 2, None]
corners[:, 0, :] = corners[:, 0, :] + x
corners[:, 1, :] = corners[:, 1, :] + y
corners[:, 2, :] = corners[:, 2, :] + z
return corners.swapaxes(1, 2)
def get_batch_rotation_matrices(
yaw: "np.ndarray", pitch: "np.ndarray" = None, roll: "np.ndarray" = None
) -> "np.ndarray":
if pitch is None:
pitch = np.zeros_like(yaw)
if roll is None:
roll = np.zeros_like(yaw)
cy = np.cos(yaw)
sy = np.sin(yaw)
cp = np.cos(pitch)
sp = np.sin(pitch)
cr = np.cos(roll)
sr = np.sin(roll)
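    # Compose the batched Z-Y-X (yaw-pitch-roll) rotation matrices row by row.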
return np.stack(
(
np.stack(
(cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr), 1
),
np.stack(
(sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr), 1
),
np.stack((-sp, cp * sr, cp * cr), 1),
),
1,
)
def associate_cuboids_on_iou(
xyz_0: "np.ndarray",
wlh_0: "np.ndarray",
yaw_0: "np.ndarray",
xyz_1: "np.ndarray",
wlh_1: "np.ndarray",
yaw_1: "np.ndarray",
threshold_in_overlap_ratio: float = 0.1,
use_2d_iou: bool = False,
) -> List[Tuple[int, int]]:
if xyz_0.shape[0] < 1 or xyz_1.shape[0] < 1:
return []
iou_3d, iou_2d = compute_outer_iou(xyz_0, wlh_0, yaw_0, xyz_1, wlh_1, yaw_1)
iou = iou_2d if use_2d_iou else iou_3d
mapping = []
for i, m in enumerate(iou.max(axis=1)):
if m >= threshold_in_overlap_ratio:
mapping.append((i, iou[i].argmax()))
return mapping
def recall_precision(
prediction: List[CuboidPrediction],
groundtruth: List[CuboidAnnotation],
threshold_in_overlap_ratio: float,
use_2d_iou: bool = False,
) -> Dict[str, float]:
"""
Calculates the precision and recall of each lidar frame.
Args:
    :param prediction: list of cuboid predictions.
    :param groundtruth: list of cuboid annotation ground truths.
    :param threshold_in_overlap_ratio: IoU threshold to consider a detection valid. Must be in [0, 1].
    :param use_2d_iou: flag whether to use 2D or 3D IoU for evaluation.
"""
tp_sum = 0
fp_sum = 0
fn_sum = 0
num_predicted = 0
num_instances = 0
gt_items = process_dataitem(groundtruth)
pred_items = process_dataitem(prediction)
num_predicted += pred_items["xyz"].shape[0]
num_instances += gt_items["xyz"].shape[0]
tp = np.zeros(pred_items["xyz"].shape[0])
fp = np.ones(pred_items["xyz"].shape[0])
fn = np.ones(gt_items["xyz"].shape[0])
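    # Every prediction starts as a false positive and every ground-truth box as a
    # false negative; matched pairs found below flip them to true positives.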
mapping = associate_cuboids_on_iou(
pred_items["xyz"],
pred_items["wlh"],
pred_items["yaw"] + np.pi / 2,
gt_items["xyz"],
gt_items["wlh"],
gt_items["yaw"] + np.pi / 2,
threshold_in_overlap_ratio=threshold_in_overlap_ratio,
use_2d_iou=use_2d_iou,
)
for pred_id, gt_id in mapping:
if fn[gt_id] == 0:
continue
tp[pred_id] = 1
fp[pred_id] = 0
fn[gt_id] = 0
tp_sum += tp.sum()
fp_sum += fp.sum()
fn_sum += fn.sum()
return {
"tp_sum": tp_sum,
"fp_sum": fp_sum,
"fn_sum": fn_sum,
"precision": tp_sum / (tp_sum + fp_sum),
"recall": tp_sum / (tp_sum + fn_sum),
"num_predicted": num_predicted,
"num_instances": num_instances,
}
def detection_iou(
prediction: List[CuboidPrediction],
groundtruth: List[CuboidAnnotation],
threshold_in_overlap_ratio: float,
) -> Tuple["np.ndarray", "np.ndarray"]:
"""
Calculates the 2D IOU and 3D IOU overlap between predictions and groundtruth.
Uses linear sum assignment to associate cuboids.
Args:
    :param prediction: list of cuboid predictions.
    :param groundtruth: list of cuboid annotation ground truths.
    :param threshold_in_overlap_ratio: IoU threshold to consider a detection valid. Must be in [0, 1].
"""
gt_items = process_dataitem(groundtruth)
pred_items = process_dataitem(prediction)
meter_2d = []
meter_3d = []
if gt_items["xyz"].shape[0] == 0 or pred_items["xyz"].shape[0] == 0:
return np.array([0.0]), np.array([0.0])
iou_3d, iou_2d = compute_outer_iou(
gt_items["xyz"],
gt_items["wlh"],
gt_items["yaw"],
pred_items["xyz"],
pred_items["wlh"],
pred_items["yaw"],
)
for i, m in enumerate(iou_3d.max(axis=1)):
if m >= threshold_in_overlap_ratio:
j = iou_3d[i].argmax()
meter_3d.append(iou_3d[i, j])
meter_2d.append(iou_2d[i, j])
return np.array(meter_3d), np.array(meter_2d)
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metrics/cuboid_utils.py
| 0.945951 | 0.69978 |
cuboid_utils.py
|
pypi
|
import copy
import enum
import functools
import logging
from enum import Enum
from typing import (
Callable,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from rich.console import Console
from rich.table import Table
from nucleus.annotation import (
AnnotationList,
BoxAnnotation,
CategoryAnnotation,
CuboidAnnotation,
LineAnnotation,
MultiCategoryAnnotation,
PolygonAnnotation,
Segment,
SegmentationAnnotation,
)
from nucleus.prediction import (
BoxPrediction,
CategoryPrediction,
CuboidPrediction,
LinePrediction,
PolygonPrediction,
PredictionList,
SceneCategoryPrediction,
SegmentationPrediction,
)
class FilterOp(str, Enum):
GT = ">"
GTE = ">="
LT = "<"
LTE = "<="
EQ = "="
EQEQ = "=="
NEQ = "!="
IN = "in"
NOT_IN = "not in"
class FilterType(str, enum.Enum):
"""The type of the filter decides the getter used for the comparison.
Attributes:
FIELD: Access the attribute field of an object
METADATA: Access the metadata dictionary of an object
SEGMENT_FIELD: Filter segments of a segmentation mask to be considered on segment fields
SEGMENT_METADATA: Filter segments of a segmentation mask based on segment metadata
"""
FIELD = "field"
METADATA = "metadata"
SEGMENT_FIELD = "segment_field"
SEGMENT_METADATA = "segment_metadata"
FilterableBaseVals = Union[str, float, int, bool]
FilterableTypes = Union[
FilterableBaseVals,
Sequence[FilterableBaseVals],
Set[FilterableBaseVals],
Iterable[FilterableBaseVals],
]
AnnotationTypes = Union[
BoxAnnotation,
CategoryAnnotation,
CuboidAnnotation,
LineAnnotation,
MultiCategoryAnnotation,
PolygonAnnotation,
SegmentationAnnotation,
]
PredictionTypes = Union[
BoxPrediction,
CategoryPrediction,
CuboidPrediction,
LinePrediction,
PolygonPrediction,
SceneCategoryPrediction,
SegmentationPrediction,
]
class AnnotationOrPredictionFilter(NamedTuple):
"""Internal type for reconstruction of JSON encoded payload. Type field decides if filter behaves like FieldFilter
or MetadataFilter
Attributes:
key: key to compare with value
op: :class:`FilterOp` or one of [">", ">=", "<", "<=", "=", "==", "!=", "in", "not in"] to define comparison
with value field
value: bool, str, float or int to compare the field with key or list of the same values for 'in' and 'not in'
ops
allow_missing: Allow missing field values. Will REMOVE the object with the missing field from the selection
type: DO NOT USE. Internal type for serialization over the wire. Changing this will change the `NamedTuple`
type as well.
"""
key: str
op: Union[FilterOp, str]
value: FilterableTypes
allow_missing: bool
type: FilterType
class FieldFilter(NamedTuple):
"""Filter on standard field of AnnotationTypes or PredictionTypes
Examples:
FieldFilter("x", ">", 10) would pass every :class:`BoxAnnotation` with `x` attribute larger than 10
FieldFilter("label", "in", ["car", "truck"]) would pass every :class:`BoxAnnotation` with `label`
in ["car", "truck"]
Attributes:
key: key to compare with value
op: :class:`FilterOp` or one of [">", ">=", "<", "<=", "=", "==", "!=", "in", "not in"] to define comparison
with value field
value: bool, str, float or int to compare the field with key or list of the same values for 'in' and 'not in'
ops
allow_missing: Allow missing field values. Will REMOVE the object with the missing field from the selection
type: DO NOT USE. Internal type for serialization over the wire. Changing this will change the `NamedTuple`
type as well.
"""
key: str
op: Union[FilterOp, str]
value: FilterableTypes
allow_missing: bool = False
type: FilterType = FilterType.FIELD
class MetadataFilter(NamedTuple):
"""Filter on customer provided metadata associated with AnnotationTypes or PredictionTypes
Attributes:
key: key to compare with value
op: :class:`FilterOp` or one of [">", ">=", "<", "<=", "=", "==", "!=", "in", "not in"] to define comparison
with value field
value: bool, str, float or int to compare the field with key or list of the same values for 'in' and 'not in'
ops
allow_missing: Allow missing metadata values. Will REMOVE the object with the missing field from the selection
type: DO NOT USE. Internal type for serialization over the wire. Changing this will change the `NamedTuple`
type as well.
"""
key: str
op: Union[FilterOp, str]
value: FilterableTypes
allow_missing: bool = False
type: FilterType = FilterType.METADATA
class SegmentMetadataFilter(NamedTuple):
"""Filter on customer provided metadata associated with Segments of a SegmentationAnnotation or
SegmentationPrediction
Attributes:
key: key to compare with value
op: :class:`FilterOp` or one of [">", ">=", "<", "<=", "=", "==", "!=", "in", "not in"] to define comparison
with value field
value: bool, str, float or int to compare the field with key or list of the same values for 'in' and 'not in'
ops
allow_missing: Allow missing metadata values. Will REMOVE the object with the missing field from the selection
type: DO NOT USE. Internal type for serialization over the wire. Changing this will change the `NamedTuple`
type as well.
"""
key: str
op: Union[FilterOp, str]
value: FilterableTypes
allow_missing: bool = False
type: FilterType = FilterType.SEGMENT_METADATA
class SegmentFieldFilter(NamedTuple):
"""Filter on standard field of Segment(s) of SegmentationAnnotation and SegmentationPrediction
Examples:
SegmentFieldFilter("label", "in", ["grass", "tree"]) would pass every :class:`Segment` of a
:class:`SegmentationAnnotation or :class:`SegmentationPrediction`
Attributes:
key: key to compare with value
op: :class:`FilterOp` or one of [">", ">=", "<", "<=", "=", "==", "!=", "in", "not in"] to define comparison
with value field
value: bool, str, float or int to compare the field with key or list of the same values for 'in' and 'not in'
ops
allow_missing: Allow missing field values. Will REMOVE the object with the missing field from the selection
type: DO NOT USE. Internal type for serialization over the wire. Changing this will change the `NamedTuple`
type as well.
"""
key: str
op: Union[FilterOp, str]
value: FilterableTypes
allow_missing: bool = False
type: FilterType = FilterType.SEGMENT_FIELD
Filter = Union[
FieldFilter,
MetadataFilter,
AnnotationOrPredictionFilter,
SegmentFieldFilter,
SegmentMetadataFilter,
]
OrAndDNFFilters = List[List[Filter]]
OrAndDNFFilters.__doc__ = """\
Disjunctive normal form (DNF) filters.
DNF allows arbitrary boolean logical combinations of single field predicates.
The innermost structures each describe a single field predicate.
The list of inner predicates is interpreted as a conjunction (AND), forming a more selective and multiple column
predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
ListOfOrAndJSONSerialized = List[List[List]]
ListOfOrAndJSONSerialized.__doc__ = """\
JSON serialized form of DNFFilters. The innermost list has to be trivially expandable (*list) to a
:class:`AnnotationOrPredictionFilter`.
Disjunctive normal form (DNF) filters.
DNF allows arbitrary boolean logical combinations of single field predicates.
The innermost structures each describe a single field predicate.
The list of inner predicates is interpreted as a conjunction (AND), forming a more selective and multiple column
predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
ListOfOrAndFilters = Union[OrAndDNFFilters, ListOfOrAndJSONSerialized]
ListOfAndJSONSerialized = List[List]
ListOfAndFilterTuple = List[Filter]
ListOfAndFilterTuple.__doc__ = """\
List of AND filters.
The list of predicates is interpreted as a conjunction (AND), forming a multiple field predicate.
If providing a doubly nested list the innermost list has to be trivially expandable (*list) to a
:class:`AnnotationOrPredictionFilter`
"""
ListOfAndFilters = Union[
ListOfAndFilterTuple,
ListOfAndJSONSerialized,
]
DNFFieldOrMetadataFilters = List[
List[Union[FieldFilter, MetadataFilter, AnnotationOrPredictionFilter]]
]
DNFFieldOrMetadataFilters.__doc__ = """\
Disjunctive normal form (DNF) filters.
DNF allows arbitrary boolean logical combinations of single field predicates.
The innermost structures each describe a single field predicate.
The list of inner predicates is interpreted as a conjunction (AND), forming a more selective and multiple column
predicate.
"""
def _attribute_getter(
field_name: str,
allow_missing: bool,
ann_or_pred: Union[AnnotationTypes, PredictionTypes, Segment],
):
"""Create a function to get object fields"""
if allow_missing:
return (
getattr(ann_or_pred, field_name)
if hasattr(ann_or_pred, field_name)
else AlwaysFalseComparison()
)
else:
return getattr(ann_or_pred, field_name)
class AlwaysFalseComparison:
"""Helper class to make sure that allow filtering out missing fields (by always returning a false comparison)"""
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return False
def __ne__(self, other):
return False
def _metadata_field_getter(
field_name: str,
allow_missing: bool,
ann_or_pred: Union[AnnotationTypes, PredictionTypes, Segment],
):
"""Create a function to get a metadata field"""
if isinstance(
ann_or_pred, (SegmentationAnnotation, SegmentationPrediction)
):
if allow_missing:
logging.warning(
"Trying to filter metadata on SegmentationAnnotation or Prediction. "
"This will never work until metadata is supported for this format."
)
return AlwaysFalseComparison()
else:
raise RuntimeError(
f"{type(ann_or_pred)} doesn't support metadata filtering"
)
if allow_missing:
return (
ann_or_pred.metadata.get(field_name, AlwaysFalseComparison())
if ann_or_pred.metadata
else AlwaysFalseComparison()
)
    else:
        if not ann_or_pred.metadata:
            raise RuntimeError(
                f"No metadata on {ann_or_pred}, trying to access {field_name}"
            )
        return ann_or_pred.metadata[field_name]
def _filter_to_comparison_function( # pylint: disable=too-many-return-statements
filter_def: Filter,
) -> Callable[[Union[AnnotationTypes, PredictionTypes, Segment]], bool]:
"""Creates a comparison function from a filter configuration to apply to annotations or predictions
Parameters:
filter_def: Definition of a filter conditions
    Returns:
        A callable that takes an annotation, prediction, or segment and returns True if it satisfies the filter.
    """
if FilterType(filter_def.type) == FilterType.FIELD:
getter = functools.partial(
_attribute_getter, filter_def.key, filter_def.allow_missing
)
elif FilterType(filter_def.type) == FilterType.METADATA:
getter = functools.partial(
_metadata_field_getter, filter_def.key, filter_def.allow_missing
)
else:
raise NotImplementedError(
f"Unhandled filter type: {filter_def.type}. NOTE: Segmentation filters are handled elsewhere."
)
op = FilterOp(filter_def.op)
if op is FilterOp.GT:
return lambda ann_or_pred: getter(ann_or_pred) > filter_def.value
elif op is FilterOp.GTE:
return lambda ann_or_pred: getter(ann_or_pred) >= filter_def.value
elif op is FilterOp.LT:
return lambda ann_or_pred: getter(ann_or_pred) < filter_def.value
elif op is FilterOp.LTE:
return lambda ann_or_pred: getter(ann_or_pred) <= filter_def.value
elif op is FilterOp.EQ or op is FilterOp.EQEQ:
return lambda ann_or_pred: getter(ann_or_pred) == filter_def.value
elif op is FilterOp.NEQ:
return lambda ann_or_pred: getter(ann_or_pred) != filter_def.value
elif op is FilterOp.IN:
return lambda ann_or_pred: getter(ann_or_pred) in set(
filter_def.value # type: ignore
)
elif op is FilterOp.NOT_IN:
return lambda ann_or_pred: getter(ann_or_pred) not in set(
filter_def.value # type:ignore
)
else:
raise RuntimeError(
f"Fell through all op cases, no match for: '{op}' - MetadataFilter: {filter_def},"
)
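# Illustrative sketch (not part of the original module): how a FieldFilter is
# compiled into a predicate by _filter_to_comparison_function. The _ExampleBox
# dataclass is a hypothetical stand-in exposing only the attribute the filter
# reads; real usage would pass annotations or predictions.
def _example_field_filter_predicate():
    from dataclasses import dataclass

    @dataclass
    class _ExampleBox:
        label: str

    passes = _filter_to_comparison_function(
        FieldFilter("label", "in", ["car", "truck"])
    )
    # passes(_ExampleBox("car")) -> True, passes(_ExampleBox("tree")) -> False
    return passes(_ExampleBox("car")), passes(_ExampleBox("tree"))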
def _apply_field_or_metadata_filters(
filterable_sequence: Union[
Sequence[AnnotationTypes], Sequence[PredictionTypes], Sequence[Segment]
],
filters: DNFFieldOrMetadataFilters,
):
"""Apply filters to list of annotations or list of predictions or to a list of segments
Attributes:
filterable_sequence: Prediction, Annotation or Segment sequence
filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field
predicates. The innermost structures each describe a single column predicate. The list of inner predicates
is interpreted as a conjunction (AND), forming a more selective `and` multiple column predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
dnf_condition_functions = []
for or_branch in filters:
and_conditions = [
_filter_to_comparison_function(cond) for cond in or_branch
]
dnf_condition_functions.append(and_conditions)
filtered = []
for item in filterable_sequence:
for or_conditions in dnf_condition_functions:
if all(c(item) for c in or_conditions):
filtered.append(item)
break
return filtered
def _split_segment_filters(
dnf_filters: OrAndDNFFilters,
) -> Tuple[OrAndDNFFilters, OrAndDNFFilters]:
"""We treat Segment* filters differently -> this splits filters into two sets, one containing the
standard field, metadata branches and the other the segment filters.
"""
normal_or_branches = []
segment_or_branches = []
for and_branch in dnf_filters:
normal_filters = []
segment_filters = []
for filter_statement in and_branch:
if filter_statement.type in {
FilterType.SEGMENT_METADATA,
FilterType.SEGMENT_FIELD,
}:
segment_filters.append(filter_statement)
else:
normal_filters.append(filter_statement)
normal_or_branches.append(normal_filters)
segment_or_branches.append(segment_filters)
return normal_or_branches, segment_or_branches
def _filter_segments(
anns_or_preds: Union[
Sequence[SegmentationAnnotation], Sequence[SegmentationPrediction]
],
segment_filters: OrAndDNFFilters,
):
"""Filter Segments of a SegmentationAnnotation or Prediction
We have to treat this differently as metadata and labels are on nested Segment objects
"""
if len(segment_filters) == 0 or len(segment_filters[0]) == 0:
return anns_or_preds
# Transform segment filter types to field and metadata to iterate over annotation sub fields
transformed_or_branches = (
[]
) # type: List[List[Union[MetadataFilter, FieldFilter]]]
for and_branch in segment_filters:
transformed_and = [] # type: List[Union[MetadataFilter, FieldFilter]]
for filter_statement in and_branch:
if filter_statement.type == FilterType.SEGMENT_FIELD:
transformed_and.append(
FieldFilter(
filter_statement.key,
filter_statement.op,
filter_statement.value,
filter_statement.allow_missing,
)
)
elif filter_statement.type == FilterType.SEGMENT_METADATA:
transformed_and.append(
MetadataFilter(
filter_statement.key,
filter_statement.op,
filter_statement.value,
filter_statement.allow_missing,
)
)
else:
raise RuntimeError("Encountered a non SEGMENT_* filter type")
transformed_or_branches.append(transformed_and)
segments_filtered = []
for ann_or_pred in anns_or_preds:
if isinstance(
ann_or_pred, (SegmentationAnnotation, SegmentationPrediction)
):
ann_or_pred.annotations = _apply_field_or_metadata_filters(
ann_or_pred.annotations, transformed_or_branches # type: ignore
)
segments_filtered.append(ann_or_pred)
return segments_filtered
def apply_filters(
ann_or_pred: Union[Sequence[AnnotationTypes], Sequence[PredictionTypes]],
filters: Union[ListOfOrAndFilters, ListOfAndFilters],
):
"""Apply filters to list of annotations or list of predictions
Attributes:
ann_or_pred: Prediction or Annotation
filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field
predicates. The innermost structures each describe a single column predicate. The list of inner predicates
is interpreted as a conjunction (AND), forming a more selective `and` multiple column predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
if filters is None or len(filters) == 0:
return ann_or_pred
dnf_filters = ensureDNFFilters(filters)
filters, segment_filters = _split_segment_filters(dnf_filters)
filtered = _apply_field_or_metadata_filters(ann_or_pred, filters) # type: ignore
filtered = _filter_segments(filtered, segment_filters)
return filtered
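# Illustrative sketch (not part of the original module): applying a DNF filter
# to a list of box annotations. The BoxAnnotation fields mirror the docstring
# examples used elsewhere in this package; the "vehicle_color" metadata key is
# purely hypothetical.
def _example_apply_filters():
    from nucleus import BoxAnnotation

    boxes = [
        BoxAnnotation(
            label="car", x=0, y=0, width=10, height=10,
            reference_id="image_1", metadata={"vehicle_color": "red"},
        ),
        BoxAnnotation(
            label="bus", x=5, y=5, width=20, height=20,
            reference_id="image_1", metadata={"vehicle_color": "yellow"},
        ),
    ]
    # Keep boxes that are "car" AND red, OR any box labeled "bus".
    dnf = [
        [FieldFilter("label", "==", "car"), MetadataFilter("vehicle_color", "==", "red")],
        [FieldFilter("label", "==", "bus")],
    ]
    return apply_filters(boxes, dnf)  # both boxes pass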
def ensureDNFFilters(filters) -> OrAndDNFFilters:
"""JSON encoding creates a triple nested lists from the doubly nested tuples. This function creates the
tuple form again."""
if isinstance(
filters[0],
(
MetadataFilter,
FieldFilter,
AnnotationOrPredictionFilter,
SegmentFieldFilter,
SegmentMetadataFilter,
),
):
# Normalize into DNF
filters: ListOfOrAndFilters = [filters] # type: ignore
# NOTE: We have to handle JSON transformed tuples which become two or three layers of lists
if (
isinstance(filters, list)
and isinstance(filters[0], list)
and isinstance(filters[0][0], str)
):
filters = [filters]
if (
isinstance(filters, list)
and isinstance(filters[0], list)
and isinstance(filters[0][0], list)
):
formatted_filter = []
for or_branch in filters:
and_chain = [
AnnotationOrPredictionFilter(*condition)
for condition in or_branch
]
formatted_filter.append(and_chain)
filters = formatted_filter
return filters
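# Illustrative sketch (not part of the original module): ensureDNFFilters
# normalizes a flat AND chain of filters into the doubly nested DNF form,
# i.e. a single OR branch containing the chain.
def _example_ensure_dnf_filters():
    flat_and_chain = [
        FieldFilter("label", "==", "car"),
        MetadataFilter("vehicle_color", "==", "red"),
    ]
    return ensureDNFFilters(flat_and_chain)  # -> [[FieldFilter(...), MetadataFilter(...)]]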
def pretty_format_filters_with_or_and(
filters: Optional[Union[ListOfOrAndFilters, ListOfAndFilters]]
):
if filters is None:
return "No filters applied!"
dnf_filters = ensureDNFFilters(filters)
or_branches = []
for or_branch in dnf_filters:
and_statements = []
for and_branch in or_branch:
if and_branch.type == FilterType.FIELD:
class_name = "FieldFilter"
elif and_branch.type == FilterType.METADATA:
class_name = "MetadataFilter"
elif and_branch.type == FilterType.SEGMENT_FIELD:
class_name = "SegmentFieldFilter"
elif and_branch.type == FilterType.SEGMENT_METADATA:
class_name = "SegmentMetadataFilter"
else:
raise RuntimeError(
f"Un-handled filter type: {and_branch.type}"
)
op = (
and_branch.op.value
if isinstance(and_branch.op, FilterOp)
else and_branch.op
)
value_formatted = (
f'"{and_branch.value}"'
if isinstance(and_branch.value, str)
else f"{and_branch.value}".replace("'", '"')
)
statement = (
f'{class_name}("{and_branch.key}", "{op}", {value_formatted})'
)
and_statements.append(statement)
or_branches.append(and_statements)
and_to_join = []
for and_statements in or_branches:
joined_and = " and ".join(and_statements)
if len(or_branches) > 1 and len(and_statements) > 1:
joined_and = "(" + joined_and + ")"
and_to_join.append(joined_and)
full_statement = " or ".join(and_to_join)
return full_statement
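# Illustrative sketch (not part of the original module): the pretty printer
# renders DNF filters as a readable boolean expression, e.g.
# '(FieldFilter("label", "==", "car") and MetadataFilter("vehicle_color", "==", "red")) or FieldFilter("label", "==", "bus")'.
def _example_pretty_format():
    dnf = [
        [FieldFilter("label", "==", "car"), MetadataFilter("vehicle_color", "==", "red")],
        [FieldFilter("label", "==", "bus")],
    ]
    return pretty_format_filters_with_or_and(dnf)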
def compose_helpful_filtering_error(
ann_or_pred_list: Union[AnnotationList, PredictionList], filters
) -> List[str]:
prefix = (
"Annotations"
if isinstance(ann_or_pred_list, AnnotationList)
else "Predictions"
)
msg = []
msg.append(f"{prefix}: All items filtered out by:")
msg.append(f" {pretty_format_filters_with_or_and(filters)}")
msg.append("")
console = Console()
table = Table(
"Type",
"Count",
"Labels",
title=f"Original {prefix}",
title_justify="left",
)
for ann_or_pred_type, items in ann_or_pred_list.items():
if items and isinstance(
items[-1], (SegmentationAnnotation, SegmentationPrediction)
):
labels = set()
for seg in items:
labels.update(set(s.label for s in seg.annotations))
else:
labels = set(a.label for a in items)
if items:
table.add_row(ann_or_pred_type, str(len(items)), str(list(labels)))
with console.capture() as capture:
console.print(table)
msg.append(capture.get())
return msg
def filter_annotation_list(
annotations: AnnotationList, annotation_filters
) -> AnnotationList:
annotations = copy.deepcopy(annotations)
if annotation_filters is None or len(annotation_filters) == 0:
return annotations
annotations.box_annotations = apply_filters(
annotations.box_annotations, annotation_filters
)
annotations.line_annotations = apply_filters(
annotations.line_annotations, annotation_filters
)
annotations.polygon_annotations = apply_filters(
annotations.polygon_annotations, annotation_filters
)
annotations.cuboid_annotations = apply_filters(
annotations.cuboid_annotations, annotation_filters
)
annotations.category_annotations = apply_filters(
annotations.category_annotations, annotation_filters
)
annotations.multi_category_annotations = apply_filters(
annotations.multi_category_annotations, annotation_filters
)
annotations.segmentation_annotations = apply_filters(
annotations.segmentation_annotations, annotation_filters
)
return annotations
def filter_prediction_list(
predictions: PredictionList, prediction_filters
) -> PredictionList:
predictions = copy.deepcopy(predictions)
if prediction_filters is None or len(prediction_filters) == 0:
return predictions
predictions.box_predictions = apply_filters(
predictions.box_predictions, prediction_filters
)
predictions.line_predictions = apply_filters(
predictions.line_predictions, prediction_filters
)
predictions.polygon_predictions = apply_filters(
predictions.polygon_predictions, prediction_filters
)
predictions.cuboid_predictions = apply_filters(
predictions.cuboid_predictions, prediction_filters
)
predictions.category_predictions = apply_filters(
predictions.category_predictions, prediction_filters
)
predictions.segmentation_predictions = apply_filters(
predictions.segmentation_predictions, prediction_filters
)
return predictions
# ---------------------------------------------------------------------------
# End of nucleus/metrics/filtering.py  (scale_nucleus-0.15.10b0, PyPI)
# Next file: nucleus/metrics/cuboid_metrics.py
# ---------------------------------------------------------------------------
import sys
from abc import abstractmethod
from typing import List, Optional, Union
from nucleus.annotation import AnnotationList, CuboidAnnotation
from nucleus.prediction import CuboidPrediction, PredictionList
from .base import Metric, ScalarResult
from .cuboid_utils import detection_iou, label_match_wrapper, recall_precision
from .filtering import ListOfAndFilters, ListOfOrAndFilters
from .filters import confidence_filter
class CuboidMetric(Metric):
"""Abstract class for metrics of cuboids.
The CuboidMetric class automatically filters incoming annotations and
predictions for only cuboid annotations. It also filters
predictions whose confidence is less than the provided confidence_threshold.
Finally, it provides support for enforcing matching labels. If
`enforce_label_match` is set to True, then annotations and predictions will
only be matched if they have the same label.
To create a new concrete CuboidMetric, override the `eval` function
with logic to define a metric between cuboid annotations and predictions.
"""
def __init__(
self,
enforce_label_match: bool = False,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes CuboidMetric abstract object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Default False
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: MetadataFilter predicates. Predicates are expressed in disjunctive normal form (DNF),
like [[MetadataFilter('x', '==', 0), FieldFilter('label', '==', 'pedestrian')], ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single field predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective and multiple column predicate. Finally, the most outer list combines
these filters as a disjunction (OR).
prediction_filters: MetadataFilter predicates. Predicates are expressed in disjunctive normal form (DNF),
like [[MetadataFilter('x', '==', 0), FieldFilter('label', '==', 'pedestrian')], ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single field predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective and multiple column predicate. Finally, the most outer list combines
these filters as a disjunction (OR).
"""
self.enforce_label_match = enforce_label_match
assert 0 <= confidence_threshold <= 1
self.confidence_threshold = confidence_threshold
super().__init__(annotation_filters, prediction_filters)
@abstractmethod
def eval(
self,
annotations: List[CuboidAnnotation],
predictions: List[CuboidPrediction],
) -> ScalarResult:
# Main evaluation function that subclasses must override.
pass
def aggregate_score(self, results: List[ScalarResult]) -> ScalarResult: # type: ignore[override]
return ScalarResult.aggregate(results)
def call_metric(
self, annotations: AnnotationList, predictions: PredictionList
) -> ScalarResult:
if self.confidence_threshold > 0:
predictions = confidence_filter(
predictions, self.confidence_threshold
)
cuboid_annotations: List[CuboidAnnotation] = []
cuboid_annotations.extend(annotations.cuboid_annotations)
cuboid_predictions: List[CuboidPrediction] = []
cuboid_predictions.extend(predictions.cuboid_predictions)
eval_fn = label_match_wrapper(self.eval)
result = eval_fn(
cuboid_annotations,
cuboid_predictions,
enforce_label_match=self.enforce_label_match,
)
return result
class CuboidIOU(CuboidMetric):
"""Calculates the average IOU between cuboid annotations and predictions."""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
iou_2d: bool = False,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes CuboidIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
iou_2d: whether to return the BEV 2D IOU if true, or the 3D IOU if false.
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: MetadataFilter predicates. Predicates are expressed in disjunctive normal form (DNF), like
[[MetadataFilter('x', '=', 0), ...], ...]. DNF allows arbitrary boolean logical combinations of single field
predicates. The innermost structures each describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective and multiple column predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: MetadataFilter predicates. Predicates are expressed in disjunctive normal form (DNF), like
[[MetadataFilter('x', '=', 0), ...], ...]. DNF allows arbitrary boolean logical combinations of single field
predicates. The innermost structures each describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective and multiple column predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
self.iou_2d = iou_2d
super().__init__(
enforce_label_match=enforce_label_match,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
def eval(
self,
annotations: List[CuboidAnnotation],
predictions: List[CuboidPrediction],
) -> ScalarResult:
iou_3d_metric, iou_2d_metric = detection_iou(
predictions,
annotations,
threshold_in_overlap_ratio=self.iou_threshold,
)
weight = max(len(annotations), len(predictions))
if self.iou_2d:
avg_iou = iou_2d_metric.sum() / max(weight, sys.float_info.epsilon)
else:
avg_iou = iou_3d_metric.sum() / max(weight, sys.float_info.epsilon)
return ScalarResult(avg_iou, weight)
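# Illustrative sketch (not part of the original module): configuring a
# CuboidIOU metric that only scores annotations within 30 meters, assuming a
# hypothetical "distance_m" metadata key on the cuboid annotations.
def _example_cuboid_iou_metric():
    from .filtering import MetadataFilter

    return CuboidIOU(
        enforce_label_match=True,
        iou_threshold=0.5,
        iou_2d=False,
        annotation_filters=[[MetadataFilter("distance_m", "<", 30)]],
    )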
class CuboidPrecision(CuboidMetric):
"""Calculates the average precision between cuboid annotations and predictions."""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
use_2d_iou: bool = False,
):
"""Initializes CuboidIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: MetadataFilter predicates. Predicates are expressed in disjunctive normal form (DNF), like
[[MetadataFilter('x', '==', 0), ...], ...]. DNF allows arbitrary boolean logical combinations of single field
predicates. The innermost structures each describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective and multiple column predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: MetadataFilter predicates. Predicates are expressed in disjunctive normal form (DNF), like
[[MetadataFilter('x', '==', 0), ...], ...]. DNF allows arbitrary boolean logical combinations of single field
predicates. The innermost structures each describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective and multiple column predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
use_2d_iou: whether to use 2D or 3D IOU for precision calculation.
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
self.use_2d_iou = use_2d_iou
super().__init__(
enforce_label_match=enforce_label_match,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
def eval(
self,
annotations: List[CuboidAnnotation],
predictions: List[CuboidPrediction],
) -> ScalarResult:
stats = recall_precision(
predictions,
annotations,
threshold_in_overlap_ratio=self.iou_threshold,
use_2d=self.use_2d_iou,
)
weight = stats["tp_sum"] + stats["fp_sum"]
precision = stats["tp_sum"] / max(weight, sys.float_info.epsilon)
return ScalarResult(precision, weight)
class CuboidRecall(CuboidMetric):
"""Calculates the average recall between cuboid annotations and predictions."""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = True,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
use_2d_iou: bool = False,
):
"""Initializes CuboidIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
use_2d_iou: whether to use 2D or 3D IOU for calculation.
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
self.use_2d_iou = use_2d_iou
super().__init__(
enforce_label_match=enforce_label_match,
confidence_threshold=confidence_threshold,
annotation_filters=annotation_filters,
prediction_filters=prediction_filters,
)
def eval(
self,
annotations: List[CuboidAnnotation],
predictions: List[CuboidPrediction],
) -> ScalarResult:
stats = recall_precision(
predictions,
annotations,
threshold_in_overlap_ratio=self.iou_threshold,
use_2d_iou=self.use_2d_iou
)
weight = stats["tp_sum"] + stats["fn_sum"]
recall = stats["tp_sum"] / max(weight, sys.float_info.epsilon)
return ScalarResult(recall, weight)
# ---------------------------------------------------------------------------
# End of nucleus/metrics/cuboid_metrics.py  (scale_nucleus-0.15.10b0, PyPI)
# Next file: nucleus/metrics/segmentation_to_poly_metrics.py
# ---------------------------------------------------------------------------
import abc
import logging
from enum import Enum
from typing import List, Optional, Union
import numpy as np
from nucleus.annotation import AnnotationList, SegmentationAnnotation
from nucleus.metrics.base import MetricResult
from nucleus.metrics.filtering import (
ListOfAndFilters,
ListOfOrAndFilters,
apply_filters,
)
from nucleus.metrics.segmentation_utils import (
instance_mask_to_polys,
rasterize_polygons_to_segmentation_mask,
setup_iou_thresholds,
transform_poly_codes_to_poly_preds,
)
from nucleus.prediction import PredictionList
from .base import Metric, ScalarResult
from .polygon_metrics import (
PolygonAveragePrecision,
PolygonIOU,
PolygonPrecision,
PolygonRecall,
)
from .segmentation_loader import (
DummyLoader,
InMemoryLoader,
SegmentationMaskLoader,
)
from .segmentation_metrics import (
SegmentationIOU,
SegmentationMAP,
SegmentationPrecision,
SegmentationRecall,
)
class SegToPolyMode(str, Enum):
    """Strategy for comparing a segmentation mask against polygon/box annotations."""

    GENERATE_GT_FROM_POLY = "gt_from_poly"
    GENERATE_PRED_POLYS_FROM_MASK = "pred_polys_from_mask"
class SegmentationMaskToPolyMetric(Metric):
def __init__(
self,
enforce_label_match: bool = False,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
mode: SegToPolyMode = SegToPolyMode.GENERATE_GT_FROM_POLY,
):
"""Initializes PolygonMetric abstract object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Default False
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
        # Since segmentation annotations are very different from everything else, we can't rely on the base-class filtering
super().__init__(None, None)
self._annotation_filters = annotation_filters
self._prediction_filters = prediction_filters
self.enforce_label_match = enforce_label_match
assert 0 <= confidence_threshold <= 1
self.confidence_threshold = confidence_threshold
self.loader: SegmentationMaskLoader = DummyLoader()
self.mode = mode
def call_metric(
self, annotations: AnnotationList, predictions: PredictionList
) -> MetricResult:
assert (
len(predictions.segmentation_predictions) <= 1
), f"Expected only one segmentation mask, got {predictions.segmentation_predictions}"
prediction = (
predictions.segmentation_predictions[0]
if predictions.segmentation_predictions
else None
)
annotations.polygon_annotations = apply_filters(
annotations.polygon_annotations, self._annotation_filters # type: ignore
)
annotations.box_annotations = apply_filters(
annotations.box_annotations, self._annotation_filters # type: ignore
)
predictions.segmentation_predictions = apply_filters(
predictions.segmentation_predictions, self._prediction_filters # type: ignore
)
if prediction:
if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
pred_img = self.loader.fetch(prediction.mask_url)
ann_img, segments = rasterize_polygons_to_segmentation_mask(
annotations.polygon_annotations
+ annotations.box_annotations, # type:ignore
pred_img.shape,
)
# TODO: apply Segmentation filters after?
annotations.segmentation_annotations = [
SegmentationAnnotation(
"__no_url",
annotations=segments,
reference_id=annotations.polygon_annotations[
0
].reference_id,
)
]
return self.call_segmentation_metric(
annotations,
np.asarray(ann_img),
predictions,
np.asarray(pred_img),
)
elif self.mode == SegToPolyMode.GENERATE_PRED_POLYS_FROM_MASK:
pred_img = self.loader.fetch(prediction.mask_url)
pred_value, pred_polys = instance_mask_to_polys(
np.asarray(pred_img)
                )  # type: ignore
code_to_label = {
s.index: s.label for s in prediction.annotations
}
poly_predictions = transform_poly_codes_to_poly_preds(
prediction.reference_id,
pred_value,
pred_polys,
code_to_label,
)
return self.call_poly_metric(
annotations,
PredictionList(polygon_predictions=poly_predictions),
)
else:
raise RuntimeError(
f"Misonconfigured class. Got mode '{self.mode}', expected one of {list(SegToPolyMode)}"
)
else:
return ScalarResult(0, weight=0)
def call_segmentation_metric(
self,
annotations: AnnotationList,
ann_img: "np.ndarray",
predictions: PredictionList,
pred_img: "np.ndarray",
):
metric = self.configure_metric()
metric.loader = InMemoryLoader(
{
annotations.segmentation_annotations[0].mask_url: ann_img,
predictions.segmentation_predictions[0].mask_url: pred_img,
}
)
return metric(annotations, predictions)
def call_poly_metric(
self, annotations: AnnotationList, predictions: PredictionList
):
metric = self.configure_metric()
return metric(annotations, predictions)
def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
metric = self.configure_metric()
return metric.aggregate_score(results) # type: ignore
@abc.abstractmethod
def configure_metric(self):
pass
class SegmentationToPolyIOU(SegmentationMaskToPolyMetric):
def __init__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.0,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
mode: SegToPolyMode = SegToPolyMode.GENERATE_GT_FROM_POLY,
):
"""Initializes PolygonIOU object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match,
confidence_threshold,
annotation_filters,
prediction_filters,
mode,
)
def configure_metric(self):
if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
metric = SegmentationIOU(
self.annotation_filters,
self.prediction_filters,
self.iou_threshold,
)
else:
metric = PolygonIOU(
self.enforce_label_match,
self.iou_threshold,
self.confidence_threshold,
self.annotation_filters,
self.prediction_filters,
)
return metric
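# Illustrative sketch (not part of the original module): configuring the
# metric to convert the predicted mask into polygons before scoring them
# against polygon ground truth. The "road" label filter is purely
# hypothetical.
def _example_segmentation_to_poly_iou():
    from nucleus.metrics.filtering import FieldFilter

    return SegmentationToPolyIOU(
        iou_threshold=0.5,
        annotation_filters=[[FieldFilter("label", "==", "road")]],
        mode=SegToPolyMode.GENERATE_PRED_POLYS_FROM_MASK,
    )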
class SegmentationToPolyPrecision(SegmentationMaskToPolyMetric):
def __init__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
mode: SegToPolyMode = SegToPolyMode.GENERATE_GT_FROM_POLY,
):
"""Initializes SegmentationToPolyPrecision object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match,
confidence_threshold,
annotation_filters,
prediction_filters,
mode,
)
def configure_metric(self):
if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
metric = SegmentationPrecision(
self.annotation_filters,
self.prediction_filters,
self.iou_threshold,
)
else:
metric = PolygonPrecision(
self.enforce_label_match,
self.iou_threshold,
self.confidence_threshold,
self.annotation_filters,
self.prediction_filters,
)
return metric
class SegmentationToPolyRecall(SegmentationMaskToPolyMetric):
"""Calculates the recall between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonRecall
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonRecall()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
enforce_label_match: bool = False,
iou_threshold: float = 0.5,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
mode: SegToPolyMode = SegToPolyMode.GENERATE_GT_FROM_POLY,
):
"""Initializes PolygonRecall object.
Args:
enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to False
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
super().__init__(
enforce_label_match,
confidence_threshold,
annotation_filters,
prediction_filters,
mode,
)
def configure_metric(self):
if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
metric = SegmentationRecall(
self.annotation_filters,
self.prediction_filters,
self.iou_threshold,
)
else:
metric = PolygonRecall(
self.enforce_label_match,
self.iou_threshold,
self.confidence_threshold,
self.annotation_filters,
self.prediction_filters,
)
return metric
class SegmentationToPolyAveragePrecision(SegmentationMaskToPolyMetric):
"""Calculates the average precision between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonAveragePrecision
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonAveragePrecision(label="car")
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
label,
iou_threshold: float = 0.5,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
mode: SegToPolyMode = SegToPolyMode.GENERATE_GT_FROM_POLY,
):
"""Initializes PolygonRecall object.
Args:
iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.5
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
assert (
0 <= iou_threshold <= 1
), "IoU threshold must be between 0 and 1."
self.iou_threshold = iou_threshold
self.label = label
        super().__init__(
            enforce_label_match=False,
            confidence_threshold=0,
            annotation_filters=annotation_filters,
            prediction_filters=prediction_filters,
            mode=mode,
        )
def configure_metric(self):
if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
# TODO(gunnar): Add a label filter
metric = SegmentationPrecision(
self.annotation_filters,
self.prediction_filters,
self.iou_threshold,
)
else:
metric = PolygonAveragePrecision(
self.label,
self.iou_threshold,
self.annotation_filters,
self.prediction_filters,
)
return metric
class SegmentationToPolyMAP(SegmentationMaskToPolyMetric):
"""Calculates the mean average precision between box or polygon annotations and predictions.
::
from nucleus import BoxAnnotation, Point, PolygonPrediction
from nucleus.annotation import AnnotationList
from nucleus.prediction import PredictionList
from nucleus.metrics import PolygonMAP
box_anno = BoxAnnotation(
label="car",
x=0,
y=0,
width=10,
height=10,
reference_id="image_1",
annotation_id="image_1_car_box_1",
metadata={"vehicle_color": "red"}
)
polygon_pred = PolygonPrediction(
label="bus",
vertices=[Point(100, 100), Point(150, 200), Point(200, 100)],
reference_id="image_2",
annotation_id="image_2_bus_polygon_1",
confidence=0.8,
metadata={"vehicle_color": "yellow"}
)
annotations = AnnotationList(box_annotations=[box_anno])
predictions = PredictionList(polygon_predictions=[polygon_pred])
metric = PolygonMAP()
metric(annotations, predictions)
"""
# TODO: Remove defaults once these are surfaced more cleanly to users.
def __init__(
self,
iou_threshold: float = -1,
iou_thresholds: Union[List[float], str] = "coco",
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
mode: SegToPolyMode = SegToPolyMode.GENERATE_GT_FROM_POLY,
):
"""Initializes PolygonRecall object.
Args:
iou_thresholds: IOU thresholds to check AP at
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
        # Only warn when a caller actually passes the deprecated parameter
        # (the sentinel default of -1 means "not provided").
        if iou_threshold != -1:
            logging.warning(
                "Got deprecated parameter 'iou_threshold'. Ignoring it."
            )
self.iou_thresholds = setup_iou_thresholds(iou_thresholds)
super().__init__(
False, 0, annotation_filters, prediction_filters, mode
)
def configure_metric(self):
if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
# TODO(gunnar): Add a label filter
metric = SegmentationMAP(
self.annotation_filters,
self.prediction_filters,
self.iou_thresholds,
)
else:
            def patched_average_precision(annotations, predictions):
                ap_per_threshold = []
                # Deduplicate labels so each class contributes once per threshold.
                labels = {p.label for p in predictions.polygon_predictions}
                for threshold in self.iou_thresholds:
                    ap_per_label = []
                    for label in labels:
                        call_metric = PolygonAveragePrecision(
                            label,
                            iou_threshold=threshold,
                            annotation_filters=self.annotation_filters,
                            prediction_filters=self.prediction_filters,
                        )
                        result = call_metric(annotations, predictions)
                        ap_per_label.append(result.value)  # type: ignore
                    # One mean-over-labels AP value per IoU threshold.
                    ap_per_threshold.append(np.mean(ap_per_label))
                # Step-wise integration of the per-threshold APs over [0, 1].
                thresholds = np.concatenate([[0], self.iou_thresholds, [1]])
                steps = np.diff(thresholds)
                mean_ap = (
                    np.array(ap_per_threshold + [ap_per_threshold[-1]]) * steps
                ).sum()
                return ScalarResult(mean_ap)
metric = patched_average_precision
return metric
# ---------------------------------------------------------------------------
# End of nucleus/metrics/segmentation_to_poly_metrics.py  (scale_nucleus-0.15.10b0, PyPI)
# Next file: nucleus/metrics/categorization_metrics.py
# ---------------------------------------------------------------------------
from abc import abstractmethod
from dataclasses import dataclass
from typing import List, Optional, Set, Tuple, Union
from nucleus.annotation import AnnotationList, CategoryAnnotation
from nucleus.metrics.base import Metric, MetricResult, ScalarResult
from nucleus.metrics.filtering import ListOfAndFilters, ListOfOrAndFilters
from nucleus.metrics.filters import confidence_filter
from nucleus.prediction import CategoryPrediction, PredictionList
F1_METHODS = {"micro", "macro", "samples", "weighted", "binary"}
def to_taxonomy_labels(
anns_or_preds: Union[List[CategoryAnnotation], List[CategoryPrediction]]
) -> Set[str]:
"""Transforms annotation or prediction lists to taxonomy labels by joining them with a seperator (->)"""
labels = set()
for item in anns_or_preds:
taxonomy_label = (
f"{item.taxonomy_name}->{item.label}"
if item.taxonomy_name
else item.label
)
labels.add(taxonomy_label)
return labels
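# Illustrative sketch (not part of the original module): taxonomy labels are
# joined with "->" when a taxonomy name is present. The namedtuple below is a
# hypothetical stand-in exposing only the two attributes that
# to_taxonomy_labels reads.
def _example_to_taxonomy_labels():
    from collections import namedtuple

    _Cat = namedtuple("_Cat", ["label", "taxonomy_name"])
    return to_taxonomy_labels([_Cat("car", "vehicle"), _Cat("day", None)])
    # -> {"vehicle->car", "day"}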
@dataclass
class CategorizationResult(MetricResult):
annotations: List[CategoryAnnotation]
predictions: List[CategoryPrediction]
@property
def value(self):
# late import to avoid slow CLI init
from sklearn.metrics import f1_score
annotation_labels = to_taxonomy_labels(self.annotations)
prediction_labels = to_taxonomy_labels(self.predictions)
# TODO: Change task.py interface such that we can return label matching
# NOTE: Returning 1 if all taxonomy labels match else 0
value = f1_score(
list(annotation_labels), list(prediction_labels), average="macro"
)
return value
class CategorizationMetric(Metric):
"""Abstract class for metrics related to Categorization
The Categorization class automatically filters incoming annotations and
predictions for only categorization annotations. It also filters
predictions whose confidence is less than the provided confidence_threshold.
"""
def __init__(
self,
confidence_threshold: float = 0.0,
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""Initializes CategorizationMetric abstract object.
Args:
confidence_threshold: minimum confidence threshold for predictions to be taken into account for evaluation. Must be in [0, 1]. Default 0.0
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(annotation_filters, prediction_filters)
assert 0 <= confidence_threshold <= 1
self.confidence_threshold = confidence_threshold
@abstractmethod
def eval(
self,
annotations: List[
CategoryAnnotation
], # TODO(gunnar): List to conform with other APIs or single instance?
predictions: List[CategoryPrediction],
) -> CategorizationResult:
# Main evaluation function that subclasses must override.
# TODO(gunnar): Allow passing multiple predictions and selecting highest confidence? Allows us to show next
# contender. Are top-5 scores something that we care about?
# TODO(gunnar): How do we handle multi-head classification?
pass
@abstractmethod
def aggregate_score(self, results: List[CategorizationResult]) -> ScalarResult: # type: ignore[override]
pass
def call_metric(
self, annotations: AnnotationList, predictions: PredictionList
) -> CategorizationResult:
if self.confidence_threshold > 0:
predictions = confidence_filter(
predictions, self.confidence_threshold
)
cat_annotations, cat_predictions = self._filter_common_taxonomies(
annotations.category_annotations, predictions.category_predictions
)
result = self.eval(
cat_annotations,
cat_predictions,
)
return result
def _filter_common_taxonomies(
self,
annotations: List[CategoryAnnotation],
predictions: List[CategoryPrediction],
) -> Tuple[List[CategoryAnnotation], List[CategoryPrediction]]:
annotated_taxonomies = {ann.taxonomy_name for ann in annotations}
matching_predictions, matching_taxonomies = self._filter_in_taxonomies(
predictions, annotated_taxonomies
)
matching_annotations, _ = self._filter_in_taxonomies(
annotations, matching_taxonomies
)
return matching_annotations, matching_predictions # type: ignore
def _filter_in_taxonomies(
self,
anns_or_preds: Union[
List[CategoryAnnotation], List[CategoryPrediction]
],
filter_on_taxonomies: Set[Union[None, str]],
) -> Tuple[
Union[List[CategoryAnnotation], List[CategoryPrediction]],
Set[Union[None, str]],
]:
matching_predictions = []
matching_taxonomies = set()
for pred in anns_or_preds:
if pred.taxonomy_name in filter_on_taxonomies:
matching_predictions.append(pred)
matching_taxonomies.add(pred.taxonomy_name)
return matching_predictions, matching_taxonomies
class CategorizationF1(CategorizationMetric):
"""Evaluation method that matches categories and returns a CategorizationF1Result that aggregates to the F1 score"""
def __init__(
self,
confidence_threshold: float = 0.0,
f1_method: str = "macro",
annotation_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
prediction_filters: Optional[
Union[ListOfOrAndFilters, ListOfAndFilters]
] = None,
):
"""
Args:
confidence_threshold: minimum confidence threshold for predictions to be taken into account for evaluation.
Must be in [0, 1]. Default 0.0
f1_method: {'micro', 'macro', 'samples','weighted', 'binary'}, \
default='macro'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
annotation_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
prediction_filters: Filter predicates. Allowed formats are:
ListOfAndFilters where each Filter forms a chain of AND predicates.
or
ListOfOrAndFilters where Filters are expressed in disjunctive normal form (DNF), like
[[MetadataFilter("short_haired", "==", True), FieldFilter("label", "in", ["cat", "dog"]), ...].
DNF allows arbitrary boolean logical combinations of single field predicates. The innermost structures
each describe a single column predicate. The list of inner predicates is interpreted as a conjunction
(AND), forming a more selective `and` multiple field predicate.
Finally, the most outer list combines these filters as a disjunction (OR).
"""
super().__init__(
confidence_threshold, annotation_filters, prediction_filters
)
assert (
f1_method in F1_METHODS
), f"Invalid f1_method {f1_method}, expected one of {F1_METHODS}"
self.f1_method = f1_method
def eval(
self,
annotations: List[CategoryAnnotation],
predictions: List[CategoryPrediction],
) -> CategorizationResult:
"""
Note: this eval function only pairs annotations with predictions; the actual metric computation
happens in the aggregate step, since the F1 score is only meaningful over a collection.
"""
return CategorizationResult(
annotations=annotations, predictions=predictions
)
def aggregate_score(self, results: List[CategorizationResult]) -> ScalarResult: # type: ignore[override]
# late import to avoid slow CLI init
from sklearn.metrics import f1_score
gt = []
predicted = []
for result in results:
gt.extend(list(to_taxonomy_labels(result.annotations)))
predicted.extend(list(to_taxonomy_labels(result.predictions)))
value = f1_score(gt, predicted, average=self.f1_method)
return ScalarResult(value)
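# Illustrative sketch of the aggregation above: per-item results only pair annotations with
# predictions, and the F1 score is computed once over all collected label strings. Building
# CategorizationResult objects is omitted here since their construction is not shown in this file.
def _f1_aggregation_sketch() -> float:
    from sklearn.metrics import f1_score  # same late import pattern as aggregate_score
    gt = ["animal.cat", "animal.dog", "animal.cat"]
    predicted = ["animal.cat", "animal.cat", "animal.cat"]
    return f1_score(gt, predicted, average="macro")  # 0.4 for this toy example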
|
/scale_nucleus-0.15.10b0.tar.gz/scale_nucleus-0.15.10b0/nucleus/metrics/categorization_metrics.py
| 0.894732 | 0.633439 |
categorization_metrics.py
|
pypi
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Protocol
import numpy as np
import numpy.typing as npt
from pyquaternion import Quaternion as PyQuaternion
from scipy.spatial.transform import Rotation as R
@dataclass
class Point3D:
x: float
y: float
z: float
@dataclass
class Quaternion:
x: float
y: float
z: float
w: float
class IPose(Protocol):
position: Point3D
heading: Quaternion
@dataclass
class Pose:
"""
A data class representing a Pose in a 3D space. The pose includes both position and orientation.
Attributes:
position: A Point3D object representing the position in 3D space.
heading: A Quaternion object representing the orientation in the 3D space.
"""
position: Point3D
heading: Quaternion
@classmethod
def to_transform(cls, pose: IPose) -> npt.NDArray:
"""
Creates a homogeneous transformation matrix from a Pose.
Args:
pose: An object of type IPose from which to create the transformation matrix.
Returns:
A 4x4 NumPy ndarray representing the homogeneous transformation matrix.
"""
transform_matrix = np.eye(4)
transform_matrix[:3, :3] = PyQuaternion(**pose.heading.__dict__).rotation_matrix
transform_matrix[:3, 3] = [
pose.position.x,
pose.position.y,
pose.position.z,
]
return transform_matrix
@classmethod
def from_transform(cls, transform_matrix: npt.NDArray) -> Pose:
"""
Creates a Pose from a homogeneous transformation matrix.
Args:
transform_matrix: A 4x4 NumPy ndarray representing the homogeneous transformation matrix.
Returns:
A Pose object corresponding to the provided transformation matrix.
"""
quaternion = PyQuaternion(matrix=transform_matrix[:3, :3])
return Pose(
position=Point3D(**{k: v for k, v in zip("xyz", transform_matrix[:3, 3])}),
heading=Quaternion(
x=quaternion.x, y=quaternion.y, z=quaternion.z, w=quaternion.w
),
)
@classmethod
def from_pose_like(cls, pose_like: IPose) -> Pose:
"""
Creates a Pose from another object of type IPose.
Args:
pose_like: An object of type IPose from which to create a Pose.
Returns:
A Pose object with the same attributes as the provided IPose object.
"""
return Pose(position=pose_like.position, heading=pose_like.heading)
@classmethod
def from_rt(cls, rotation: R, translation: Iterable[float]) -> Pose: # type: ignore[no-any-unimported]
"""
Creates a Pose from a Rotation and a translation vector.
Args:
rotation: A scipy Rotation object representing the orientation.
translation: An iterable of floats representing the translation vector in 3D space.
Returns:
A Pose object with the specified rotation and translation.
"""
return Pose(
position=Point3D(*translation),
heading=Quaternion(**{k: v for k, v in zip("xyzw", rotation.as_quat())}),
)
@dataclass
class GPSPose:
lat: float
lon: float
bearing: float
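# Illustrative sketch: Pose.to_transform and Pose.from_transform should round-trip between a
# Pose and its 4x4 homogeneous transformation matrix. The identity quaternion (x=y=z=0, w=1)
# keeps the rotation block equal to eye(3).
def _pose_roundtrip_sketch() -> Pose:
    pose = Pose(
        position=Point3D(1.0, 2.0, 3.0),
        heading=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0),
    )
    matrix = Pose.to_transform(pose)          # rotation block eye(3), translation (1, 2, 3)
    recovered = Pose.from_transform(matrix)   # expected to match the original pose
    assert np.allclose(matrix[:3, 3], [1.0, 2.0, 3.0])
    return recovered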
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/models/Pose.py
| 0.972972 | 0.726256 |
Pose.py
|
pypi
|
import numpy as np
import pandas as pd
from scipy.spatial.transform import Rotation
from scipy.interpolate import interp1d
from typing import Union
from numpy.typing import ArrayLike
class CuboidPath(pd.DataFrame):
"""CuboidPath class representing a list of cuboids at given timestamps, extending pandas DataFrame."""
POSITION = ["x", "y", "z"]
ROTATION = ["yaw", "pitch", "roll"]
DIMENSIONS = ["dx", "dy", "dz"]
COLUMNS = POSITION + ROTATION + DIMENSIONS
def __init__(self, data: Union[ArrayLike, pd.DataFrame], index: ArrayLike = None):
"""Initializes the CuboidPath object.
Args:
data (Union[ArrayLike, pd.DataFrame]): An array or DataFrame of cuboids with shape (N, 9), where N is the number of cuboids.
index (ArrayLike, optional): An array-like object representing the index for the CuboidPath DataFrame. Defaults to None.
"""
super().__init__(
data=data, index=index, columns=CuboidPath.COLUMNS, dtype=float
)
def copy(self):
"""Creates a copy of the current CuboidPath object.
Returns:
CuboidPath: A new CuboidPath object with copied data and index.
"""
return CuboidPath(self.values.copy(), index=self.index)
@property
def positions(self):
return self[CuboidPath.POSITION].values
@property
def rotations(self):
return self[CuboidPath.ROTATION].values
@property
def dimensions(self):
return self[CuboidPath.DIMENSIONS].values
@classmethod
def from_csv(cls, file: str):
"""Creates a CuboidPath object from a CSV file.
Args:
file (str): The path to the CSV file.
Returns:
CuboidPath: A CuboidPath object with data read from the CSV file.
"""
return CuboidPath(pd.read_csv(file, index_col=0))
@classmethod
def identity(cls, n: int = 1, index: ArrayLike = None):
"""Create a CuboidPath object with identity cuboids.
Args:
n (int, optional): The number of identity cuboids. Defaults to 1.
index (ArrayLike, optional): An array-like object representing the index for the CuboidPath DataFrame. Defaults to None.
Returns:
CuboidPath: A CuboidPath object with identity cuboids.
"""
identity_cuboid = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
return CuboidPath(np.tile(identity_cuboid, n).reshape((n, 9)), index=index)
@classmethod
def from_matrix(cls, matrix: ArrayLike, index: ArrayLike = None):
"""Creates a CuboidPath object from transformation matrices.
Args:
matrix (ArrayLike): A 3D array-like object of transformation matrices with shape (N, 4, 4), where N is the number of matrices.
index (ArrayLike, optional): An array-like object representing the index for the CuboidPath DataFrame. Defaults to None.
Returns:
CuboidPath: A CuboidPath object with cuboids represented by the given transformation matrices.
"""
matrix = np.asarray(matrix).reshape((-1, 4, 4))
positions = matrix[:, :3, 3]
rotations = Rotation.from_matrix(matrix[:, :3, :3]).as_euler(
"zyx", degrees=True
)
# Per-matrix column norms recover the scale factors (dx, dy, dz) from the rotation*scale block.
scales = np.linalg.norm(matrix[:, :3, :3], axis=1)
cuboids = np.hstack([positions, rotations, scales])
return CuboidPath(cuboids, index=index)
def as_matrix(self):
"""Convert the CuboidPath object to transformation matrices.
Returns:
np.ndarray: A 3D array of transformation matrices with shape (N, 4, 4), where N is the number of cuboids.
"""
positions = self.loc[:, CuboidPath.POSITION].values
rotations = self.loc[:, CuboidPath.ROTATION].values
dimensions = self.loc[:, CuboidPath.DIMENSIONS].values
matrix = np.tile(np.eye(4), (len(self), 1, 1))
rotation_matrices = Rotation.from_euler(
"zyx", rotations, degrees=True
).as_matrix()
scale_matrices = np.array([np.diag(s) for s in dimensions])
# Combine scale and rotation
transform_matrices = np.matmul(rotation_matrices, scale_matrices)
matrix[:, :3, :3] = transform_matrices
matrix[:, :3, 3] = positions
return matrix
# Operations
def interpolate(self, index: ArrayLike, fill_value="nearest"):
"""Interpolate the CuboidPath object at the given index.
Args:
index (ArrayLike): An array-like object representing the index for the interpolated cuboids.
fill_value (optional): The fill value for out-of-bounds data. Defaults to "nearest", which holds the first/last cuboid for timestamps outside the index range.
Returns:
CuboidPath: A CuboidPath object with poses interpolated at the given index.
"""
if np.array_equal(self.index, index):
return self.copy()
if len(self.index) == 1:
return CuboidPath(self.take([0] * len(index)).values, index=index)
x = self.index
y = self.values
if fill_value == "nearest":
fill_value = (y[0], y[-1])
interpolator = interp1d(
x=x, y=y, bounds_error=False, fill_value=fill_value, axis=0
)
values = interpolator(index)
result = CuboidPath(values, index=index)
return result
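# Illustrative sketch: a CuboidPath indexed by timestamp can be resampled at new timestamps;
# with the default fill_value="nearest", out-of-range queries repeat the nearest endpoint cuboid.
def _cuboid_interpolation_sketch() -> CuboidPath:
    path = CuboidPath(
        [
            [0, 0, 0, 0, 0, 0, 1, 1, 1],
            [10, 0, 0, 90, 0, 0, 1, 1, 1],
        ],
        index=[0, 100],
    )
    resampled = path.interpolate([0, 50, 100, 200])
    # resampled.positions[1] is approximately (5, 0, 0); the query at 200 repeats the last cuboid.
    return resampled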
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/models/paths/cuboid_path.py
| 0.949236 | 0.756987 |
cuboid_path.py
|
pypi
|
from __future__ import annotations
from functools import reduce
from typing import Iterator, Union, Optional
import numpy as np
import numpy.typing as npt
import pandas as pd
from numpy.typing import ArrayLike
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation
IDENTITY = (0, 0, 0, 0, 0, 0, 1)
def transform_points(points: npt.NDArray, transform: npt.NDArray):
"""Applies a transformation to points.
Args:
points (npt.NDArray): An array of 3D points with shape (N, 3), where N is the number of points.
transform (npt.NDArray): A 3x4 transformation matrix.
Returns:
npt.NDArray: An array of transformed 3D points with shape (N, 3).
"""
return points @ transform[:3, :3].T + transform[:3, 3]
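# Illustrative sketch: transform_points applies the rotation block and translation of a 4x4
# matrix to an (N, 3) array of points; a pure translation of (1, 0, 0) shifts every point in x.
def _transform_points_sketch() -> npt.NDArray:
    transform = np.eye(4)
    transform[:3, 3] = [1.0, 0.0, 0.0]
    points = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
    return transform_points(points, transform)  # [[1, 0, 0], [2, 2, 3]]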
class PosePath(pd.DataFrame):
"""PosePath class representing a list of poses at given timestamps, extending pandas DataFrame."""
XYZ = ["x", "y", "z"]
QUAT = ["qx", "qy", "qz", "qw"]
COLUMNS = XYZ + QUAT
def __init__(
self, data: Union[ArrayLike, pd.DataFrame], index: Optional[ArrayLike] = None
):
"""Initializes the PosePath object.
Args:
data (Union[ArrayLike, pd.DataFrame]): An array or DataFrame of poses with shape (N, 7), where N is the number of poses.
index (ArrayLike, optional): An array-like object representing the index for the PosePath DataFrame. Defaults to None.
"""
super().__init__(data=data, index=index, columns=PosePath.COLUMNS, dtype=float)
def copy(self) -> PosePath:
"""Creates a copy of the current PosePath object.
Returns:
PosePath: A new PosePath object with copied data and index.
"""
return PosePath(self.values.copy(), index=self.index)
@classmethod
def from_rt(
cls,
rotation: Rotation,
translation: ArrayLike,
index: Optional[ArrayLike] = None,
) -> PosePath:
"""Creates a PosePath object from rotations and translations.
Args:
rotation (Rotation): A Rotation object representing the rotations.
translation (ArrayLike): An array-like object of translations with shape (N, 3), where N is the number of translations.
index (ArrayLike, optional): An array-like object representing the index for the PosePath DataFrame. Defaults to None.
Returns:
PosePath: A PosePath object with the given rotation and translation.
"""
positions = np.asarray(translation).reshape((-1, 3))
headings = rotation.as_quat().reshape((-1, 4))
assert len(headings) == len(positions)
return PosePath(np.hstack([positions, headings]), index=index)
@classmethod
def from_csv(cls, file: str) -> PosePath:
"""Creates a PosePath object from a CSV file.
Args:
file (str): The path to the CSV file.
Returns:
PosePath: A PosePath object with data read from the CSV file.
"""
return PosePath(pd.read_csv(file, index_col=0))
@classmethod
def identity(cls, n: int = 1, index: Optional[ArrayLike] = None) -> PosePath:
"""Create a PosePath object with identity poses.
Args:
n (int, optional): The number of identity poses. Defaults to 1.
index (ArrayLike, optional): An array-like object representing the index for the PosePath DataFrame. Defaults to None.
Returns:
PosePath: A PosePath object with identity poses.
"""
return PosePath(np.tile(IDENTITY, n).reshape((n, 7)), index=index)
@classmethod
def from_matrix(
cls, matrix: ArrayLike, index: Optional[ArrayLike] = None
) -> PosePath:
"""Creates a PosePath object from transformation matrices.
Args:
matrix (ArrayLike): A 3D array-like object of transformation matrices with shape (N, 4, 4), where N is the number of matrices.
index (ArrayLike, optional): An array-like object representing the index for the PosePath DataFrame. Defaults to None.
Returns:
PosePath: A PosePath object with poses represented by the given transformation matrices.
"""
matrix = np.asarray(matrix).reshape((-1, 4, 4))
return PosePath.from_rt(
Rotation.from_matrix(matrix[:, :3, :3]), matrix[:, :3, 3], index=index
)
def as_matrix(self) -> npt.NDArray:
"""Convert the PosePath object to transformation matrices.
Returns:
npt.NDArray: A 3D array of transformation matrices with shape (N, 4, 4), where N is the number of poses.
"""
matrix = np.tile(np.eye(4), (len(self), 1, 1))
matrix[:, :3, :3] = Rotation.from_quat(self.headings).as_matrix()
matrix[:, :3, 3] = self.positions
return matrix
@classmethod
def from_euler(
cls, seq: str, angles: ArrayLike, degrees: bool = False
) -> PosePath:
"""Creates a PosePath object from Euler angles.
Args:
seq (str): The Euler sequence of axes, such as 'xyz', 'zyx', etc.
angles (ArrayLike): An array-like object of Euler angles with shape (N, len(seq)), where N is the number of poses.
degrees (bool, optional): If True, angles are in degrees. Defaults to False.
Returns:
PosePath: A PosePath object with poses represented by the given Euler angles.
"""
angles = np.asarray(angles).reshape((-1, len(seq)))
path = PosePath.identity(n=len(angles))
path.headings = Rotation.from_euler(seq, angles, degrees).as_quat()
return path
def as_euler(self, seq: str, degrees: bool = False) -> npt.NDArray:
"""Converts the PosePath object to Euler angles.
Args:
seq (str): The Euler sequence of axes, such as 'xyz', 'zyx', etc.
degrees (bool, optional): If True, angles are in degrees. Defaults to False.
Returns:
np.ndarray: An array of Euler angles with shape (N, len(seq)), where N is the number of poses.
"""
return Rotation.from_quat(self.headings).as_euler(seq, degrees=degrees)
@classmethod
def from_positions(cls, positions: ArrayLike) -> PosePath:
"""Creates a PosePath object with given positions.
Args:
positions (ArrayLike): An array-like object of positions with shape (N, 3), where N is the number of positions.
Returns:
PosePath: A PosePath object with poses represented by the given positions and identity orientations.
"""
positions = np.asarray(positions).reshape((-1, 3))
path = PosePath.identity(len(positions))
path.positions = positions
return path
@property
def positions(self) -> npt.NDArray:
"""Gets the positions of the poses.
Returns:
npt.NDArray: An array of positions with shape (N, 3), where N is the number of poses.
"""
return self.values[:, 0:3]
@positions.setter
def positions(self, values: ArrayLike) -> None:
"""Set the positions of the poses.
Args:
values (ArrayLike): An array-like object of positions with shape (N, 3), where N is the number of positions.
"""
self.values[:, 0:3] = np.asarray(values).reshape((-1, 3))
@property
def headings(self) -> npt.NDArray:
"""Gets the orientations (headings) of the poses in quaternions.
Returns:
npt.NDArray: An array of quaternions with shape (N, 4), where N is the number of poses.
"""
return self.values[:, 3:7]
@headings.setter
def headings(self, values: ArrayLike) -> None:
"""Sets the orientations (headings) of the poses in quaternions.
Args:
values (ArrayLike): An array-like object of quaternions with shape (N, 4), where N is the number of orientations.
"""
self.values[:, 3:7] = np.asarray(values).reshape((-1, 4))
# Operations
def interpolate(self, index: ArrayLike, fill_value="nearest") -> PosePath:
"""Interpolate the PosePath object at the given index.
Args:
index (ArrayLike): An array-like object representing the index for the interpolated poses.
fill_value (optional): The fill value for out-of-bounds data. Defaults to "nearest", which holds the first/last pose for timestamps outside the index range; "identity" fills with the identity pose.
Returns:
PosePath: A PosePath object with poses interpolated at the given index.
"""
if np.array_equal(self.index, index):
return self.copy()
if len(self.index) == 1:
return PosePath(self.take([0] * len(index)).values, index=index)
x = self.index
y = np.hstack([self.positions, self.as_euler("zyx")])
if fill_value == "nearest":
fill_value = (y[0], y[-1])
if fill_value == "identity":
fill_value = np.zeros(6)
interpolator = interp1d(
x=x, y=y, bounds_error=False, fill_value=fill_value, axis=0
)
values = interpolator(index)
result = PosePath.from_rt(
Rotation.from_euler("zyx", values[:, 3:]), values[:, :3], index=index
)
return result
def invert(self) -> PosePath:
"""Creates a new PosePath instance with inverted poses.
Returns:
PosePath: A PosePath object with inverted poses.
"""
inv_rotations = Rotation.from_quat(self.headings).inv()
inv_positions = -inv_rotations.apply(self.positions)
return PosePath.from_rt(inv_rotations, inv_positions, index=self.index)
def __matmul__(self, other: Union[PosePath, ArrayLike]) -> PosePath:
"""Matrix multiplication of the PosePath object with another PosePath object or a transformation matrix.
Args:
other (Union['PosePath', ArrayLike]): Another PosePath object or a transformation matrix/array.
Returns:
PosePath: A PosePath object with poses resulting from the matrix multiplication.
"""
if isinstance(other, PosePath):
resampled = other.interpolate(self.index)
return PosePath.from_matrix(
self.as_matrix() @ resampled.as_matrix(), index=self.index
)
if isinstance(other, np.ndarray):
return PosePath.from_matrix(self.as_matrix() @ other, index=self.index)
def __rmatmul__(self, other: Union[PosePath, ArrayLike]) -> PosePath:
"""Right matrix multiplication of the PosePath object with another PosePath object or a transformation matrix.
Args:
other (Union['PosePath', ArrayLike]): Another PosePath object or a transformation matrix/array.
Returns:
PosePath: A PosePath object with poses resulting from the matrix multiplication.
"""
if isinstance(other, PosePath):
resampled = other.interpolate(self.index)
return PosePath.from_matrix(
resampled.as_matrix() @ self.as_matrix(), index=self.index
)
if isinstance(other, np.ndarray):
return PosePath.from_matrix(other @ self.as_matrix(), index=self.index)
@classmethod
def multiply(cls, paths: Iterator[PosePath]) -> PosePath:
"""Composes multiple PosePath objects.
Args:
paths (Iterator['PosePath']): An iterator of PosePath objects.
Returns:
PosePath: A PosePath object with poses resulting from the matrix multiplication of the given PosePath objects.
"""
return reduce(cls.__rmatmul__, paths)
def transform_points(self, points: ArrayLike) -> npt.NDArray:
"""Transform points using the poses in the PosePath object.
This method takes an array-like object of points and transforms them using the poses in the PosePath object.
The input points must have a shape of (N, 3), where N is the number of points. The output is an array of
transformed points with the same shape as the input points.
The following scenarios are considered:
- If the PosePath object has only one pose, this method applies the transformation to all points.
- If the input points have only one point, this method applies all poses to transform that single point.
- If the number of input points is equal to the number of poses in the PosePath object, this method applies
each pose to transform the corresponding point (i.e., pose i transforms point i).
Args:
points (ArrayLike): An array-like object of points with shape (N, 3), where N is the number of points.
Returns:
npt.NDArray: An array of transformed points with shape (N, 3), where N is the number of points.
Raises:
ValueError: If the number of input points and poses do not satisfy the mentioned conditions.
"""
points = np.asarray(points).reshape((-1, 3))
if len(self) == 1:
transform = self[:1].as_matrix()[0]
return transform_points(points, transform)
if len(points) == 1:
return np.array([transform_points(points[0], t) for t in self.as_matrix()])
if len(points) == len(self):
return np.array(
[transform_points(points[i], t) for i, t in enumerate(self.as_matrix())]
)
raise ValueError(
"Expected equal numbers of poses and points, or a single pose, or a single point."
)
def apply_interpolated_transform_to_points(
self, points: ArrayLike, timestamps: ArrayLike, resolution: float = 1e6
) -> npt.NDArray:
"""
Applies interpolated transformations from the PosePath to the given points based on their corresponding timestamps and the specified resolution.
This method groups points that have timestamps closer than the provided resolution value and applies the same transformation
to each point within the group, improving performance by reducing the number of separate interpolations and transformations required.
Parameters
----------
points : ArrayLike
An array-like object containing the 3D points to be transformed, with shape (N, 3), where N is the number of points.
timestamps : ArrayLike
An array-like object containing the timestamps corresponding to each point in the points array, with shape (N,).
resolution : float, optional, default: 1e6
The time resolution for grouping points. Points with timestamps closer than this value will receive the same transform.
Returns
-------
npt.NDArray
A numpy array containing the transformed points, with shape (N, 3).
"""
points = np.asarray(points).reshape((-1, 3))
# Create intervals and groups
timestamps = pd.Series(timestamps)
intervals = (timestamps / resolution).astype(int)
groups = timestamps.groupby(intervals)
# Get groups timestamps and interpolate poses
interval_timestamps = groups.mean()
# Resample poses to generate the transforms
transforms = self.interpolate(interval_timestamps).as_matrix()
transformed_points = np.vstack(
[
transform_points(points[group.index], transforms[index])
for index, [_, group] in enumerate(groups)
]
)
return transformed_points
def make_relative(self) -> PosePath:
"""Creates a new PosePath object with poses relative to the first pose.
Returns:
PosePath: A new PosePath object with poses relative to the first pose in the original PosePath object.
"""
inv_first = self[:1].invert().as_matrix()[0]
return inv_first @ self
def __getitem__(self, key):
result = super().__getitem__(key)
if isinstance(result, pd.Series):
return result
elif isinstance(result, pd.DataFrame) and np.array_equal(
result.columns, PosePath.COLUMNS
):
return PosePath(result)
return result
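# Illustrative sketch: composing with @ resamples the right-hand path onto the left-hand index,
# and make_relative re-expresses a path relative to its first pose, so the first resulting pose
# is the identity.
def _pose_path_compose_sketch() -> PosePath:
    path = PosePath.from_positions([[0, 0, 0], [1, 0, 0], [2, 0, 0]])
    offset = PosePath.from_positions([[0, 5, 0]])  # a single pose, broadcast on resampling
    shifted = path @ offset                        # every position gains +5 in y
    relative = shifted.make_relative()             # first pose moved back to the origin
    assert np.allclose(relative.positions[0], [0, 0, 0])
    return relative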
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/models/paths/pose_path.py
| 0.977252 | 0.681283 |
pose_path.py
|
pypi
|
from dataclasses import dataclass
from typing import List, Literal, Optional, Union
from enum import Enum
import scale_sensor_fusion_io.spec.sfs as SFS
SensorID = SFS.SensorID
AnnotationID = SFS.AnnotationID
PosePath = SFS.PosePath
PointsSensorPoints = SFS.PointsSensorPoints
PointsSensor = SFS.PointsSensor
LidarSensorPoints = SFS.LidarSensorPoints
LidarSensorFrame = SFS.LidarSensorFrame
LidarSensor = SFS.LidarSensor
RadarSensorPoints = SFS.RadarSensorPoints
RadarSensorFrame = SFS.RadarSensorFrame
RadarSensor = SFS.RadarSensor
DistortionModel = SFS.DistortionModel
CameraDistortion = SFS.CameraDistortion
CameraIntrinsics = SFS.CameraIntrinsics
CameraSensorVideo = SFS.CameraSensorVideo
CameraSensorImage = SFS.CameraSensorImage
CameraSensor = SFS.CameraSensor
OdometrySensor = SFS.OdometrySensor
AttributePath = SFS.AttributePath
CuboidPath = SFS.CuboidPath
CuboidActivationPath = SFS.CuboidActivationPath
CuboidProjectionPath = SFS.CuboidProjectionPath
@dataclass
class StaticPath:
timestamps: List[int]
values: List[bool]
@dataclass
class CuboidMetadata:
static_path: StaticPath
@dataclass
class _CuboidAnnotationBase:
cuboid_metadata: CuboidMetadata
@dataclass
class CuboidAnnotation(SFS.CuboidAnnotation, _CuboidAnnotationBase):
pass
AttributesAnnotation = SFS.AttributesAnnotation
AnnotationPath = SFS.AnnotationPath
Box2DAnnotation = SFS.Box2DAnnotation
Polygon2DAnnotation = SFS.Polygon2DAnnotation
Point2DAnnotation = SFS.Point2DAnnotation
PointAnnotation = SFS.PointAnnotation
PolylineAnnotation = SFS.PolylineAnnotation
Polyline2DAnnotation = SFS.Polyline2DAnnotation
EventAnnotation = SFS.EventAnnotation
LabeledPoint = SFS.LabeledPoint
LabeledPointsAnnotation = SFS.LabeledPointsAnnotation
@dataclass
class LinkMetadata:
anchored: Optional[bool] = False
## NOTE: This is implemented this way to be able to inherit from the SFS dataclasses, which contain defaults
@dataclass
class _LinkAnnotationBase:
metadata: LinkMetadata
@dataclass
class LinkAnnotation(SFS.LinkAnnotation, _LinkAnnotationBase):
pass
class CuboidLayerMode(Enum):
Position = "position"
PositionRotation = "position-rotation"
ZLevel = "z-level"
XY = "XY"
ICP = "icp"
@dataclass
class LocalizationAdjustmentLayerMetadata:
layer_type: Literal["base", "cuboid"]
order: int
name: str
cuboids: Optional[List[CuboidPath]] = None
algorithm: Optional[CuboidLayerMode] = None
## NOTE: This is implemented this way to be able to inherit from the SFS dataclasses, which contain defaults
@dataclass
class _LocalizationAdjustmentAnnotationBase:
layer_metadata: LocalizationAdjustmentLayerMetadata
@dataclass
class LocalizationAdjustmentAnnotation(
SFS.LocalizationAdjustmentAnnotation, _LocalizationAdjustmentAnnotationBase
):
pass
ObjectAnnotation = SFS.ObjectAnnotation
Sensor = Union[CameraSensor, LidarSensor, RadarSensor, OdometrySensor, PointsSensor]
Annotation = Union[
CuboidAnnotation,
AttributesAnnotation,
Box2DAnnotation,
Point2DAnnotation,
Polyline2DAnnotation,
Polygon2DAnnotation,
PolylineAnnotation,
PointAnnotation,
LinkAnnotation,
LabeledPointsAnnotation,
LocalizationAdjustmentAnnotation,
ObjectAnnotation,
]
@dataclass
class Scene:
version: Literal["5.1"] = "5.1"
sensors: Optional[List[Sensor]] = None
annotations: Optional[List[Annotation]] = None
attributes: Optional[List[AttributePath]] = None
time_offset: Optional[int] = None
time_unit: Optional[Literal["microseconds", "nanoseconds"]] = "microseconds"
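# Illustrative sketch: the v5 wrappers extend the SFS dataclasses with extra metadata; for
# example, a cuboid-based localization adjustment layer records its ordering and algorithm.
def _layer_metadata_sketch() -> LocalizationAdjustmentLayerMetadata:
    return LocalizationAdjustmentLayerMetadata(
        layer_type="cuboid",
        order=1,
        name="icp_refinement",
        algorithm=CuboidLayerMode.ICP,
    )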
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/spec/v5/types.py
| 0.917834 | 0.382545 |
types.py
|
pypi
|
from dataclasses import dataclass
from typing import List, Literal, Optional, Union
import numpy as np
import numpy.typing as npt
SensorID = Union[str, int]
AnnotationID = Union[str, int]
# Define PosePath dataclass
@dataclass
class PosePath:
timestamps: List[int]
values: List[List[float]] # x y z qx qy qz qw
# Define PointsSensorPoints dataclass
@dataclass
class PointsSensorPoints:
positions: npt.NDArray[np.float32]
colors: Optional[npt.NDArray[np.uint8]]
# Define PointsSensor dataclass
@dataclass
class PointsSensor:
id: SensorID
points: PointsSensorPoints
type: Literal["points"] = "points"
parent_id: Optional[SensorID] = None
# Define LidarSensorPoints dataclass
@dataclass
class LidarSensorPoints:
positions: npt.NDArray[np.float32]
colors: Optional[npt.NDArray[np.uint8]] = None
intensities: Optional[npt.NDArray[np.uint8]] = None
timestamps: Optional[Union[npt.NDArray[np.uint32], npt.NDArray[np.uint64]]] = None
# Define LidarSensorFrame dataclass
@dataclass
class LidarSensorFrame:
timestamp: int
points: LidarSensorPoints
# Define LidarSensor dataclass
@dataclass
class LidarSensor:
id: SensorID
poses: PosePath
frames: List[LidarSensorFrame]
parent_id: Optional[SensorID] = None
coordinates: Literal["ego", "world"] = "world"
type: Literal["lidar"] = "lidar"
# Define RadarSensorPoints dataclass
@dataclass
class RadarSensorPoints:
positions: npt.NDArray[np.float32]
directions: Optional[npt.NDArray[np.float32]] = None
lengths: Optional[npt.NDArray[np.float32]] = None
timestamps: Optional[Union[npt.NDArray[np.uint32], npt.NDArray[np.uint64]]] = None
# Define RadarSensorFrame dataclass
@dataclass
class RadarSensorFrame:
timestamp: int
points: RadarSensorPoints
# Define RadarSensor dataclass
@dataclass
class RadarSensor:
id: SensorID
poses: PosePath
frames: List[RadarSensorFrame]
type: Literal["radar"] = "radar"
coordinates: Literal["ego", "world"] = "world"
parent_id: Optional[SensorID] = None
# Define DistortionModel enum
DistortionModel = Literal[
"brown_conrady",
"mod_equi_fish",
"mod_kannala",
"fisheye",
"fisheye_rad_tan_prism",
"cylindrical",
"omnidirectional",
"fisheye_radial_custom",
]
# Define CameraDistortion dataclass
@dataclass
class CameraDistortion:
model: DistortionModel
params: List[float]
# Define CameraIntrinsics dataclass
@dataclass
class CameraIntrinsics:
fx: float
fy: float
cx: float
cy: float
width: int
height: int
distortion: Optional[CameraDistortion]
# Define CameraSensorContent dataclass
@dataclass
class CameraSensorVideo:
timestamps: List[int]
content: npt.NDArray[np.uint8]
fps: float
# Define CameraSensorImages dataclass
@dataclass
class CameraSensorImage:
timestamp: int
content: npt.NDArray[np.uint8]
# Define CameraSensor dataclass
@dataclass
class CameraSensor:
id: SensorID
poses: PosePath
intrinsics: CameraIntrinsics
video: Optional[CameraSensorVideo] = None
images: Optional[List[CameraSensorImage]] = None
type: Literal["camera"] = "camera"
parent_id: Optional[SensorID] = None
# Define OdometrySensor dataclass
@dataclass
class OdometrySensor:
id: SensorID
poses: PosePath
type: Literal["odometry"] = "odometry"
parent_id: Optional[SensorID] = None
# Define AttributePath dataclass
@dataclass
class AttributePath:
name: str
timestamps: List[int]
values: List[Union[str, int, List[str]]]
sensor_id: Optional[SensorID] = None
static: bool = False
# Define CuboidPath dataclass
@dataclass
class CuboidPath:
timestamps: List[int]
values: List[List[float]] # x y z yaw pitch roll dx dy dz
# Define CuboidActivationPath dataclass
@dataclass
class CuboidActivationPath:
sensor_id: SensorID
timestamps: List[int]
durations: List[float]
cuboids: Optional[List[List[float]]] # x y z yaw pitch roll dx dy dz
# Define CuboidProjectionPath dataclass
@dataclass
class CuboidProjectionPath:
sensor_id: SensorID # 2d sensor like cameras
timestamps: List[int]
boxes: List[List[float]] # x y width height
cuboids: Optional[List[List[float]]] # dx dy dz px py pz roll pitch yaw
# Define CuboidAnnotation dataclass
@dataclass
class CuboidAnnotation:
id: AnnotationID
path: CuboidPath
label: Optional[str] = None
stationary: Optional[bool] = False
type: Literal["cuboid"] = "cuboid"
attributes: Optional[List[AttributePath]] = None
sensor_attributes: Optional[List[AttributePath]] = None
sensor_activations: Optional[List[CuboidActivationPath]] = None
sensor_projections: Optional[List[CuboidProjectionPath]] = None
parent_id: Optional[AnnotationID] = None
# Define AttributesAnnotation dataclass
@dataclass
class AttributesAnnotation:
id: AnnotationID
type: Literal["attributes"] = "attributes"
parent_id: Optional[AnnotationID] = None
attributes: Optional[List[AttributePath]] = None
sensor_attributes: Optional[List[AttributePath]] = None
@dataclass
class AnnotationPath:
timestamps: List[int]
values: List[List[float]]
@dataclass
class Point2DAnnotation:
id: AnnotationID
sensor_id: SensorID
path: AnnotationPath # values: [x, y]
type: Literal["point_2d"] = "point_2d"
parent_id: Optional[AnnotationID] = None
stationary: Optional[bool] = False
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class Box2DAnnotation:
id: AnnotationID
sensor_id: SensorID
path: AnnotationPath # values: [left, top, width, height]
type: Literal["box_2d"] = "box_2d"
parent_id: Optional[AnnotationID] = None
stationary: Optional[bool] = False
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class Polyline2DAnnotation:
id: AnnotationID
sensor_id: SensorID
path: AnnotationPath # x_0, y_0, x_1, y_1, ..., x_n, y_n
type: Literal["polyline_2d"] = "polyline_2d"
parent_id: Optional[AnnotationID] = None
stationary: Optional[bool] = False
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class Polygon2DAnnotation:
id: AnnotationID
sensor_id: SensorID
path: AnnotationPath # x_0, y_0, x_1, y_1, ..., x_n, y_n
type: Literal["polygon_2d"] = "polygon_2d"
parent_id: Optional[AnnotationID] = None
stationary: Optional[bool] = False
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class PolylineAnnotation:
id: AnnotationID
sensor_id: SensorID
path: AnnotationPath # x_0, y_0, z_0, x_1, y_1, z_1, ..., x_n, y_n, z_n
type: Literal["polyline"] = "polyline"
is_closed: Optional[bool] = None
parent_id: Optional[AnnotationID] = None
stationary: Optional[bool] = False
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class PointAnnotation:
id: AnnotationID
sensor_id: SensorID
path: AnnotationPath # x, y, z
type: Literal["point"] = "point"
parent_id: Optional[AnnotationID] = None
stationary: Optional[bool] = False
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class EventAnnotation:
id: AnnotationID
start: int
type: Literal["event"] = "event"
parent_id: Optional[AnnotationID] = None
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
duration: Optional[int] = None
sensor_id: Optional[SensorID] = None
# Define LabeledPointsAnnotationLabeledPoint dataclass
@dataclass
class LabeledPoint:
sensor_id: SensorID
point_ids: npt.NDArray[np.uint32]
sensor_frame: Optional[int] = None
# Define LabeledPointsAnnotation dataclass
@dataclass
class LabeledPointsAnnotation:
id: AnnotationID
label: str
labeled_points: List[LabeledPoint]
is_instance: bool = False
type: Literal["labeled_points"] = "labeled_points"
parent_id: Optional[AnnotationID] = None
@dataclass
class LocalizationAdjustmentAnnotation:
id: AnnotationID
poses: PosePath
type: Literal["localization_adjustment"] = "localization_adjustment"
parent_id: Optional[AnnotationID] = None
@dataclass
class LinkAnnotation:
id: AnnotationID
sensor_id: SensorID
label: str
is_bidirectional: bool
from_id: AnnotationID
to_id: AnnotationID
type: Literal["link"] = "link"
parent_id: Optional[AnnotationID] = None
attributes: Optional[List[AttributePath]] = None
@dataclass
class ObjectAnnotation:
id: AnnotationID
type: Literal["object"] = "object"
parent_id: Optional[AnnotationID] = None
label: Optional[str] = None
attributes: Optional[List[AttributePath]] = None
Sensor = Union[CameraSensor, LidarSensor, RadarSensor, OdometrySensor, PointsSensor]
Annotation = Union[
CuboidAnnotation,
AttributesAnnotation,
Box2DAnnotation,
Point2DAnnotation,
Polyline2DAnnotation,
Polygon2DAnnotation,
PolylineAnnotation,
PointAnnotation,
LinkAnnotation,
LabeledPointsAnnotation,
LocalizationAdjustmentAnnotation,
ObjectAnnotation,
]
# Define Scene dataclass
@dataclass
class Scene:
version: Literal["1.0"] = "1.0"
sensors: Optional[List[Sensor]] = None
annotations: Optional[List[Annotation]] = None
attributes: Optional[List[AttributePath]] = None
time_offset: Optional[int] = None
time_unit: Optional[Literal["microseconds", "nanoseconds"]] = "microseconds"
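# Illustrative sketch: a minimal SFS scene holding a single static cuboid annotation; the path
# values follow the documented x y z yaw pitch roll dx dy dz layout.
def _minimal_scene_sketch() -> Scene:
    cuboid = CuboidAnnotation(
        id="cuboid-1",
        path=CuboidPath(
            timestamps=[0],
            values=[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 1.5]],
        ),
        label="car",
        stationary=True,
    )
    return Scene(annotations=[cuboid])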
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/spec/sfs/types.py
| 0.919661 | 0.507446 |
types.py
|
pypi
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Generic, List, Literal, Optional, Sequence, TypeVar, Union
import scale_sensor_fusion_io as sfio
from typing_extensions import TypeAlias
PathField: TypeAlias = Union[int, str]
PathInput: TypeAlias = Union[PathField, List[PathField]]
T = TypeVar("T")
@dataclass
class ParseSuccess(Generic[T]):
data: T
success: Literal[True] = True
@dataclass
class ErrorDetails:
path: List[PathField]
errors: List[str]
def __repr__(self) -> str:
return f"{{ path: '{'.'.join([str(p) for p in self.path])}', errors: {self.errors} }}"
@staticmethod
def from_msg(msg: str, path: Optional[PathInput] = None) -> ErrorDetails:
"""
Helper function to initiate a ErrorDetails from a single error message to reduce boilerplate
"""
return ErrorDetails(
path=(path if type(path) is list else [path]) if path else [], errors=[msg] # type: ignore
)
@staticmethod
def missing_field(field: str, path: Optional[PathInput] = None) -> ErrorDetails:
"""
Helper function to template out details for missing field
"""
return ErrorDetails(
path=(path if type(path) is list else [path]) if path else [], # type: ignore
errors=[f"Missing field: {field}"],
)
@dataclass
class DataValidationError(Exception):
details: List[ErrorDetails]
@staticmethod
def from_msg(msg: str, path: Optional[PathInput] = None) -> DataValidationError:
"""
Helper function to initiate a DataValidationError from a single error message to reduce boilerplate
"""
return DataValidationError(details=[ErrorDetails.from_msg(msg, path)])
def prepend_path(self, path: List[PathField]) -> DataValidationError:
"""Prepend path of error with additional prefix"""
for err in self.details:
err.path = path + err.path
return self
@dataclass
class ParseError(DataValidationError):
details: List[ErrorDetails]
success: Literal[False] = False
@staticmethod
def from_msg(msg: str, path: Optional[PathInput] = None) -> ParseError:
"""
Helper function to initiate a ParseError from a single error message to reduce boilerplate
"""
return ParseError(details=[ErrorDetails.from_msg(msg, path)])
@staticmethod
def missing_field(field: str, path: Optional[PathInput] = None) -> ParseError:
"""
Helper function to template out details for missing field
"""
return ParseError(details=[ErrorDetails.missing_field(field, path)])
ParseResult: TypeAlias = Union[ParseSuccess[T], ParseError]
ValidationResult: TypeAlias = Optional[DataValidationError]
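# Illustrative sketch: parsers built on these types are expected to return either a ParseSuccess
# wrapping the parsed value or a ParseError carrying path-scoped details, so callers can branch
# on the success flag. _parse_positive_int is a hypothetical parser used only for illustration.
def _parse_positive_int(raw: object, path: PathInput) -> ParseResult[int]:
    if not isinstance(raw, int) or raw <= 0:
        return ParseError.from_msg(f"expected a positive int, got {raw!r}", path=path)
    return ParseSuccess(data=raw)

# _parse_positive_int(-3, ["sensors", 0, "id"]).success is False, and its details render
# roughly as: { path: 'sensors.0.id', errors: ["expected a positive int, got -3"] }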
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/validation/error.py
| 0.931275 | 0.324369 |
error.py
|
pypi
|
from dataclasses import InitVar
from typing import Type, Any, Optional, Union, Collection, TypeVar, Dict, Callable, Mapping, List, Tuple, get_type_hints
T = TypeVar("T", bound=Any)
def transform_value(
type_hooks: Mapping[Union[Type, object], Callable[[Any], Any]], cast: List[Type], target_type: Type, value: Any
) -> Any:
# Generic hook type match
if Any in type_hooks:
value = type_hooks[Any](value)
if is_generic_collection(target_type):
collection_type = extract_origin_type(target_type)
if collection_type and collection_type in type_hooks:
value = type_hooks[collection_type](value)
# Exact hook type match
if target_type in type_hooks:
value = type_hooks[target_type](value)
else:
# Cast to types in cast list
for cast_type in cast:
if is_subclass(target_type, cast_type):
if is_generic_collection(target_type):
origin_collection = extract_origin_collection(target_type)
if is_set(origin_collection):
return list(value)
value = origin_collection(value)
else:
value = target_type(value)
break
# Peel optional types
if is_optional(target_type):
if value is None:
return None
target_type = extract_optional(target_type)
return transform_value(type_hooks, cast, target_type, value)
# For collections (dict/list), transform each item
if is_generic_collection(target_type) and isinstance(value, extract_origin_collection(target_type)):
collection_cls = value.__class__
if issubclass(collection_cls, dict):
key_cls, item_cls = extract_generic(target_type, defaults=(Any, Any))
return collection_cls(
{
transform_value(type_hooks, cast, key_cls, key): transform_value(type_hooks, cast, item_cls, item)
for key, item in value.items()
}
)
item_cls = extract_generic(target_type, defaults=(Any,))[0]
return collection_cls(transform_value(type_hooks, cast, item_cls, item) for item in value)
return value
def get_data_class_hints(data_class: Type[T], globalns: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
type_hints = get_type_hints(data_class, globalns=globalns)
for attr, type_hint in type_hints.items():
if is_init_var(type_hint):
type_hints[attr] = extract_init_var(type_hint)
return type_hints
def extract_origin_collection(collection: Type) -> Type:
try:
return collection.__extra__
except AttributeError:
return collection.__origin__
def extract_origin_type(collection: Type) -> Optional[Type]:
collection_type = extract_origin_collection(collection)
if collection_type is list:
return List
elif collection_type is dict:
return Dict
return None
def is_optional(type_: Type) -> bool:
return is_union(type_) and type(None) in extract_generic(type_)
def extract_optional(optional: Type[Optional[T]]) -> T:
other_members = [member for member in extract_generic(optional) if member is not type(None)]
if other_members:
return Union[tuple(other_members)] # type: ignore
else:
raise ValueError("can not find not-none value")
def is_generic(type_: Type) -> bool:
return hasattr(type_, "__origin__")
def is_union(type_: Type) -> bool:
if is_generic(type_) and type_.__origin__ == Union:
return True
try:
from types import UnionType # type: ignore
return isinstance(type_, UnionType)
except ImportError:
return False
def is_tuple(type_: Type) -> bool:
return is_subclass(type_, tuple)
def is_literal(type_: Type) -> bool:
try:
from typing import Literal # type: ignore
return is_generic(type_) and type_.__origin__ == Literal
except ImportError:
return False
def is_new_type(type_: Type) -> bool:
return hasattr(type_, "__supertype__")
def extract_new_type(type_: Type) -> Type:
return type_.__supertype__
def is_init_var(type_: Type) -> bool:
return isinstance(type_, InitVar) or type_ is InitVar
def is_set(type_: Type) -> bool:
return type_ in (set, frozenset) or isinstance(type_, (frozenset, set))
def extract_init_var(type_: Type) -> Union[Type, Any]:
try:
return type_.type
except AttributeError:
return Any
def is_instance(value: Any, type_: Type) -> bool:
if type_ == Any:
return True
elif is_union(type_):
return any(is_instance(value, t) for t in extract_generic(type_))
elif is_generic_collection(type_):
origin = extract_origin_collection(type_)
if not isinstance(value, origin):
return False
if extract_generic_no_defaults(type_) is None:
return True
if isinstance(value, tuple) and is_tuple(type_):
tuple_types = extract_generic(type_)
if len(tuple_types) == 1 and tuple_types[0] == ():
return len(value) == 0
elif len(tuple_types) == 2 and tuple_types[1] is ...:
return all(is_instance(item, tuple_types[0]) for item in value)
else:
if len(tuple_types) != len(value):
return False
return all(is_instance(item, item_type) for item, item_type in zip(value, tuple_types))
if isinstance(value, Mapping):
key_type, val_type = extract_generic(type_, defaults=(Any, Any))
for key, val in value.items():
if not is_instance(key, key_type) or not is_instance(val, val_type):
return False
return True
return all(is_instance(item, extract_generic(type_, defaults=(Any,))[0]) for item in value)
elif is_new_type(type_):
return is_instance(value, extract_new_type(type_))
elif is_literal(type_):
return value in extract_generic(type_)
elif is_init_var(type_):
return is_instance(value, extract_init_var(type_))
elif is_type_generic(type_):
return is_subclass(value, extract_generic(type_)[0])
elif is_generic(type_):
origin = extract_origin_collection(type_)
return isinstance(value, origin)
else:
try:
# As described in PEP 484 - section: "The numeric tower"
if isinstance(value, (int, float)) and type_ in [float, complex]:
return True
return isinstance(value, type_)
except TypeError:
return False
def is_generic_collection(type_: Type) -> bool:
if not is_generic(type_):
return False
origin = extract_origin_collection(type_)
try:
return bool(origin and issubclass(origin, Collection) and not skip_generic_conversion(origin))
except (TypeError, AttributeError):
return False
def skip_generic_conversion(origin: Type) -> bool:
return origin.__module__ == "numpy" and origin.__qualname__ == "ndarray"
def extract_generic(type_: Type, defaults: Tuple = ()) -> tuple:
try:
if hasattr(type_, "_special") and type_._special:
return defaults
return type_.__args__ or defaults
except AttributeError:
return defaults
def extract_generic_no_defaults(type_: Type) -> Union[tuple, None]:
try:
if hasattr(type_, "_special") and type_._special:
return None
return type_.__args__
except AttributeError:
return None
def is_subclass(sub_type: Type, base_type: Type) -> bool:
if is_generic_collection(sub_type):
sub_type = extract_origin_collection(sub_type)
try:
return issubclass(sub_type, base_type)
except TypeError:
return False
def is_type_generic(type_: Type) -> bool:
try:
return type_.__origin__ in (type, Type)
except AttributeError:
return False
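# Illustrative sketch: transform_value walks the target type, applying hooks and recursing into
# generic collections, while is_instance performs a structural runtime check against typing
# constructs.
def _type_utils_sketch() -> None:
    # A hook keyed on int coerces each string element while the list is rebuilt.
    assert transform_value({int: int}, [], List[int], ["1", "2"]) == [1, 2]
    # Optional is peeled before the inner type is considered; None short-circuits.
    assert transform_value({int: int}, [], Optional[int], None) is None
    # Structural checks for parameterized generics.
    assert is_instance([1, 2, 3], List[int])
    assert not is_instance([1, "x"], List[int])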
|
/scale_sensor_fusion_io-0.3.2-py3-none-any.whl/scale_sensor_fusion_io/validation/dacite_internal/types.py
| 0.741861 | 0.268851 |
types.py
|
pypi
|