file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
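Each row of the table below is one fill-in-the-middle (FIM) sample: a source file split into a prefix, a held-out middle, and a suffix, with fim_type recording how the middle was chosen (the classes seen below are identifier_name, identifier_body, random_line_split, and conditional_block). A minimal sketch of how a row could be packed into a PSM-ordered training string; the <PRE>/<SUF>/<MID> sentinels are assumed placeholders, not tokens defined by this dataset:

```python
# Pack one (prefix, suffix, middle) row into a PSM-format FIM training string.
# The sentinel strings below are hypothetical; real tokenizers define their own.
def to_fim_string(prefix: str, suffix: str, middle: str) -> str:
    return f"<PRE>{prefix}<SUF>{suffix}<MID>{middle}"

# Row 1 below masks part of an identifier: "poo" + "l1_out..." restores pool1_out.
example = to_fim_string(
    prefix="with tf.name_scope('Pool2d_1'):\n    ",
    suffix="l1_out = Pool2d(conv1_out)",
    middle="poo",
)
print(example)
```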
02.代码实现-06Tensorflow-01Cifar10-01基本网络.py
|
100
Linear fully-connected layer: 10 neurons
softmax layer
'''
import tensorflow as tf
import os
import cifar_input,cifar_toTFRecords
import numpy as np
import csv
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
learning_rate_init = 0.001
training_epochs = 1
batch_size = 100
display_step = 10
dataset_dir = '../Total_Data/TempData/'
num_examples_per_epoch_for_train = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # 50000
num_examples_per_epoch_for_eval = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
image_size = cifar_input.IMAGE_SIZE
image_channel = 3
n_classes = cifar_input.NUM_CLASSES_CIFAR10
conv1_kernel_num = 32
conv2_kernel_num = 32
fc1_units_num = 192
fc2_units_num = 96
def WeightsVariable(shape, name_str, stddev = 0.1):
initial = tf.truncated_normal(shape=shape, stddev=stddev, dtype=tf.float32)
return tf.Variable(initial_value=initial, dtype=tf.float32, name=name_str)
def BiasesVariable(shape, name_str, init_value):
initial = tf.constant(init_value, shape=shape)
return tf.Variable(initial_value=initial, dtype=tf.float32, name = name_str)
# the convolution layers do no downsampling
def Conv2d(x, W, b, stride=1, padding='SAME', activation=tf.nn.relu, act_name='relu'):
with tf.name_scope('conv2d_bias'):
y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
y = tf.nn.bias_add(y, b)
with tf.name_scope(act_name):
y = activation(y)
return y
def Pool2d(x, pool = tf.nn.max_pool, k =2, stride=2, padding='SAME'):
return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding=padding)
def FullyConnected(x, W, b, activate=tf.nn.relu, act_name='relu'):
with tf.name_scope('Wx_b'):
y = tf.matmul(x, W)
y = tf.add(y, b)
with tf.name_scope(act_name):
y = activate(y)
return y
def Inference(images_holder):
with tf.name_scope('Conv2d_1'): # conv layer 1
weights = WeightsVariable(shape=[5, 5, image_channel, conv1_kernel_num], name_str='weights', stddev=5e-2)
biases = BiasesVariable(shape=[conv1_kernel_num], name_str='biases', init_value=0.0)
conv1_out = Conv2d(images_holder, weights, biases, stride=1, padding='SAME')
with tf.name_scope('Pool2d_1'): # pooling layer 1
|
l1_out = Pool2d(conv1_out, pool=tf.nn.max_pool, k=3, stride=2, padding='SAME')
with tf.name_scope('Conv2d_2'): # conv layer 2
weights = WeightsVariable(shape=[5, 5, conv1_kernel_num, conv2_kernel_num], name_str='weights', stddev=5e-2)
biases = BiasesVariable(shape=[conv2_kernel_num], name_str='biases', init_value=0.0)
conv2_out = Conv2d(pool1_out, weights, biases, stride=1, padding='SAME')
with tf.name_scope('Pool2d_2'): # pooling layer 2
pool2_out = Pool2d(conv2_out, pool=tf.nn.max_pool, k=3, stride=2, padding='SAME') # 8 x 8 x 32 for 32x32 inputs
with tf.name_scope('FeatsReshape'): # flatten the 2-D feature maps into one feature vector per example
features = tf.reshape(pool2_out, [batch_size, -1]) # [batch_size, feats_dim], e.g. 8 * 8 * 32 = 2048
feats_dim = features.get_shape()[1].value
with tf.name_scope('FC1_nonlinear'): # nonlinear fully-connected layer 1
weights = WeightsVariable(shape=[feats_dim, fc1_units_num], name_str='weights', stddev=4e-2)
biases = BiasesVariable(shape=[fc1_units_num], name_str='biases', init_value=0.1)
fc1_out = FullyConnected(features, weights, biases,
activate=tf.nn.relu, act_name='relu')
with tf.name_scope('FC2_nonlinear'): # nonlinear fully-connected layer 2
weights = WeightsVariable(shape=[fc1_units_num, fc2_units_num], name_str='weights', stddev=4e-2)
biases = BiasesVariable(shape=[fc2_units_num], name_str='biases', init_value=0.1)
fc2_out = FullyConnected(fc1_out, weights, biases,
activate=tf.nn.relu, act_name='relu')
with tf.name_scope('FC2_linear'): # linear fully-connected output layer
weights = WeightsVariable(shape=[fc2_units_num, n_classes], name_str='weights', stddev=1.0 / fc2_units_num)
biases = BiasesVariable(shape=[n_classes], name_str='biases', init_value=0.0)
logits = FullyConnected(fc2_out, weights, biases,
activate=tf.identity, act_name='linear')
return logits
'''
The returned images have shape [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3].
The returned labels are not one-hot encoded: their shape is [batch_size], not [batch_size, n_classes].
'''
def get_distored_train_batch(data_dir, batch_size):
if not data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
# images, labels = cifar_input.distorted_inputs(cifar10or20or100=10, data_dir=data_dir, batch_size=batch_size)
images, labels = cifar_toTFRecords.readFromTFRecords(
'../Total_Data/TempData/cifar-10-batches-tfrecords/train_package.tfrecords', batch_size=batch_size,
img_shape=[32,32,3])
return images, labels
'''
Fetch the (undistorted) evaluation/test batch.
'''
def get_undistored_eval_batch(eval_data, data_dir, batch_size):
if not data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
# images, labels = cifar_input.inputs(cifar10or20or100=10, eval_data=eval_data, data_dir=data_dir, batch_size=batch_size)
images, labels = cifar_toTFRecords.readFromTFRecords(
'../Total_Data/TempData/cifar-10-batches-tfrecords/test_package.tfrecords', batch_size=batch_size,
img_shape=[32,32,3])
return images, labels
if __name__ == '__main__':
# cifar_input.maybe_download_and_extract('../Total_Data/TempData', cifar_input.CIFAR10_DATA_URL)
with tf.Graph().as_default():
# inputs
with tf.name_scope('Inputs'):
images_holder = tf.placeholder(tf.float32, [batch_size, image_size, image_size, image_channel],
name='images')
labels_holder = tf.placeholder(tf.int32, [batch_size], name='labels') # integer digits 0-9
# forward inference
with tf.name_scope('Inference'):
logits = Inference(images_holder)
# define the loss layer
with tf.name_scope('Loss'):
# CIFAR-10 labels are not one-hot encoded, so the plain softmax cross-entropy op cannot be used; the sparse variant accepts integer labels and does the one-hot encoding internally
labels = tf.cast( labels_holder, tf.int64 )
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy)
total_loss = cross_entropy_mean
# define the optimization/training layer
with tf.name_scope('Train'):
learning_rate = tf.placeholder(tf.float32)
global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int64)
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
# define the model evaluation layer
with tf.name_scope('Evaluate'):
top_K_op = tf.nn.in_top_k(predictions=logits, targets=labels_holder, k = 1)
with tf.name_scope('GetTrainBatch'):
images_train, labels_train = get_distored_train_batch(data_dir=dataset_dir, batch_size=batch_size)
with tf.name_scope('GetTestBatch'):
images_test, labels_test = get_undistored_eval_batch(eval_data=True, data_dir=dataset_dir,
batch_size=batch_size)
init
|
poo
|
identifier_name
|
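The Loss comment in the row above notes that CIFAR-10 labels arrive as integers, so the sparse cross-entropy op is used instead of the dense one. A quick numeric check (TF 2.x eager execution assumed) that the sparse op matches the dense op applied to one-hot labels:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])
labels = tf.constant([0])  # integer class index, not one-hot

sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
dense = tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(labels, depth=3), logits=logits)
print(float(sparse[0]), float(dense[0]))  # both print the same loss value
```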
enel645_group11_final_project.py
|
_classes):
self.input_paths = in_paths
self.target_paths = out_paths
self.img_size = img_size
self.n_channels = n_channels
self.n_classes = n_classes
def
|
(self):
return len(self.target_paths)
def __getitem__(self, idx):
'''Returns the (input, target) tuple corresponding to batch #idx.'''
i = idx
path = self.input_paths[i]
target_path = self.target_paths[i]
img = tf.keras.preprocessing.image.load_img(path, color_mode='rgb', target_size=self.img_size)
img = tf.keras.preprocessing.image.img_to_array(img)
img /= 255.0
mask = tf.keras.preprocessing.image.load_img(target_path, color_mode='rgb', target_size=self.img_size)
mask = tf.keras.preprocessing.image.img_to_array(mask)
# replace colors with corresponding labels
result = np.ndarray(shape=mask.shape[:2], dtype='float32')
result[:, :] = -1
for rgb, idx in mapping.items():
result[(mask == rgb).all(2)] = idx
result[(mask == (0, 0, 0)).all(2)] = 1
if(result.min() == -1):
colors = set(tuple(v) for m2d in mask for v in m2d)
# document incorrect mapping
print('\nincorrect mapping')
print(colors)
# One-hot encoded representation
result = tf.keras.utils.to_categorical(result, self.n_classes)
return img, result
# Split dataset into train/validation/test
val_samples = int(0.15 * len(input_paths))
test_samples = int(0.05 * len(input_paths))
train_samples = len(input_paths) - val_samples - test_samples
train_input_paths = input_paths[:train_samples]
train_target_paths = target_paths[:train_samples]
val_input_paths = input_paths[train_samples:train_samples + val_samples]
val_target_paths = target_paths[train_samples:train_samples + val_samples]
test_input_paths = input_paths[train_samples +
val_samples: train_samples + val_samples + test_samples]
test_target_paths = target_paths[train_samples +
val_samples:train_samples + val_samples + test_samples]
train_gen = DataSequence(train_input_paths, train_target_paths)
val_gen = DataSequence(val_input_paths, val_target_paths)
test_gen = DataSequence(test_input_paths, test_target_paths)
print('simulation data train_samples', train_samples)
print('simulation data val_samples', val_samples)
print('simulation data test_samples', test_samples)
#%% Define the model
# weight of each class in the whole dataset
weights = np.array([1-0.008129217, 1-0.741364343, 1-0.038759669,
1-0.033972565, 1-0.159647414, 1-0.018480072])
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = tf.keras.backend.flatten(y_true)
y_pred_f = tf.keras.backend.flatten(y_pred)
intersection = tf.keras.backend.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_f) + tf.keras.backend.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1.0 - dice_coef(y_true, y_pred)
ALPHA = 0.8
GAMMA = 2
def FocalLoss(targets, inputs, alpha=ALPHA, gamma=GAMMA):
inputs = tf.keras.backend.flatten(inputs)
targets = tf.keras.backend.flatten(targets)
BCE = tf.keras.backend.binary_crossentropy(targets, inputs)
BCE_EXP = tf.keras.backend.exp(-BCE)
focal_loss = tf.keras.backend.mean(
alpha * tf.keras.backend.pow((1-BCE_EXP), gamma) * BCE)
return focal_loss
def jaccard_coef(y_true, y_pred, smooth=1):
intersection = tf.keras.backend.sum(
tf.keras.backend.abs(y_true * y_pred), axis=-1)
sum_ = tf.keras.backend.sum(tf.keras.backend.abs(
y_true) + tf.keras.backend.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return jac * smooth
def jaccard_loss(y_true, y_pred, smooth=1):
return (smooth - jaccard_coef(y_true, y_pred, smooth))
def weighted_dice_loss(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
loss = 1. - tf.reduce_sum(score)
return loss
def weighted_dice_coef(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
return tf.reduce_sum(score)
def get_unet_mod(num_classes=num_classes, img_size=IMG_SIZE, learning_rate=1e-3,
learning_decay=1e-6, drop_out=0.1, nchannels=N_CHANNELS, kshape=(3, 3),
base_trainable=True):
input_img = tf.keras.layers.Input(img_size + (nchannels, ))
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(input_img)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(conv1)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = tf.keras.layers.Dropout(drop_out)(pool1)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(pool1)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(conv2)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = tf.keras.layers.Dropout(drop_out)(pool2)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(pool2)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = tf.keras.layers.Dropout(drop_out)(pool3)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(pool3)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv4)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = tf.keras.layers.Dropout(drop_out)(pool4)
conv5 = tf.keras.layers.Conv2D(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(pool4)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
conv5 = tf.keras.layers.Conv2DTranspose(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(conv5)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
up6 = tf.keras.layers.concatenate(
[tf.keras.layers.UpSampling2D(size=(2, 2))(conv5), conv4], axis=-1)
up6 = tf.keras.layers.Dropout(drop_out)(up6)
conv6 = tf.keras.layers.Conv2DTranspose(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(up6)
conv6 = tf.keras.layers.BatchNormalization()(conv6)
conv6 = tf.keras.layers.Conv2DTranspose(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv6)
|
__len__
|
identifier_name
|
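The __getitem__ method in the row above maps RGB mask colors to integer labels with (mask == rgb).all(2), which is True only where all three channels match, and flags unmapped colors with the sentinel -1. A standalone NumPy sketch of that trick on a 2x2 toy mask:

```python
import numpy as np

mapping = {(31, 120, 180): 0, (106, 176, 25): 1}  # subset of the row's color map
mask = np.zeros((2, 2, 3), dtype=np.uint8)        # toy mask, all black
mask[0, 0] = (31, 120, 180)
mask[0, 1] = (106, 176, 25)

result = np.full(mask.shape[:2], -1.0, dtype=np.float32)
for rgb, idx in mapping.items():
    result[(mask == rgb).all(axis=2)] = idx  # a pixel counts only if all 3 channels match
result[(mask == (0, 0, 0)).all(axis=2)] = 1  # black is folded into class 1, as above
print(result)  # [[0. 1.] [1. 1.]] -- any surviving -1 would flag an unmapped color
```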
enel645_group11_final_project.py
|
_classes):
self.input_paths = in_paths
self.target_paths = out_paths
self.img_size = img_size
self.n_channels = n_channels
self.n_classes = n_classes
def __len__(self):
return len(self.target_paths)
def __getitem__(self, idx):
'''Returns the (input, target) tuple corresponding to batch #idx.'''
i = idx
path = self.input_paths[i]
target_path = self.target_paths[i]
img = tf.keras.preprocessing.image.load_img(path, color_mode='rgb', target_size=self.img_size)
img = tf.keras.preprocessing.image.img_to_array(img)
img /= 255.0
mask = tf.keras.preprocessing.image.load_img(target_path, color_mode='rgb', target_size=self.img_size)
mask = tf.keras.preprocessing.image.img_to_array(mask)
# replace colors with corresponding labels
result = np.ndarray(shape=mask.shape[:2], dtype='float32')
result[:, :] = -1
for rgb, idx in mapping.items():
result[(mask == rgb).all(2)] = idx
result[(mask == (0, 0, 0)).all(2)] = 1
if(result.min() == -1):
colors = set(tuple(v) for m2d in mask for v in m2d)
# document incorrect mapping
print('\nincorrect mapping')
print(colors)
# One-hot encoded representation
result = tf.keras.utils.to_categorical(result, self.n_classes)
return img, result
# Split dataset into train/validation/test
val_samples = int(0.15 * len(input_paths))
test_samples = int(0.05 * len(input_paths))
train_samples = len(input_paths) - val_samples - test_samples
train_input_paths = input_paths[:train_samples]
train_target_paths = target_paths[:train_samples]
val_input_paths = input_paths[train_samples:train_samples + val_samples]
val_target_paths = target_paths[train_samples:train_samples + val_samples]
test_input_paths = input_paths[train_samples +
val_samples: train_samples + val_samples + test_samples]
test_target_paths = target_paths[train_samples +
|
train_gen = DataSequence(train_input_paths, train_target_paths)
val_gen = DataSequence(val_input_paths, val_target_paths)
test_gen = DataSequence(test_input_paths, test_target_paths)
print('simulation data train_samples', train_samples)
print('simulation data val_samples', val_samples)
print('simulation data test_samples', test_samples)
#%% Define the model
# weight of each class in the whole dataset
weights = np.array([1-0.008129217, 1-0.741364343, 1-0.038759669,
1-0.033972565, 1-0.159647414, 1-0.018480072])
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = tf.keras.backend.flatten(y_true)
y_pred_f = tf.keras.backend.flatten(y_pred)
intersection = tf.keras.backend.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_f) + tf.keras.backend.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1.0 - dice_coef(y_true, y_pred)
ALPHA = 0.8
GAMMA = 2
def FocalLoss(targets, inputs, alpha=ALPHA, gamma=GAMMA):
inputs = tf.keras.backend.flatten(inputs)
targets = tf.keras.backend.flatten(targets)
BCE = tf.keras.backend.binary_crossentropy(targets, inputs)
BCE_EXP = tf.keras.backend.exp(-BCE)
focal_loss = tf.keras.backend.mean(
alpha * tf.keras.backend.pow((1-BCE_EXP), gamma) * BCE)
return focal_loss
def jaccard_coef(y_true, y_pred, smooth=1):
intersection = tf.keras.backend.sum(
tf.keras.backend.abs(y_true * y_pred), axis=-1)
sum_ = tf.keras.backend.sum(tf.keras.backend.abs(
y_true) + tf.keras.backend.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return jac * smooth
def jaccard_loss(y_true, y_pred, smooth=1):
return (smooth - jaccard_coef(y_true, y_pred, smooth))
def weighted_dice_loss(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
loss = 1. - tf.reduce_sum(score)
return loss
def weighted_dice_coef(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
return tf.reduce_sum(score)
def get_unet_mod(num_classes=num_classes, img_size=IMG_SIZE, learning_rate=1e-3,
learning_decay=1e-6, drop_out=0.1, nchannels=N_CHANNELS, kshape=(3, 3),
base_trainable=True):
input_img = tf.keras.layers.Input(img_size + (nchannels, ))
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(input_img)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(conv1)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = tf.keras.layers.Dropout(drop_out)(pool1)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(pool1)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(conv2)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = tf.keras.layers.Dropout(drop_out)(pool2)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(pool2)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = tf.keras.layers.Dropout(drop_out)(pool3)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(pool3)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv4)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = tf.keras.layers.Dropout(drop_out)(pool4)
conv5 = tf.keras.layers.Conv2D(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(pool4)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
conv5 = tf.keras.layers.Conv2DTranspose(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(conv5)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
up6 = tf.keras.layers.concatenate(
[tf.keras.layers.UpSampling2D(size=(2, 2))(conv5), conv4], axis=-1)
up6 = tf.keras.layers.Dropout(drop_out)(up6)
conv6 = tf.keras.layers.Conv2DTranspose(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(up6)
conv6 = tf.keras.layers.BatchNormalization()(conv6)
conv6 = tf.keras.layers.Conv2DTranspose(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv6)
|
val_samples:train_samples + val_samples + test_samples]
|
random_line_split
|
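The split in the rows above is sequential rather than shuffled: 15% validation, 5% test, and the remaining 80% training. The same arithmetic, spelled out on a hypothetical 100-item path list:

```python
input_paths = [f"img_{i:03d}.png" for i in range(100)]  # hypothetical paths

val_samples = int(0.15 * len(input_paths))    # 15
test_samples = int(0.05 * len(input_paths))   # 5
train_samples = len(input_paths) - val_samples - test_samples  # 80

train = input_paths[:train_samples]
val = input_paths[train_samples:train_samples + val_samples]
test = input_paths[train_samples + val_samples:train_samples + val_samples + test_samples]
assert len(train) + len(val) + len(test) == len(input_paths)  # no sample lost
```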
enel645_group11_final_project.py
|
_classes):
self.input_paths = in_paths
self.target_paths = out_paths
self.img_size = img_size
self.n_channels = n_channels
self.n_classes = n_classes
def __len__(self):
return len(self.target_paths)
def __getitem__(self, idx):
'''Returns the (input, target) tuple corresponding to batch #idx.'''
i = idx
path = self.input_paths[i]
target_path = self.target_paths[i]
img = tf.keras.preprocessing.image.load_img(path, color_mode='rgb', target_size=self.img_size)
img = tf.keras.preprocessing.image.img_to_array(img)
img /= 255.0
mask = tf.keras.preprocessing.image.load_img(target_path, color_mode='rgb', target_size=self.img_size)
mask = tf.keras.preprocessing.image.img_to_array(mask)
# replace colors with corresponding labels
result = np.ndarray(shape=mask.shape[:2], dtype='float32')
result[:, :] = -1
for rgb, idx in mapping.items():
result[(mask == rgb).all(2)] = idx
result[(mask == (0, 0, 0)).all(2)] = 1
if(result.min() == -1):
colors = set(tuple(v) for m2d in mask for v in m2d)
# document incorrect mapping
print('\nincorrect mapping')
print(colors)
# One-hot encoded representation
result = tf.keras.utils.to_categorical(result, self.n_classes)
return img, result
# Split dataset into train/validation/test
val_samples = int(0.15 * len(input_paths))
test_samples = int(0.05 * len(input_paths))
train_samples = len(input_paths) - val_samples - test_samples
train_input_paths = input_paths[:train_samples]
train_target_paths = target_paths[:train_samples]
val_input_paths = input_paths[train_samples:train_samples + val_samples]
val_target_paths = target_paths[train_samples:train_samples + val_samples]
test_input_paths = input_paths[train_samples +
val_samples: train_samples + val_samples + test_samples]
test_target_paths = target_paths[train_samples +
val_samples:train_samples + val_samples + test_samples]
train_gen = DataSequence(train_input_paths, train_target_paths)
val_gen = DataSequence(val_input_paths, val_target_paths)
test_gen = DataSequence(test_input_paths, test_target_paths)
print('simulation data train_samples', train_samples)
print('simulation data val_samples', val_samples)
print('simulation data test_samples', test_samples)
#%% Define the model
# weight of each class in the whole dataset
weights = np.array([1-0.008129217, 1-0.741364343, 1-0.038759669,
1-0.033972565, 1-0.159647414, 1-0.018480072])
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = tf.keras.backend.flatten(y_true)
y_pred_f = tf.keras.backend.flatten(y_pred)
intersection = tf.keras.backend.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_f) + tf.keras.backend.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1.0 - dice_coef(y_true, y_pred)
ALPHA = 0.8
GAMMA = 2
def FocalLoss(targets, inputs, alpha=ALPHA, gamma=GAMMA):
|
def jaccard_coef(y_true, y_pred, smooth=1):
intersection = tf.keras.backend.sum(
tf.keras.backend.abs(y_true * y_pred), axis=-1)
sum_ = tf.keras.backend.sum(tf.keras.backend.abs(
y_true) + tf.keras.backend.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return jac * smooth
def jaccard_loss(y_true, y_pred, smooth=1):
return (smooth - jaccard_coef(y_true, y_pred, smooth))
def weighted_dice_loss(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
loss = 1. - tf.reduce_sum(score)
return loss
def weighted_dice_coef(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
return tf.reduce_sum(score)
def get_unet_mod(num_classes=num_classes, img_size=IMG_SIZE, learning_rate=1e-3,
learning_decay=1e-6, drop_out=0.1, nchannels=N_CHANNELS, kshape=(3, 3),
base_trainable=True):
input_img = tf.keras.layers.Input(img_size + (nchannels, ))
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(input_img)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(conv1)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = tf.keras.layers.Dropout(drop_out)(pool1)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(pool1)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(conv2)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = tf.keras.layers.Dropout(drop_out)(pool2)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(pool2)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = tf.keras.layers.Dropout(drop_out)(pool3)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(pool3)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv4)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = tf.keras.layers.Dropout(drop_out)(pool4)
conv5 = tf.keras.layers.Conv2D(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(pool4)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
conv5 = tf.keras.layers.Conv2DTranspose(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(conv5)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
up6 = tf.keras.layers.concatenate(
[tf.keras.layers.UpSampling2D(size=(2, 2))(conv5), conv4], axis=-1)
up6 = tf.keras.layers.Dropout(drop_out)(up6)
conv6 = tf.keras.layers.Conv2DTranspose(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(up6)
conv6 = tf.keras.layers.BatchNormalization()(conv6)
conv6 = tf.keras.layers.Conv2DTranspose(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv6)
|
inputs = tf.keras.backend.flatten(inputs)
targets = tf.keras.backend.flatten(targets)
BCE = tf.keras.backend.binary_crossentropy(targets, inputs)
BCE_EXP = tf.keras.backend.exp(-BCE)
focal_loss = tf.keras.backend.mean(
alpha * tf.keras.backend.pow((1-BCE_EXP), gamma) * BCE)
return focal_loss
|
identifier_body
|
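dice_coef in the row above computes the soft Dice score 2|A∩B| / (|A| + |B|) with a smoothing constant of 1 so that empty masks do not divide by zero. A NumPy restatement with two boundary cases:

```python
import numpy as np

def dice_coef(y_true, y_pred, smooth=1):
    # NumPy restatement of the Keras-backend dice_coef above
    y_true, y_pred = y_true.ravel(), y_pred.ravel()
    intersection = (y_true * y_pred).sum()
    return (2.0 * intersection + smooth) / (y_true.sum() + y_pred.sum() + smooth)

y = np.array([1.0, 1.0, 0.0, 0.0])
print(dice_coef(y, y))                               # 1.0: perfect overlap
print(dice_coef(y, np.array([0.0, 0.0, 1.0, 1.0])))  # 0.2: disjoint, kept nonzero by smoothing
```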
enel645_group11_final_project.py
|
mapping = {
(31, 120, 180): 0,
(106, 176, 25): 1,
(156, 62, 235): 2,
(255, 255, 255): 3,
(69, 144, 232): 4,
(227, 26, 28): 5,
}
class DataSequence(tf.keras.utils.Sequence):
'''Helper to iterate over the data as Numpy arrays.'''
def __init__(self, in_paths, out_paths, img_size=IMG_SIZE, n_channels=N_CHANNELS, n_classes=num_classes):
self.input_paths = in_paths
self.target_paths = out_paths
self.img_size = img_size
self.n_channels = n_channels
self.n_classes = n_classes
def __len__(self):
return len(self.target_paths)
def __getitem__(self, idx):
'''Returns the (input, target) tuple corresponding to batch #idx.'''
i = idx
path = self.input_paths[i]
target_path = self.target_paths[i]
img = tf.keras.preprocessing.image.load_img(path, color_mode='rgb', target_size=self.img_size)
img = tf.keras.preprocessing.image.img_to_array(img)
img /= 255.0
mask = tf.keras.preprocessing.image.load_img(target_path, color_mode='rgb', target_size=self.img_size)
mask = tf.keras.preprocessing.image.img_to_array(mask)
# replace colors with corresponding labels
result = np.ndarray(shape=mask.shape[:2], dtype='float32')
result[:, :] = -1
for rgb, idx in mapping.items():
result[(mask == rgb).all(2)] = idx
result[(mask == (0, 0, 0)).all(2)] = 1
if(result.min() == -1):
colors = set(tuple(v) for m2d in mask for v in m2d)
# document incorrect mapping
print('\nincorrect mapping')
print(colors)
# One-hot encoded representation
result = tf.keras.utils.to_categorical(result, self.n_classes)
return img, result
# Split dataset into train/validation/test
val_samples = int(0.15 * len(input_paths))
test_samples = int(0.05 * len(input_paths))
train_samples = len(input_paths) - val_samples - test_samples
train_input_paths = input_paths[:train_samples]
train_target_paths = target_paths[:train_samples]
val_input_paths = input_paths[train_samples:train_samples + val_samples]
val_target_paths = target_paths[train_samples:train_samples + val_samples]
test_input_paths = input_paths[train_samples +
val_samples: train_samples + val_samples + test_samples]
test_target_paths = target_paths[train_samples +
val_samples:train_samples + val_samples + test_samples]
train_gen = DataSequence(train_input_paths, train_target_paths)
val_gen = DataSequence(val_input_paths, val_target_paths)
test_gen = DataSequence(test_input_paths, test_target_paths)
print('simulation data train_samples', train_samples)
print('simulation data val_samples', val_samples)
print('simulation data test_samples', test_samples)
#%% Define the model
# weight of each class in the whole dataset
weights = np.array([1-0.008129217, 1-0.741364343, 1-0.038759669,
1-0.033972565, 1-0.159647414, 1-0.018480072])
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = tf.keras.backend.flatten(y_true)
y_pred_f = tf.keras.backend.flatten(y_pred)
intersection = tf.keras.backend.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_f) + tf.keras.backend.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1.0 - dice_coef(y_true, y_pred)
ALPHA = 0.8
GAMMA = 2
def FocalLoss(targets, inputs, alpha=ALPHA, gamma=GAMMA):
inputs = tf.keras.backend.flatten(inputs)
targets = tf.keras.backend.flatten(targets)
BCE = tf.keras.backend.binary_crossentropy(targets, inputs)
BCE_EXP = tf.keras.backend.exp(-BCE)
focal_loss = tf.keras.backend.mean(
alpha * tf.keras.backend.pow((1-BCE_EXP), gamma) * BCE)
return focal_loss
def jaccard_coef(y_true, y_pred, smooth=1):
intersection = tf.keras.backend.sum(
tf.keras.backend.abs(y_true * y_pred), axis=-1)
sum_ = tf.keras.backend.sum(tf.keras.backend.abs(
y_true) + tf.keras.backend.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return jac * smooth
def jaccard_loss(y_true, y_pred, smooth=1):
return (smooth - jaccard_coef(y_true, y_pred, smooth))
def weighted_dice_loss(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
loss = 1. - tf.reduce_sum(score)
return loss
def weighted_dice_coef(y_true, y_pred):
smooth = 1.
w, m1, m2 = weights * weights, y_true, y_pred
intersection = (m1 * m2)
score = (2. * tf.reduce_sum(w * intersection) + smooth) / \
(tf.reduce_sum(w * m1) + tf.reduce_sum(w * m2) + smooth)
return tf.reduce_sum(score)
def get_unet_mod(num_classes=num_classes, img_size=IMG_SIZE, learning_rate=1e-3,
learning_decay=1e-6, drop_out=0.1, nchannels=N_CHANNELS, kshape=(3, 3),
base_trainable=True):
input_img = tf.keras.layers.Input(img_size + (nchannels, ))
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(input_img)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
conv1 = tf.keras.layers.Conv2D(
64, kshape, activation='relu', padding='same', trainable=base_trainable)(conv1)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = tf.keras.layers.Dropout(drop_out)(pool1)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(pool1)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
conv2 = tf.keras.layers.Conv2D(
128, kshape, activation='relu', padding='same', trainable=base_trainable)(conv2)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = tf.keras.layers.Dropout(drop_out)(pool2)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(pool2)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
conv3 = tf.keras.layers.Conv2D(
256, kshape, activation='relu', padding='same', trainable=base_trainable)(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = tf.keras.layers.Dropout(drop_out)(pool3)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(pool3)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
conv4 = tf.keras.layers.Conv2D(
512, kshape, activation='relu', padding='same', trainable=base_trainable)(conv4)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = tf.keras.layers.Dropout(drop_out)(pool4)
conv5 = tf.keras.layers.Conv2D(
1024, kshape, activation='relu', padding='same', trainable=base_trainable)(pool4)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
conv5 = tf.keras.layers.Conv2DTranspose(
1024, k
|
print(input_path, '|', target_path)
|
conditional_block
|
|
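The U-Net encoder in the rows above doubles the filter count at each depth (64 -> 128 -> 256 -> 512 -> 1024) while every MaxPooling2D halves the spatial size; the decoder mirrors this with UpSampling2D plus Conv2DTranspose (default strides=1, so the transposed convs here do not upsample by themselves). A shape walk-through assuming a 256x256 input, since IMG_SIZE is defined outside these rows:

```python
size = 256  # assumed input resolution; IMG_SIZE is not shown in the rows above
for depth, filters in enumerate([64, 128, 256, 512]):
    print(f"conv{depth + 1}: {size}x{size}x{filters}")
    size //= 2  # MaxPooling2D(pool_size=(2, 2)) halves height and width
print(f"conv5 (bottleneck): {size}x{size}x1024")  # 16x16x1024 under this assumption
```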
supplyGoodsList.js
|
15,
paddingBottom:15,
flexDirection: 'row',
alignItems:'center',
marginTop:10,
},
item: {
height: 45,
backgroundColor: 'fff',
paddingLeft: 15,
paddingRight: 15,
alignItems: 'center',
flexDirection: 'row',
},
border_bottom: {
borderBottomWidth: 'slimLine',
borderColor: 'f0'
},
border_bottom_paddingLeft: {
borderBottomWidth: 'slimLine',
borderColor: 'f0',
paddingLeft: 15
},
icon_img: {
width: 14,
height: 8,
marginLeft: 5
},
text_right: {
flex: 1,
textAlign: 'right'
},
gridStyle:{
flexDirection:'row', // change the ListView's main-axis direction
flexWrap:'wrap', // allow wrapping onto new lines
paddingLeft:15,
paddingRight:5,
backgroundColor:'fff'
},
gridItemViewStyle:{
alignItems:'center',
height:30,
flexDirection:'row',
marginRight:10,
},
itemButton:{
borderColor:'#2296f3',
borderWidth:1,
borderRadius:5,
width:60,
height:40,
backgroundColor:'fff',
justifyContent:'center',
alignItems:'center'
},
modal_content: {
backgroundColor: '#fff',
},
btn_dialog:{
flex:1,
height:40,
backgroundColor:'#f0f0f0',
justifyContent:'center',
alignItems:'center'
},
});
/**
* Supplier goods list
* @type {SupplyGoodsList}
*/
module.exports=class SupplyGoodsList extends SComponent{
constructor(props) {
super(props);
this.state={
refreshing:true,
//list of SPUs supplied by this supplier
supplierSpus:[],
// supplierId:this.props.route.data.supplierId
}
this._ds = new ListView.DataSource({
sectionHeaderHasChanged: (r1, r2) => r1 !== r2,
rowHasChanged: (r1, r2) => r1 !== r2
});
this._renderRow=this._renderRow.bind(this);
this._itemClick=this._itemClick.bind(this);
this._btnDeleteClick=this._btnDeleteClick.bind(this);
this._btnDetailsClick=this._btnDetailsClick.bind(this);
this._itemSpreadClick=this._itemSpreadClick.bind(this);
this._btnAffirm=this._btnAffirm.bind(this);
}
componentDidMount() {
InteractionManager.runAfterInteractions( () => {
this._getData();
})
}
_getData = () =>{
this.changeState({
pullDownRefreshing:true,
});
commonRequest({
apiKey: 'querySupplierSpuListKey',
withLoading: true,
objectName: 'supplyUserQueryDO',
params: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId
}
}).then( (res) => {
let pageData = res.data;
this.changeState({
refreshing:false,
pullDownRefreshing:false,
supplierSpus:pageData.supplierSpus,
});
}).catch( err => {})
}
render(){
return(
<Page
pageName='供货商商品列表'
title={this.props.route.data.supplierName+'-'+this.props.route.data.catName}
back={()=>this.navigator().pop()}
pageLoading={this.state.refreshing}>
<ListView
style={{flex:1}}
refreshControl={
<RefreshControl
style={{backgroundColor:'transparent'}}
refreshing={this.state.pullDownRefreshing}
onRefresh={this._getData.bind(this)}
tintColor="#54bb40"
title="加载中..."
titleColor="#999"
colors={['#2296f3']}
progressBackgroundColor="#fff"
/>
}
initialListSize={10}
enableEmptySections={true}
dataSource={this._ds.cloneWithRows(this.state.supplierSpus)}
renderRow={this._renderRow}
/>
<Button
type='green'
size='large'
onPress={()=>{
this.navigator().push({
component: ItemSelect,
from: 'SupplyInfo',
callback: this._getData,
name: 'ItemSelect',
data: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId,
catName: this.props.route.data.catName
}
})
}}
>
添加新品
</Button>
<SXCModal
style={{justifyContent: 'center', alignItems: 'center'}}
animated="slide"
ref={view => this._sxcmodal = view}>
<ModalContent
btnAffirm={this._btnAffirm}
fatherState={this.state}
currentRow={this.state.currentRow}
sxcmodal={this._sxcmodal}
/>
</SXCModal>
</Page>
)
}
/**
* Goods info item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_renderRow(rowData,sectionID,rowID) {
return(
// <TouchableOpacity style={[s.head,s.item, s.border_bottom]} onPress={()=>this._catItemClick(rowData)}>
// <SText fontSize="body" color="666" style={{flex:1}}>{rowData.catName}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'center'}}>{rowData.amountOfSpu}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'right'}}>{rowData.dailySupply}</SText>
// </TouchableOpacity>
<View>
<View style={[s.item_title, s.border_bottom]}>
<SText fontSize="body" color="333" style={{flex:1,marginRight:15}}>{rowData.spuName}</SText>
<TouchableOpacity style={{width:60,flexDirection:'row',alignItems:'center',justifyContent:'flex-end'}} onPress={()=>this._itemSpreadClick(rowData)} >
<SText fontSize="caption" color="999">属性</SText>
<Image source={rowData.spread?ICON_TRIANGEL_UP:ICON_TRIANGEL_DOWN} style={s.icon_img}/>
</TouchableOpacity>
</View>
{
rowData.spread ?
<ListView
dataSource={this._ds.cloneWithRows(rowData.attributeList)}
renderRow={this._attributeRenderRow}
contentContainerStyle={s.gridStyle}
showsVerticalScrollIndicator={false}
showsHorizontalScrollIndicator={false}
style={s.border_bottom}
/>
:
null
}
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">每日供货量</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.dailySupplyDes}</SText>
</View>
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">当前供货价</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.currentPriceDes}</SText>
</View>
<View style={{flexDirection:'row',justifyContent:'flex-end',height:60,backgroundColor:'#fafafa',alignItems:'center',paddingRight:15}}>
<TouchableOpacity onPress={()=>this._btnDeleteClick(rowData,sectionID,rowID)} style={s.itemButton}>
<SText fontSize="body" color="666" style={{color:"#2296f3"}}>删除</SText>
</TouchableOpacity>
{
// <TouchableOpacity onPress={()=>this._btnDetailsClick(rowData)} style={[s.itemButton,{marginLeft:15}]}>
// <SText fontSize="body" color="666" style={{color:"#2296f3"}} >详情</SText>
// </TouchableOpacity>
}
</View>
</View>
)
}
/**
* SPU attribute item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_attributeRenderRow(rowData,sectionID,rowID) {
return(
<View style={s.gridItemViewStyle}>
<SText fontSize="caption" color="999">{rowData.name}</SText>
<SText fontSize="caption" color="orange" style={{marginLeft:3}}>{rowData.value}</SText>
</View>
)
}
/**
* Expand/collapse handler for goods attributes
* @param rowData
* @private
*/
_itemSpreadClick(rowData) {
if (rowData.spread) {
rowData.spread=false;
}else {
rowData.spread=true;
}
this.changeState(
)
}
/**
* Delete-goods button handler
* @param
|
rowData goods info
* @private
*/
_btn
|
conditional_block
|
|
supplyGoodsList.js
|
.route.data.supplierId
}
this._ds = new ListView.DataSource({
sectionHeaderHasChanged: (r1, r2) => r1 !== r2,
rowHasChanged: (r1, r2) => r1 !== r2
});
this._renderRow=this._renderRow.bind(this);
this._itemClick=this._itemClick.bind(this);
this._btnDeleteClick=this._btnDeleteClick.bind(this);
this._btnDetailsClick=this._btnDetailsClick.bind(this);
this._itemSpreadClick=this._itemSpreadClick.bind(this);
this._btnAffirm=this._btnAffirm.bind(this);
}
componentDidMount() {
InteractionManager.runAfterInteractions( () => {
this._getData();
})
}
_getData = () =>{
this.changeState({
pullDownRefreshing:true,
});
commonRequest({
apiKey: 'querySupplierSpuListKey',
withLoading: true,
objectName: 'supplyUserQueryDO',
params: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId
}
}).then( (res) => {
let pageData = res.data;
this.changeState({
refreshing:false,
pullDownRefreshing:false,
supplierSpus:pageData.supplierSpus,
});
}).catch( err => {})
}
render(){
return(
<Page
pageName='供货商商品列表'
title={this.props.route.data.supplierName+'-'+this.props.route.data.catName}
back={()=>this.navigator().pop()}
pageLoading={this.state.refreshing}>
<ListView
style={{flex:1}}
refreshControl={
<RefreshControl
style={{backgroundColor:'transparent'}}
refreshing={this.state.pullDownRefreshing}
onRefresh={this._getData.bind(this)}
tintColor="#54bb40"
title="加载中..."
titleColor="#999"
colors={['#2296f3']}
progressBackgroundColor="#fff"
/>
}
initialListSize={10}
enableEmptySections={true}
dataSource={this._ds.cloneWithRows(this.state.supplierSpus)}
renderRow={this._renderRow}
/>
<Button
type='green'
size='large'
onPress={()=>{
this.navigator().push({
component: ItemSelect,
from: 'SupplyInfo',
callback: this._getData,
name: 'ItemSelect',
data: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId,
catName: this.props.route.data.catName
}
})
}}
>
添加新品
</Button>
<SXCModal
style={{justifyContent: 'center', alignItems: 'center'}}
animated="slide"
ref={view => this._sxcmodal = view}>
<ModalContent
btnAffirm={this._btnAffirm}
fatherState={this.state}
currentRow={this.state.currentRow}
sxcmodal={this._sxcmodal}
/>
</SXCModal>
</Page>
)
}
/**
* Goods info item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_renderRow(rowData,sectionID,rowID) {
return(
// <TouchableOpacity style={[s.head,s.item, s.border_bottom]} onPress={()=>this._catItemClick(rowData)}>
// <SText fontSize="body" color="666" style={{flex:1}}>{rowData.catName}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'center'}}>{rowData.amountOfSpu}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'right'}}>{rowData.dailySupply}</SText>
// </TouchableOpacity>
<View>
<View style={[s.item_title, s.border_bottom]}>
<SText fontSize="body" color="333" style={{flex:1,marginRight:15}}>{rowData.spuName}</SText>
<TouchableOpacity style={{width:60,flexDirection:'row',alignItems:'center',justifyContent:'flex-end'}} onPress={()=>this._itemSpreadClick(rowData)} >
<SText fontSize="caption" color="999">属性</SText>
<Image source={rowData.spread?ICON_TRIANGEL_UP:ICON_TRIANGEL_DOWN} style={s.icon_img}/>
</TouchableOpacity>
</View>
{
rowData.spread ?
<ListView
dataSource={this._ds.cloneWithRows(rowData.attributeList)}
renderRow={this._attributeRenderRow}
contentContainerStyle={s.gridStyle}
showsVerticalScrollIndicator={false}
showsHorizontalScrollIndicator={false}
style={s.border_bottom}
/>
:
null
}
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">每日供货量</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.dailySupplyDes}</SText>
</View>
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">当前供货价</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.currentPriceDes}</SText>
</View>
<View style={{flexDirection:'row',justifyContent:'flex-end',height:60,backgroundColor:'#fafafa',alignItems:'center',paddingRight:15}}>
<TouchableOpacity onPress={()=>this._btnDeleteClick(rowData,sectionID,rowID)} style={s.itemButton}>
<SText fontSize="body" color="666" style={{color:"#2296f3"}}>删除</SText>
</TouchableOpacity>
{
// <TouchableOpacity onPress={()=>this._btnDetailsClick(rowData)} style={[s.itemButton,{marginLeft:15}]}>
// <SText fontSize="body" color="666" style={{color:"#2296f3"}} >详情</SText>
// </TouchableOpacity>
}
</View>
</View>
)
}
/**
* SPU attribute item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_attributeRenderRow(rowData,sectionID,rowID) {
return(
<View style={s.gridItemViewStyle}>
<SText fontSize="caption" color="999">{rowData.name}</SText>
<SText fontSize="caption" color="orange" style={{marginLeft:3}}>{rowData.value}</SText>
</View>
)
}
/**
* Expand/collapse handler for goods attributes
* @param rowData
* @private
*/
_itemSpreadClick(rowData) {
if (rowData.spread) {
rowData.spread=false;
}else {
rowData.spread=true;
}
this.changeState(
)
}
/**
* Delete-goods button handler
* @param rowData goods info
* @private
*/
_btnDeleteClick(rowData,sectionID,rowID) {
this.changeState({
currentRow: {
rowData,
rowID,
sectionID
}
}, ()=> {
this._sxcmodal._toggle();
})
}
/**
* View goods details button
* @param rowData
* @private
*/
_btnDetailsClick(rowData) {
__STORE.dispatch(UtilsAction.toast('功能开发中', 1500));
// console.log("dododo in _btnDetailsClick");
// console.log(rowData);
// this.navigator().push({
// })
}
/**
* Category item click
* @param itemData category info
* catId catName
* @private
*/
_itemClick(itemData){
}
_btnAffirm(rowData) {
console.log('rowData',rowData);
commonRequest({
apiKey: 'deleteSupplierSpuKey',
withLoading: true,
objectName: 'supplySpuQueryDO',
params: {
supplierId: this.props.route.data.supplierId,
spuId: rowData.spuId,
}
}).then( (res) => {
let pageData = res.data;
this.changeState({
});
this._getData();
}).catch( err => {})
}
}
/**
* Modal content component
*/
class ModalContent extends React.Component {
constructor(props) {
super(props);
this.state = {
}
}
_affirm(rowData) {
this.props.btnAffirm(rowData);
this.props.sxcmodal._toggle();
}
render() {
return (
<View style={[s.modal_content]}>
<View style={{backgroundColor:'#2296f3',height:45,justifyContent:'center',a
|
lignItems:'center'}}>
<SText fontSize="headline" color="fff" styl
|
identifier_body
|
|
supplyGoodsList.js
|
: 15,
paddingTop:15,
paddingBottom:15,
flexDirection: 'row',
alignItems:'center',
marginTop:10,
},
item: {
height: 45,
backgroundColor: 'fff',
paddingLeft: 15,
paddingRight: 15,
alignItems: 'center',
flexDirection: 'row',
},
border_bottom: {
borderBottomWidth: 'slimLine',
borderColor: 'f0'
},
border_bottom_paddingLeft: {
borderBottomWidth: 'slimLine',
borderColor: 'f0',
paddingLeft: 15
},
icon_img: {
width: 14,
height: 8,
marginLeft: 5
},
text_right: {
flex: 1,
textAlign: 'right'
},
gridStyle:{
flexDirection:'row', // change the ListView's main-axis direction
flexWrap:'wrap', // allow wrapping onto new lines
paddingLeft:15,
paddingRight:5,
backgroundColor:'fff'
},
gridItemViewStyle:{
alignItems:'center',
height:30,
flexDirection:'row',
marginRight:10,
},
itemButton:{
borderColor:'#2296f3',
borderWidth:1,
borderRadius:5,
width:60,
height:40,
backgroundColor:'fff',
justifyContent:'center',
alignItems:'center'
},
modal_content: {
backgroundColor: '#fff',
},
btn_dialog:{
flex:1,
height:40,
backgroundColor:'#f0f0f0',
justifyContent:'center',
alignItems:'center'
},
});
/**
* Supplier goods list
* @type {SupplyGoodsList}
*/
module.exports=class SupplyGoodsList extends SComponent{
constructor(props) {
super(props);
this.state={
refreshing:true,
//list of SPUs supplied by this supplier
supplierSpus:[],
// supplierId:this.props.route.data.supplierId
}
this._ds = new ListView.DataSource({
sectionHeaderHasChanged: (r1, r2) => r1 !== r2,
rowHasChanged: (r1, r2) => r1 !== r2
});
this._renderRow=this._renderRow.bind(this);
this._itemClick=this._itemClick.bind(this);
this._btnDeleteClick=this._btnDeleteClick.bind(this);
this._btnDetailsClick=this._btnDetailsClick.bind(this);
this._itemSpreadClick=this._itemSpreadClick.bind(this);
this._btnAffirm=this._btnAffirm.bind(this);
}
componentDidMount() {
InteractionManager.runAfterInteractions( () => {
this._getData();
})
}
_getData = () =>{
this.changeState({
pullDownRefreshing:true,
});
commonRequest({
apiKey: 'querySupplierSpuListKey',
withLoading: true,
objectName: 'supplyUserQueryDO',
params: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId
}
}).then( (res) => {
let pageData = res.data;
this.changeState({
refreshing:false,
pullDownRefreshing:false,
supplierSpus:pageData.supplierSpus,
});
}).catch( err => {})
}
render(){
return(
<Page
pageName='供货商商品列表'
title={this.props.route.data.supplierName+'-'+this.props.route.data.catName}
back={()=>this.navigator().pop()}
pageLoading={this.state.refreshing}>
<ListView
style={{flex:1}}
refreshControl={
<RefreshControl
style={{backgroundColor:'transparent'}}
refreshing={this.state.pullDownRefreshing}
onRefresh={this._getData.bind(this)}
tintColor="#54bb40"
title="加载中..."
titleColor="#999"
colors={['#2296f3']}
progressBackgroundColor="#fff"
/>
}
initialListSize={10}
enableEmptySections={true}
dataSource={this._ds.cloneWithRows(this.state.supplierSpus)}
renderRow={this._renderRow}
/>
<Button
type='green'
size='large'
onPress={()=>{
this.navigator().push({
component: ItemSelect,
from: 'SupplyInfo',
callback: this._getData,
name: 'ItemSelect',
data: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId,
catName: this.props.route.data.catName
}
})
}}
>
添加新品
</Button>
<SXCModal
style={{justifyContent: 'center', alignItems: 'center'}}
animated="slide"
ref={view => this._sxcmodal = view}>
<ModalContent
btnAffirm={this._btnAffirm}
fatherState={this.state}
currentRow={this.state.currentRow}
sxcmodal={this._sxcmodal}
/>
</SXCModal>
</Page>
)
}
/**
* Goods info item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_renderRow(rowData,sectionID,rowID) {
return(
// <TouchableOpacity style={[s.head,s.item, s.border_bottom]} onPress={()=>this._catItemClick(rowData)}>
// <SText fontSize="body" color="666" style={{flex:1}}>{rowData.catName}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'center'}}>{rowData.amountOfSpu}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'right'}}>{rowData.dailySupply}</SText>
// </TouchableOpacity>
<View>
<View style={[s.item_title, s.border_bottom]}>
<SText fontSize="body" color="333" style={{flex:1,marginRight:15}}>{rowData.spuName}</SText>
<TouchableOpacity style={{width:60,flexDirection:'row',alignItems:'center',justifyContent:'flex-end'}} onPress={()=>this._itemSpreadClick(rowData)} >
<SText fontSize="caption" color="999">属性</SText>
<Image source={rowData.spread?ICON_TRIANGEL_UP:ICON_TRIANGEL_DOWN} style={s.icon_img}/>
</TouchableOpacity>
</View>
{
rowData.spread ?
<ListView
dataSource={this._ds.cloneWithRows(rowData.attributeList)}
renderRow={this._attributeRenderRow}
contentContainerStyle={s.gridStyle}
showsVerticalScrollIndicator={false}
showsHorizontalScrollIndicator={false}
style={s.border_bottom}
/>
:
null
}
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">每日供货量</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.dailySupplyDes}</SText>
</View>
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">当前供货价</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.currentPriceDes}</SText>
</View>
<View style={{flexDirection:'row',justifyContent:'flex-end',height:60,backgroundColor:'#fafafa',alignItems:'center',paddingRight:15}}>
<TouchableOpacity onPress={()=>this._btnDeleteClick(rowData,sectionID,rowID)} style={s.itemButton}>
<SText fontSize="body" color="666" style={{color:"#2296f3"}}>删除</SText>
</TouchableOpacity>
{
// <TouchableOpacity onPress={()=>this._btnDetailsClick(rowData)} style={[s.itemButton,{marginLeft:15}]}>
// <SText fontSize="body" color="666" style={{color:"#2296f3"}} >详情</SText>
// </TouchableOpacity>
}
</View>
</View>
)
}
/**
* SPU attribute item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_attributeRenderRow(rowData,sectionID,rowID) {
return(
<View style={s.gridItemViewStyle}>
<SText fontSize="caption" co
|
name}</SText>
<SText fontSize="caption" color="orange" style={{marginLeft:3}}>{rowData.value}</SText>
</View>
)
}
/**
* Expand/collapse handler for goods attributes
* @param rowData
* @private
*/
_itemSpreadClick(rowData) {
if (rowData.spread) {
rowData.spread=false;
}else {
rowData.spread=true;
}
this.changeState(
)
}
/**
* Delete-goods button handler
* @param rowData goods info
|
lor="999">{rowData.
|
identifier_name
|
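Rows like the one above make the FIM contract easy to verify by hand: concatenating prefix, middle, and suffix restores the original source line. Using the literal fragments from the row above, where the middle cuts straight through an SText color attribute:

```python
prefix = '<SText fontSize="caption" co'  # final characters of the row's prefix
middle = 'lor="999">{rowData.'           # the held-out middle (fim_type: identifier_name)
suffix = 'name}</SText>'                 # opening characters of the row's suffix
print(prefix + middle + suffix)
# -> <SText fontSize="caption" color="999">{rowData.name}</SText>
```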
supplyGoodsList.js
|
paddingRight: 15,
paddingTop:15,
paddingBottom:15,
flexDirection: 'row',
alignItems:'center',
marginTop:10,
},
item: {
height: 45,
backgroundColor: 'fff',
paddingLeft: 15,
paddingRight: 15,
alignItems: 'center',
flexDirection: 'row',
},
border_bottom: {
borderBottomWidth: 'slimLine',
borderColor: 'f0'
},
border_bottom_paddingLeft: {
borderBottomWidth: 'slimLine',
borderColor: 'f0',
paddingLeft: 15
},
icon_img: {
width: 14,
height: 8,
marginLeft: 5
},
text_right: {
flex: 1,
textAlign: 'right'
},
gridStyle:{
flexDirection:'row', // change the ListView's main-axis direction
flexWrap:'wrap', // allow wrapping onto new lines
paddingLeft:15,
paddingRight:5,
backgroundColor:'fff'
},
gridItemViewStyle:{
alignItems:'center',
height:30,
flexDirection:'row',
marginRight:10,
},
itemButton:{
borderColor:'#2296f3',
borderWidth:1,
borderRadius:5,
width:60,
height:40,
backgroundColor:'fff',
justifyContent:'center',
alignItems:'center'
},
modal_content: {
backgroundColor: '#fff',
},
btn_dialog:{
flex:1,
height:40,
backgroundColor:'#f0f0f0',
justifyContent:'center',
alignItems:'center'
},
});
/**
* Supplier goods list
* @type {SupplyGoodsList}
*/
module.exports=class SupplyGoodsList extends SComponent{
constructor(props) {
super(props);
this.state={
refreshing:true,
//list of SPUs supplied by this supplier
supplierSpus:[],
// supplierId:this.props.route.data.supplierId
}
this._ds = new ListView.DataSource({
sectionHeaderHasChanged: (r1, r2) => r1 !== r2,
rowHasChanged: (r1, r2) => r1 !== r2
});
this._renderRow=this._renderRow.bind(this);
this._itemClick=this._itemClick.bind(this);
this._btnDeleteClick=this._btnDeleteClick.bind(this);
this._btnDetailsClick=this._btnDetailsClick.bind(this);
this._itemSpreadClick=this._itemSpreadClick.bind(this);
this._btnAffirm=this._btnAffirm.bind(this);
}
componentDidMount() {
InteractionManager.runAfterInteractions( () => {
this._getData();
})
}
_getData = () =>{
this.changeState({
pullDownRefreshing:true,
});
commonRequest({
apiKey: 'querySupplierSpuListKey',
withLoading: true,
objectName: 'supplyUserQueryDO',
params: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId
}
}).then( (res) => {
let pageData = res.data;
this.changeState({
refreshing:false,
pullDownRefreshing:false,
supplierSpus:pageData.supplierSpus,
});
}).catch( err => {})
}
render(){
return(
<Page
pageName='供货商商品列表'
title={this.props.route.data.supplierName+'-'+this.props.route.data.catName}
back={()=>this.navigator().pop()}
pageLoading={this.state.refreshing}>
<ListView
style={{flex:1}}
refreshControl={
<RefreshControl
style={{backgroundColor:'transparent'}}
refreshing={this.state.pullDownRefreshing}
onRefresh={this._getData.bind(this)}
tintColor="#54bb40"
title="加载中..."
titleColor="#999"
colors={['#2296f3']}
progressBackgroundColor="#fff"
/>
}
initialListSize={10}
enableEmptySections={true}
dataSource={this._ds.cloneWithRows(this.state.supplierSpus)}
renderRow={this._renderRow}
/>
<Button
type='green'
size='large'
onPress={()=>{
this.navigator().push({
component: ItemSelect,
from: 'SupplyInfo',
callback: this._getData,
name: 'ItemSelect',
data: {
supplierId: this.props.route.data.supplierId,
catId: this.props.route.data.catId,
catName: this.props.route.data.catName
}
})
}}
>
添加新品
</Button>
<SXCModal
style={{justifyContent: 'center', alignItems: 'center'}}
animated="slide"
ref={view => this._sxcmodal = view}>
<ModalContent
btnAffirm={this._btnAffirm}
fatherState={this.state}
currentRow={this.state.currentRow}
sxcmodal={this._sxcmodal}
/>
</SXCModal>
</Page>
)
}
/**
* Goods info item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_renderRow(rowData,sectionID,rowID) {
return(
// <TouchableOpacity style={[s.head,s.item, s.border_bottom]} onPress={()=>this._catItemClick(rowData)}>
// <SText fontSize="body" color="666" style={{flex:1}}>{rowData.catName}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'center'}}>{rowData.amountOfSpu}</SText>
// <SText fontSize="body" color="666" style={{flex:1,textAlign:'right'}}>{rowData.dailySupply}</SText>
// </TouchableOpacity>
<View>
<View style={[s.item_title, s.border_bottom]}>
<SText fontSize="body" color="333" style={{flex:1,marginRight:15}}>{rowData.spuName}</SText>
<TouchableOpacity style={{width:60,flexDirection:'row',alignItems:'center',justifyContent:'flex-end'}} onPress={()=>this._itemSpreadClick(rowData)} >
<SText fontSize="caption" color="999">属性</SText>
<Image source={rowData.spread?ICON_TRIANGEL_UP:ICON_TRIANGEL_DOWN} style={s.icon_img}/>
</TouchableOpacity>
</View>
{
rowData.spread ?
<ListView
dataSource={this._ds.cloneWithRows(rowData.attributeList)}
renderRow={this._attributeRenderRow}
contentContainerStyle={s.gridStyle}
showsVerticalScrollIndicator={false}
showsHorizontalScrollIndicator={false}
style={s.border_bottom}
/>
:
null
}
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">每日供货量</SText>
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.dailySupplyDes}</SText>
</View>
<View style={[s.item, s.border_bottom]}>
<SText fontSize="caption" color="999">当前供货价</SText>
|
</TouchableOpacity>
{
// <TouchableOpacity onPress={()=>this._btnDetailsClick(rowData)} style={[s.itemButton,{marginLeft:15}]}>
// <SText fontSize="body" color="666" style={{color:"#2296f3"}} >详情</SText>
// </TouchableOpacity>
}
</View>
</View>
)
}
/**
     * SPU attribute item view
* @param rowData
* @param sectionID
* @param rowID
* @returns {XML}
* @private
*/
_attributeRenderRow(rowData,sectionID,rowID) {
return(
<View style={s.gridItemViewStyle}>
<SText fontSize="caption" color="999">{rowData.name}</SText>
<SText fontSize="caption" color="orange" style={{marginLeft:3}}>{rowData.value}</SText>
</View>
)
}
/**
     * Toggles show/hide of the goods attribute list
* @param rowData
* @private
*/
_itemSpreadClick(rowData) {
        rowData.spread = !rowData.spread;
        // trigger a re-render; note that rowData is mutated in place
        this.changeState();
}
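    // Note: the toggle above mutates rowData in place, so the DataSource's
    // `rowHasChanged: (r1, r2) => r1 !== r2` check cannot detect the change by
    // reference. A minimal immutable variant (a sketch; it assumes each row in
    // `supplierSpus` carries a unique `spuId`) would be:
    //
    //     _itemSpreadClick(rowData) {
    //         const supplierSpus = this.state.supplierSpus.map(row =>
    //             row.spuId === rowData.spuId ? {...row, spread: !row.spread} : row);
    //         this.changeState({supplierSpus});
    //     }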
/**
     * Delete-goods button
     * @param rowData goods info
*
|
<SText fontSize="caption" color="333" style={s.text_right} >{rowData.currentPriceDes}</SText>
</View>
<View style={{flexDirection:'row',justifyContent:'flex-end',height:60,backgroundColor:'#fafafa',alignItems:'center',paddingRight:15}}>
<TouchableOpacity onPress={()=>this._btnDeleteClick(rowData,sectionID,rowID)} style={s.itemButton}>
<SText fontSize="body" color="666" style={{color:"#2296f3"}}>删除</SText>
|
random_line_split
|
api_op_RecognizeUtterance.go
|
.
InputStream io.Reader
// Request-specific information passed between the client application and Amazon
// Lex V2. The namespace x-amz-lex: is reserved for special attributes. Don't
// create any request attributes for prefix x-amz-lex: . The requestAttributes
// field must be compressed using gzip and then base64 encoded before sending to
// Amazon Lex V2.
RequestAttributes *string
// The message that Amazon Lex V2 returns in the response can be either text or
// speech based on the responseContentType value.
// - If the value is text/plain;charset=utf-8 , Amazon Lex V2 returns text in the
// response.
// - If the value begins with audio/ , Amazon Lex V2 returns speech in the
// response. Amazon Lex V2 uses Amazon Polly to generate the speech using the
// configuration that you specified in the responseContentType parameter. For
// example, if you specify audio/mpeg as the value, Amazon Lex V2 returns speech
// in the MPEG format.
// - If the value is audio/pcm , the speech returned is audio/pcm at 16 KHz in
// 16-bit, little-endian format.
// - The following are the accepted values:
// - audio/mpeg
// - audio/ogg
// - audio/pcm (16 KHz)
// - audio/* (defaults to mpeg)
// - text/plain; charset=utf-8
ResponseContentType *string
// Sets the state of the session with the user. You can use this to set the
// current intent, attributes, context, and dialog action. Use the dialog action to
// determine the next step that Amazon Lex V2 should use in the conversation with
// the user. The sessionState field must be compressed using gzip and then base64
// encoded before sending to Amazon Lex V2.
SessionState *string
noSmithyDocumentSerde
}
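// The requestAttributes and sessionState fields above must be gzip-compressed
// and then base64-encoded before they are sent, as their doc comments note.
// A minimal helper for that (a sketch, not part of the generated SDK; the name
// compressField is ours; assumes imports "bytes", "compress/gzip",
// "encoding/base64"):
//
//	func compressField(raw []byte) (*string, error) {
//		var buf bytes.Buffer
//		zw := gzip.NewWriter(&buf)
//		if _, err := zw.Write(raw); err != nil {
//			return nil, err
//		}
//		if err := zw.Close(); err != nil { // flush the gzip stream
//			return nil, err
//		}
//		s := base64.StdEncoding.EncodeToString(buf.Bytes())
//		return &s, nil
//	}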
type RecognizeUtteranceOutput struct {
// The prompt or statement to send to the user. This is based on the bot
// configuration and context. For example, if Amazon Lex V2 did not understand the
// user intent, it sends the clarificationPrompt configured for the bot. If the
// intent requires confirmation before taking the fulfillment action, it sends the
// confirmationPrompt . Another example: Suppose that the Lambda function
// successfully fulfilled the intent, and sent a message to convey to the user.
// Then Amazon Lex V2 sends that message in the response.
AudioStream io.ReadCloser
// Content type as specified in the responseContentType in the request.
ContentType *string
// Indicates whether the input mode to the operation was text or speech.
InputMode *string
// The text used to process the request. If the input was an audio stream, the
// inputTranscript field contains the text extracted from the audio stream. This is
// the text that is actually processed to recognize intents and slot values. You
// can use this information to determine if Amazon Lex V2 is correctly processing
// the audio that you send. The inputTranscript field is compressed with gzip and
// then base64 encoded. Before you can use the contents of the field, you must
// decode and decompress the contents. See the example for a simple function to
// decode and decompress the contents.
InputTranscript *string
// A list of intents that Amazon Lex V2 determined might satisfy the user's
// utterance. Each interpretation includes the intent, a score that indicates how
// confident Amazon Lex V2 is that the interpretation is the correct one, and an
// optional sentiment response that indicates the sentiment expressed in the
// utterance. The interpretations field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
Interpretations *string
// A list of messages that were last sent to the user. The messages are ordered
// based on the order that you returned the messages from your Lambda function or
// the order that the messages are defined in the bot. The messages field is
// compressed with gzip and then base64 encoded. Before you can use the contents of
// the field, you must decode and decompress the contents. See the example for a
// simple function to decode and decompress the contents.
Messages *string
// The bot member that recognized the utterance.
RecognizedBotMember *string
// The attributes sent in the request. The requestAttributes field is compressed
// with gzip and then base64 encoded. Before you can use the contents of the field,
// you must decode and decompress the contents.
RequestAttributes *string
// The identifier of the session in use.
SessionId *string
// Represents the current state of the dialog between the user and the bot. Use
// this to determine the progress of the conversation and what the next action
// might be. The sessionState field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
SessionState *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
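// Several of the output fields above (inputTranscript, interpretations,
// messages, requestAttributes, sessionState) arrive gzip-compressed and
// base64-encoded. A sketch of the inverse helper referenced by the doc
// comments (decodeField is our name, not an SDK function; assumes imports
// "bytes", "compress/gzip", "encoding/base64", "io"):
//
//	func decodeField(field *string) ([]byte, error) {
//		if field == nil {
//			return nil, nil
//		}
//		compressed, err := base64.StdEncoding.DecodeString(*field)
//		if err != nil {
//			return nil, err
//		}
//		zr, err := gzip.NewReader(bytes.NewReader(compressed))
//		if err != nil {
//			return nil, err
//		}
//		defer zr.Close()
//		return io.ReadAll(zr)
//	}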
func (c *Client) addOperationRecognizeUtteranceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addRecognizeUtteranceResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpRecognizeUtteranceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRecognizeUtterance(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opRecognizeUtterance(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "lex",
OperationName: "RecognizeUtterance",
}
}
type opRecognizeUtteranceResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opRecognizeUtteranceResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opRecognizeUtteranceResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil
|
{
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
|
conditional_block
|
|
api_op_RecognizeUtterance.go
|
)
|
(ctx context.Context, params *RecognizeUtteranceInput, optFns ...func(*Options)) (*RecognizeUtteranceOutput, error) {
if params == nil {
params = &RecognizeUtteranceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "RecognizeUtterance", params, optFns, c.addOperationRecognizeUtteranceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*RecognizeUtteranceOutput)
out.ResultMetadata = metadata
return out, nil
}
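// A minimal call sketch (values are placeholders; assumes an initialized
// *Client plus the aws and strings packages; it sets the required members of
// the input struct below plus an input stream):
//
//	out, err := client.RecognizeUtterance(ctx, &RecognizeUtteranceInput{
//		BotAliasId:         aws.String("exampleAliasId"),
//		BotId:              aws.String("exampleBotId"),
//		LocaleId:           aws.String("en_US"),
//		SessionId:          aws.String("exampleSessionId"),
//		RequestContentType: aws.String("text/plain; charset=utf-8"),
//		InputStream:        strings.NewReader("I would like to order flowers"),
//	})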
type RecognizeUtteranceInput struct {
// The alias identifier in use for the bot that should receive the request.
//
// This member is required.
BotAliasId *string
// The identifier of the bot that should receive the request.
//
// This member is required.
BotId *string
// The locale where the session is in use.
//
// This member is required.
LocaleId *string
// Indicates the format for audio input or that the content is text. The header
// must start with one of the following prefixes:
// - PCM format, audio data must be in little-endian byte order.
// - audio/l16; rate=16000; channels=1
// - audio/x-l16; sample-rate=16000; channel-count=1
// - audio/lpcm; sample-rate=8000; sample-size-bits=16; channel-count=1;
// is-big-endian=false
// - Opus format
// -
// audio/x-cbr-opus-with-preamble;preamble-size=0;bit-rate=256000;frame-size-milliseconds=4
//
// - Text format
// - text/plain; charset=utf-8
//
// This member is required.
RequestContentType *string
// The identifier of the session in use.
//
// This member is required.
SessionId *string
// User input in PCM or Opus audio format or text format as described in the
// requestContentType parameter.
InputStream io.Reader
// Request-specific information passed between the client application and Amazon
// Lex V2. The namespace x-amz-lex: is reserved for special attributes. Don't
// create any request attributes for prefix x-amz-lex: . The requestAttributes
// field must be compressed using gzip and then base64 encoded before sending to
// Amazon Lex V2.
RequestAttributes *string
// The message that Amazon Lex V2 returns in the response can be either text or
// speech based on the responseContentType value.
// - If the value is text/plain;charset=utf-8 , Amazon Lex V2 returns text in the
// response.
// - If the value begins with audio/ , Amazon Lex V2 returns speech in the
// response. Amazon Lex V2 uses Amazon Polly to generate the speech using the
// configuration that you specified in the responseContentType parameter. For
// example, if you specify audio/mpeg as the value, Amazon Lex V2 returns speech
// in the MPEG format.
// - If the value is audio/pcm , the speech returned is audio/pcm at 16 KHz in
// 16-bit, little-endian format.
// - The following are the accepted values:
// - audio/mpeg
// - audio/ogg
// - audio/pcm (16 KHz)
// - audio/* (defaults to mpeg)
// - text/plain; charset=utf-8
ResponseContentType *string
// Sets the state of the session with the user. You can use this to set the
// current intent, attributes, context, and dialog action. Use the dialog action to
// determine the next step that Amazon Lex V2 should use in the conversation with
// the user. The sessionState field must be compressed using gzip and then base64
// encoded before sending to Amazon Lex V2.
SessionState *string
noSmithyDocumentSerde
}
type RecognizeUtteranceOutput struct {
// The prompt or statement to send to the user. This is based on the bot
// configuration and context. For example, if Amazon Lex V2 did not understand the
// user intent, it sends the clarificationPrompt configured for the bot. If the
// intent requires confirmation before taking the fulfillment action, it sends the
// confirmationPrompt . Another example: Suppose that the Lambda function
// successfully fulfilled the intent, and sent a message to convey to the user.
// Then Amazon Lex V2 sends that message in the response.
AudioStream io.ReadCloser
// Content type as specified in the responseContentType in the request.
ContentType *string
// Indicates whether the input mode to the operation was text or speech.
InputMode *string
// The text used to process the request. If the input was an audio stream, the
// inputTranscript field contains the text extracted from the audio stream. This is
// the text that is actually processed to recognize intents and slot values. You
// can use this information to determine if Amazon Lex V2 is correctly processing
// the audio that you send. The inputTranscript field is compressed with gzip and
// then base64 encoded. Before you can use the contents of the field, you must
// decode and decompress the contents. See the example for a simple function to
// decode and decompress the contents.
InputTranscript *string
// A list of intents that Amazon Lex V2 determined might satisfy the user's
// utterance. Each interpretation includes the intent, a score that indicates how
// confident Amazon Lex V2 is that the interpretation is the correct one, and an
// optional sentiment response that indicates the sentiment expressed in the
// utterance. The interpretations field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
Interpretations *string
// A list of messages that were last sent to the user. The messages are ordered
// based on the order that you returned the messages from your Lambda function or
// the order that the messages are defined in the bot. The messages field is
// compressed with gzip and then base64 encoded. Before you can use the contents of
// the field, you must decode and decompress the contents. See the example for a
// simple function to decode and decompress the contents.
Messages *string
// The bot member that recognized the utterance.
RecognizedBotMember *string
// The attributes sent in the request. The requestAttributes field is compressed
// with gzip and then base64 encoded. Before you can use the contents of the field,
// you must decode and decompress the contents.
RequestAttributes *string
// The identifier of the session in use.
SessionId *string
// Represents the current state of the dialog between the user and the bot. Use
// this to determine the progress of the conversation and what the next action
// might be. The sessionState field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
SessionState *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationRecognizeUtteranceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack
|
RecognizeUtterance
|
identifier_name
|
api_op_RecognizeUtterance.go
|
// - Failed message - The failed message is returned if the Lambda function
// throws an exception or if the Lambda function returns a failed intent state
// without a message.
// - Timeout message - If you don't configure a timeout message and a timeout,
// and the Lambda function doesn't return within 30 seconds, the timeout message is
// returned. If you configure a timeout, the timeout message is returned when the
// period times out.
//
// For more information, see Completion message (https://docs.aws.amazon.com/lexv2/latest/dg/streaming-progress.html#progress-complete.html)
// .
func (c *Client) RecognizeUtterance(ctx context.Context, params *RecognizeUtteranceInput, optFns ...func(*Options)) (*RecognizeUtteranceOutput, error) {
if params == nil {
params = &RecognizeUtteranceInput{}
}
result, metadata, err := c.invokeOperation(ctx, "RecognizeUtterance", params, optFns, c.addOperationRecognizeUtteranceMiddlewares)
if err != nil {
return nil, err
}
out := result.(*RecognizeUtteranceOutput)
out.ResultMetadata = metadata
return out, nil
}
type RecognizeUtteranceInput struct {
// The alias identifier in use for the bot that should receive the request.
//
// This member is required.
BotAliasId *string
// The identifier of the bot that should receive the request.
//
// This member is required.
BotId *string
// The locale where the session is in use.
//
// This member is required.
LocaleId *string
// Indicates the format for audio input or that the content is text. The header
// must start with one of the following prefixes:
// - PCM format, audio data must be in little-endian byte order.
// - audio/l16; rate=16000; channels=1
// - audio/x-l16; sample-rate=16000; channel-count=1
// - audio/lpcm; sample-rate=8000; sample-size-bits=16; channel-count=1;
// is-big-endian=false
// - Opus format
// -
// audio/x-cbr-opus-with-preamble;preamble-size=0;bit-rate=256000;frame-size-milliseconds=4
//
// - Text format
// - text/plain; charset=utf-8
//
// This member is required.
RequestContentType *string
// The identifier of the session in use.
//
// This member is required.
SessionId *string
// User input in PCM or Opus audio format or text format as described in the
// requestContentType parameter.
InputStream io.Reader
// Request-specific information passed between the client application and Amazon
// Lex V2. The namespace x-amz-lex: is reserved for special attributes. Don't
// create any request attributes for prefix x-amz-lex: . The requestAttributes
// field must be compressed using gzip and then base64 encoded before sending to
// Amazon Lex V2.
RequestAttributes *string
// The message that Amazon Lex V2 returns in the response can be either text or
// speech based on the responseContentType value.
// - If the value is text/plain;charset=utf-8 , Amazon Lex V2 returns text in the
// response.
// - If the value begins with audio/ , Amazon Lex V2 returns speech in the
// response. Amazon Lex V2 uses Amazon Polly to generate the speech using the
// configuration that you specified in the responseContentType parameter. For
// example, if you specify audio/mpeg as the value, Amazon Lex V2 returns speech
// in the MPEG format.
// - If the value is audio/pcm , the speech returned is audio/pcm at 16 KHz in
// 16-bit, little-endian format.
// - The following are the accepted values:
// - audio/mpeg
// - audio/ogg
// - audio/pcm (16 KHz)
// - audio/* (defaults to mpeg)
// - text/plain; charset=utf-8
ResponseContentType *string
// Sets the state of the session with the user. You can use this to set the
// current intent, attributes, context, and dialog action. Use the dialog action to
// determine the next step that Amazon Lex V2 should use in the conversation with
// the user. The sessionState field must be compressed using gzip and then base64
// encoded before sending to Amazon Lex V2.
SessionState *string
noSmithyDocumentSerde
}
type RecognizeUtteranceOutput struct {
// The prompt or statement to send to the user. This is based on the bot
// configuration and context. For example, if Amazon Lex V2 did not understand the
// user intent, it sends the clarificationPrompt configured for the bot. If the
// intent requires confirmation before taking the fulfillment action, it sends the
// confirmationPrompt . Another example: Suppose that the Lambda function
// successfully fulfilled the intent, and sent a message to convey to the user.
// Then Amazon Lex V2 sends that message in the response.
AudioStream io.ReadCloser
// Content type as specified in the responseContentType in the request.
ContentType *string
// Indicates whether the input mode to the operation was text or speech.
InputMode *string
// The text used to process the request. If the input was an audio stream, the
// inputTranscript field contains the text extracted from the audio stream. This is
// the text that is actually processed to recognize intents and slot values. You
// can use this information to determine if Amazon Lex V2 is correctly processing
// the audio that you send. The inputTranscript field is compressed with gzip and
// then base64 encoded. Before you can use the contents of the field, you must
// decode and decompress the contents. See the example for a simple function to
// decode and decompress the contents.
InputTranscript *string
// A list of intents that Amazon Lex V2 determined might satisfy the user's
// utterance. Each interpretation includes the intent, a score that indicates how
// confident Amazon Lex V2 is that the interpretation is the correct one, and an
// optional sentiment response that indicates the sentiment expressed in the
// utterance. The interpretations field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
Interpretations *string
// A list of messages that were last sent to the user. The messages are ordered
// based on the order that you returned the messages from your Lambda function or
// the order that the messages are defined in the bot. The messages field is
// compressed with gzip and then base64 encoded. Before you can use the contents of
// the field, you must decode and decompress the contents. See the example for a
// simple function to decode and decompress the contents.
Messages *string
// The bot member that recognized the utterance.
RecognizedBotMember *string
// The attributes sent in the request. The requestAttributes field is compressed
// with gzip and then base64 encoded. Before you can use the contents of the field,
// you must decode and decompress the contents.
RequestAttributes *string
// The identifier of the session in use.
SessionId *string
// Represents the current state of the dialog between the user and the bot. Use
// this to determine the progress of the conversation and what the next action
// might be. The sessionState field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
SessionState *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationRecognizeUtteranceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.Add
|
random_line_split
|
||
api_op_RecognizeUtterance.go
|
// create any request attributes for prefix x-amz-lex: . The requestAttributes
// field must be compressed using gzip and then base64 encoded before sending to
// Amazon Lex V2.
RequestAttributes *string
// The message that Amazon Lex V2 returns in the response can be either text or
// speech based on the responseContentType value.
// - If the value is text/plain;charset=utf-8 , Amazon Lex V2 returns text in the
// response.
// - If the value begins with audio/ , Amazon Lex V2 returns speech in the
// response. Amazon Lex V2 uses Amazon Polly to generate the speech using the
// configuration that you specified in the responseContentType parameter. For
// example, if you specify audio/mpeg as the value, Amazon Lex V2 returns speech
// in the MPEG format.
// - If the value is audio/pcm , the speech returned is audio/pcm at 16 KHz in
// 16-bit, little-endian format.
// - The following are the accepted values:
// - audio/mpeg
// - audio/ogg
// - audio/pcm (16 KHz)
// - audio/* (defaults to mpeg)
// - text/plain; charset=utf-8
ResponseContentType *string
// Sets the state of the session with the user. You can use this to set the
// current intent, attributes, context, and dialog action. Use the dialog action to
// determine the next step that Amazon Lex V2 should use in the conversation with
// the user. The sessionState field must be compressed using gzip and then base64
// encoded before sending to Amazon Lex V2.
SessionState *string
noSmithyDocumentSerde
}
type RecognizeUtteranceOutput struct {
// The prompt or statement to send to the user. This is based on the bot
// configuration and context. For example, if Amazon Lex V2 did not understand the
// user intent, it sends the clarificationPrompt configured for the bot. If the
// intent requires confirmation before taking the fulfillment action, it sends the
// confirmationPrompt . Another example: Suppose that the Lambda function
// successfully fulfilled the intent, and sent a message to convey to the user.
// Then Amazon Lex V2 sends that message in the response.
AudioStream io.ReadCloser
// Content type as specified in the responseContentType in the request.
ContentType *string
// Indicates whether the input mode to the operation was text or speech.
InputMode *string
// The text used to process the request. If the input was an audio stream, the
// inputTranscript field contains the text extracted from the audio stream. This is
// the text that is actually processed to recognize intents and slot values. You
// can use this information to determine if Amazon Lex V2 is correctly processing
// the audio that you send. The inputTranscript field is compressed with gzip and
// then base64 encoded. Before you can use the contents of the field, you must
// decode and decompress the contents. See the example for a simple function to
// decode and decompress the contents.
InputTranscript *string
// A list of intents that Amazon Lex V2 determined might satisfy the user's
// utterance. Each interpretation includes the intent, a score that indicates how
// confident Amazon Lex V2 is that the interpretation is the correct one, and an
// optional sentiment response that indicates the sentiment expressed in the
// utterance. The interpretations field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
Interpretations *string
// A list of messages that were last sent to the user. The messages are ordered
// based on the order that you returned the messages from your Lambda function or
// the order that the messages are defined in the bot. The messages field is
// compressed with gzip and then base64 encoded. Before you can use the contents of
// the field, you must decode and decompress the contents. See the example for a
// simple function to decode and decompress the contents.
Messages *string
// The bot member that recognized the utterance.
RecognizedBotMember *string
// The attributes sent in the request. The requestAttributes field is compressed
// with gzip and then base64 encoded. Before you can use the contents of the field,
// you must decode and decompress the contents.
RequestAttributes *string
// The identifier of the session in use.
SessionId *string
// Represents the current state of the dialog between the user and the bot. Use
// this to determine the progress of the conversation and what the next action
// might be. The sessionState field is compressed with gzip and then base64
// encoded. Before you can use the contents of the field, you must decode and
// decompress the contents. See the example for a simple function to decode and
// decompress the contents.
SessionState *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationRecognizeUtteranceMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRecognizeUtterance{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addRecognizeUtteranceResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpRecognizeUtteranceValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRecognizeUtterance(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opRecognizeUtterance(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "lex",
OperationName: "RecognizeUtterance",
}
}
type opRecognizeUtteranceResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opRecognizeUtteranceResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opRecognizeUtteranceResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
)
|
{
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
|
identifier_body
|
|
admin.py
|
import force_str
from django.utils.safestring import mark_safe
from django.utils.translation import ngettext
from concurrency import core, forms
from concurrency.api import get_revision_of_object
from concurrency.compat import concurrency_param_name
from concurrency.config import CONCURRENCY_LIST_EDITABLE_POLICY_ABORT_ALL, conf
from concurrency.exceptions import RecordModifiedError
from concurrency.forms import ConcurrentForm, VersionWidget
from concurrency.utils import flatten
ALL = object()
class ConcurrencyActionMixin:
check_concurrent_action = True
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
if self.check_concurrent_action:
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME,
force_str("%s,%s" % (obj.pk, get_revision_of_object(obj))))
else: # pragma: no cover
return super().action_checkbox(obj)
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_confirmation_template(self):
return "concurrency/delete_selected_confirmation.html"
def response_action(self, request, queryset): # noqa
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError: # pragma: no cover
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError: # pragma: no cover
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail.
if action_form.cleaned_data['select_across']:
selected = ALL
else:
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected:
return None
revision_field = self.model._concurrencymeta.field
if self.check_concurrent_action:
self.delete_selected_confirmation_template = self.get_confirmation_template()
# If select_across we have to avoid the use of concurrency
if selected is not ALL:
filters = []
for x in selected:
try:
pk, version = x.split(",")
except ValueError: # pragma: no cover
                        raise ImproperlyConfigured('`ConcurrencyActionMixin` error. '
'A tuple with `primary_key, version_number` '
'expected: `%s` found' % x)
filters.append(Q(**{'pk': pk,
revision_field.attname: version}))
queryset = queryset.filter(reduce(operator.or_, filters))
if len(selected) != queryset.count():
                        messages.error(request, 'One or more records were updated '
                                                '(probably by another user). '
                                                'The execution was aborted.')
return HttpResponseRedirect(".")
else:
                    messages.warning(request, 'Selecting all records bypasses the concurrency check')
response = func(self, request, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(".")
class ConcurrentManagementForm(ManagementForm):
def __init__(self, *args, **kwargs):
self._versions = kwargs.pop('versions', [])
super().__init__(*args, **kwargs)
def _get_concurrency_fields(self):
v = []
for pk, version in self._versions:
v.append(f'<input type="hidden" name="{concurrency_param_name}_{pk}" value="{version}">')
return mark_safe("".join(v))
def render(self, template_name=None, context=None, renderer=None):
out = super().render(template_name, context, renderer)
return out + self._get_concurrency_fields()
def __str__(self):
if django.VERSION[:2] >= (4, 0):
return self.render()
else:
return super().__str__()
__html__ = __str__
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
ret = super()._html_output(normal_row, error_row, row_ender, help_text_html, errors_on_separate_row)
return mark_safe("{0}{1}".format(ret, self._get_concurrency_fields()))
class ConcurrentBaseModelFormSet(BaseModelFormSet):
def _management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ConcurrentManagementForm(self.data, auto_id=self.auto_id,
prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ConcurrentManagementForm(auto_id=self.auto_id,
prefix=self.prefix,
initial={TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num},
versions=[(form.instance.pk, get_revision_of_object(form.instance)) for form
in self.initial_forms])
return form
management_form = property(_management_form)
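# The management form above appends one hidden input per initial form, which
# save_model() in ConcurrencyListEditableMixin below reads back via
# request.POST.get(f'{concurrency_param_name}_{obj.pk}'). For example (a
# sketch, assuming the default concurrency_param_name of '_concurrency' and
# two rows with (pk, version) pairs (1, 2) and (7, 5)):
#
#     <input type="hidden" name="_concurrency_1" value="2">
#     <input type="hidden" name="_concurrency_7" value="5">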
class ConcurrencyListEditableMixin:
list_editable_policy = conf.POLICY
def get_changelist_formset(self, request, **kwargs):
kwargs['formset'] = ConcurrentBaseModelFormSet
return super().get_changelist_formset(request, **kwargs)
def _add_conflict(self, request, obj):
if hasattr(request, '_concurrency_list_editable_errors'):
request._concurrency_list_editable_errors.append(obj.pk)
else:
request._concurrency_list_editable_errors = [obj.pk]
def _get_conflicts(self, request):
if hasattr(request, '_concurrency_list_editable_errors'):
|
else:
return []
def save_model(self, request, obj, form, change):
try:
if change:
version = request.POST.get(f'{concurrency_param_name}_{obj.pk}', None)
if version:
core._set_version(obj, version)
super().save_model(request, obj, form, change)
except RecordModifiedError:
self._add_conflict(request, obj)
# If policy is set to 'silent' the user will be informed using message_user
# raise Exception if not silent.
# NOTE:
# list_editable_policy MUST have the LIST_EDITABLE_POLICY_ABORT_ALL
# set to work properly
if self.list_editable_policy == CONCURRENCY_LIST_EDITABLE_POLICY_ABORT_ALL:
raise
def log_change(self, request, object, message):
if object.pk in self._get_conflicts(request):
return
return super().log_change(request, object, message)
def log_deletion(self, request, object, object_repr):
if object.pk in self._get_conflicts(request):
return
return super().log_deletion(request, object, object_repr)
def message_user(self, request, message, *args, **kwargs):
# This is ugly but we do not want to touch the changelist_view() code.
opts = self.model._meta
conflicts = self._get_conflicts(request)
if conflicts:
names = force_str(opts.verbose_name), force_str(opts.verbose_name_plural)
pattern = r"(?P<num>\d+) ({0}|{1})".format(*names)
rex = re.compile(pattern)
m = rex.match(message)
            concurrency_errors = len(conflicts)
if m:
                updated_record = int(m.group('num')) - concurrency_errors
ids = ",".join(map(str, conflicts))
messages.error(request,
ngettext("Record with pk `{0}` has been modified and was not updated",
"Records `{0}` have been modified and were not updated",
                                        concurrency_errors).format(ids))
if updated_record == 1:
name = force_str(opts.verbose_name)
else:
name = force_str(opts.verbose_name_plural)
message = None
if updated_record > 0:
message = ngettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
updated_record) % {'count': updated_record,
'name': name}
return super().message_user(request, message, *args, **kwargs)
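# A minimal usage sketch for the admin class defined below (MyModel and the
# field names are hypothetical; this assumes the module is importable as
# concurrency.admin):
#
#     from django.contrib import admin
#     from concurrency.admin import ConcurrentModelAdmin
#
#     @admin.register(MyModel)
#     class MyModelAdmin(ConcurrentModelAdmin):
#         list_display = ('name', 'version')
#         list_editable = ('name',)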
class ConcurrentModelAdmin(ConcurrencyActionMixin,
ConcurrencyList
|
return request._concurrency_list_editable_errors
|
conditional_block
|
admin.py
|
force_str
from django.utils.safestring import mark_safe
from django.utils.translation import ngettext
from concurrency import core, forms
from concurrency.api import get_revision_of_object
from concurrency.compat import concurrency_param_name
from concurrency.config import CONCURRENCY_LIST_EDITABLE_POLICY_ABORT_ALL, conf
from concurrency.exceptions import RecordModifiedError
from concurrency.forms import ConcurrentForm, VersionWidget
from concurrency.utils import flatten
ALL = object()
class ConcurrencyActionMixin:
check_concurrent_action = True
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
if self.check_concurrent_action:
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME,
force_str("%s,%s" % (obj.pk, get_revision_of_object(obj))))
else: # pragma: no cover
return super().action_checkbox(obj)
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_confirmation_template(self):
return "concurrency/delete_selected_confirmation.html"
def response_action(self, request, queryset): # noqa
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError: # pragma: no cover
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError: # pragma: no cover
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail.
if action_form.cleaned_data['select_across']:
selected = ALL
else:
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected:
return None
revision_field = self.model._concurrencymeta.field
if self.check_concurrent_action:
self.delete_selected_confirmation_template = self.get_confirmation_template()
# If select_across we have to avoid the use of concurrency
if selected is not ALL:
filters = []
for x in selected:
try:
pk, version = x.split(",")
except ValueError: # pragma: no cover
                        raise ImproperlyConfigured('`ConcurrencyActionMixin` error. '
'A tuple with `primary_key, version_number` '
'expected: `%s` found' % x)
filters.append(Q(**{'pk': pk,
revision_field.attname: version}))
queryset = queryset.filter(reduce(operator.or_, filters))
if len(selected) != queryset.count():
                        messages.error(request, 'One or more records were updated '
                                                '(probably by another user). '
                                                'The execution was aborted.')
return HttpResponseRedirect(".")
else:
                    messages.warning(request, 'Selecting all records bypasses the concurrency check')
response = func(self, request, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(".")
class ConcurrentManagementForm(ManagementForm):
def __init__(self, *args, **kwargs):
|
def _get_concurrency_fields(self):
v = []
for pk, version in self._versions:
v.append(f'<input type="hidden" name="{concurrency_param_name}_{pk}" value="{version}">')
return mark_safe("".join(v))
def render(self, template_name=None, context=None, renderer=None):
out = super().render(template_name, context, renderer)
return out + self._get_concurrency_fields()
def __str__(self):
if django.VERSION[:2] >= (4, 0):
return self.render()
else:
return super().__str__()
__html__ = __str__
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
ret = super()._html_output(normal_row, error_row, row_ender, help_text_html, errors_on_separate_row)
return mark_safe("{0}{1}".format(ret, self._get_concurrency_fields()))
class ConcurrentBaseModelFormSet(BaseModelFormSet):
def _management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ConcurrentManagementForm(self.data, auto_id=self.auto_id,
prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ConcurrentManagementForm(auto_id=self.auto_id,
prefix=self.prefix,
initial={TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num},
versions=[(form.instance.pk, get_revision_of_object(form.instance)) for form
in self.initial_forms])
return form
management_form = property(_management_form)
class ConcurrencyListEditableMixin:
list_editable_policy = conf.POLICY
def get_changelist_formset(self, request, **kwargs):
kwargs['formset'] = ConcurrentBaseModelFormSet
return super().get_changelist_formset(request, **kwargs)
def _add_conflict(self, request, obj):
if hasattr(request, '_concurrency_list_editable_errors'):
request._concurrency_list_editable_errors.append(obj.pk)
else:
request._concurrency_list_editable_errors = [obj.pk]
def _get_conflicts(self, request):
if hasattr(request, '_concurrency_list_editable_errors'):
return request._concurrency_list_editable_errors
else:
return []
def save_model(self, request, obj, form, change):
try:
if change:
version = request.POST.get(f'{concurrency_param_name}_{obj.pk}', None)
if version:
core._set_version(obj, version)
super().save_model(request, obj, form, change)
except RecordModifiedError:
self._add_conflict(request, obj)
# If policy is set to 'silent' the user will be informed using message_user
# raise Exception if not silent.
# NOTE:
# list_editable_policy MUST have the LIST_EDITABLE_POLICY_ABORT_ALL
# set to work properly
if self.list_editable_policy == CONCURRENCY_LIST_EDITABLE_POLICY_ABORT_ALL:
raise
def log_change(self, request, object, message):
if object.pk in self._get_conflicts(request):
return
return super().log_change(request, object, message)
def log_deletion(self, request, object, object_repr):
if object.pk in self._get_conflicts(request):
return
return super().log_deletion(request, object, object_repr)
def message_user(self, request, message, *args, **kwargs):
# This is ugly but we do not want to touch the changelist_view() code.
opts = self.model._meta
conflicts = self._get_conflicts(request)
if conflicts:
names = force_str(opts.verbose_name), force_str(opts.verbose_name_plural)
pattern = r"(?P<num>\d+) ({0}|{1})".format(*names)
rex = re.compile(pattern)
m = rex.match(message)
            concurrency_errors = len(conflicts)
if m:
                updated_record = int(m.group('num')) - concurrency_errors
ids = ",".join(map(str, conflicts))
messages.error(request,
ngettext("Record with pk `{0}` has been modified and was not updated",
"Records `{0}` have been modified and were not updated",
                                        concurrency_errors).format(ids))
if updated_record == 1:
name = force_str(opts.verbose_name)
else:
name = force_str(opts.verbose_name_plural)
message = None
if updated_record > 0:
message = ngettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
updated_record) % {'count': updated_record,
'name': name}
return super().message_user(request, message, *args, **kwargs)
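# Worked example of the regex above (a sketch; "book" stands in for a
# hypothetical verbose_name):
#
#     >>> import re
#     >>> rex = re.compile(r"(?P<num>\d+) (book|books)")
#     >>> m = rex.match("3 books were changed successfully.")
#     >>> int(m.group('num'))
#     3
#
# With one conflicting row, updated_record becomes 2 and the success message
# is rewritten to count only the rows that were actually saved.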
class ConcurrentModelAdmin(ConcurrencyActionMixin,
ConcurrencyList
|
self._versions = kwargs.pop('versions', [])
super().__init__(*args, **kwargs)
|
identifier_body
|
admin.py
|
import force_str
from django.utils.safestring import mark_safe
from django.utils.translation import ngettext
from concurrency import core, forms
from concurrency.api import get_revision_of_object
from concurrency.compat import concurrency_param_name
from concurrency.config import CONCURRENCY_LIST_EDITABLE_POLICY_ABORT_ALL, conf
from concurrency.exceptions import RecordModifiedError
from concurrency.forms import ConcurrentForm, VersionWidget
from concurrency.utils import flatten
ALL = object()
class ConcurrencyActionMixin:
check_concurrent_action = True
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
if self.check_concurrent_action:
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME,
force_str("%s,%s" % (obj.pk, get_revision_of_object(obj))))
else: # pragma: no cover
return super().action_checkbox(obj)
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_confirmation_template(self):
return "concurrency/delete_selected_confirmation.html"
def response_action(self, request, queryset): # noqa
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError: # pragma: no cover
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError: # pragma: no cover
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail.
if action_form.cleaned_data['select_across']:
selected = ALL
else:
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected:
return None
revision_field = self.model._concurrencymeta.field
if self.check_concurrent_action:
self.delete_selected_confirmation_template = self.get_confirmation_template()
# If select_across we have to avoid the use of concurrency
if selected is not ALL:
filters = []
for x in selected:
try:
pk, version = x.split(",")
except ValueError: # pragma: no cover
                        raise ImproperlyConfigured('`ConcurrencyActionMixin` error. '
'A tuple with `primary_key, version_number` '
'expected: `%s` found' % x)
filters.append(Q(**{'pk': pk,
revision_field.attname: version}))
queryset = queryset.filter(reduce(operator.or_, filters))
if len(selected) != queryset.count():
                        messages.error(request, 'One or more records were updated '
                                                '(probably by another user). '
                                                'The execution was aborted.')
return HttpResponseRedirect(".")
else:
                    messages.warning(request, 'Selecting all records bypasses the concurrency check')
response = func(self, request, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(".")
class ConcurrentManagementForm(ManagementForm):
def __init__(self, *args, **kwargs):
self._versions = kwargs.pop('versions', [])
super().__init__(*args, **kwargs)
def _get_concurrency_fields(self):
v = []
for pk, version in self._versions:
v.append(f'<input type="hidden" name="{concurrency_param_name}_{pk}" value="{version}">')
return mark_safe("".join(v))
def render(self, template_name=None, context=None, renderer=None):
out = super().render(template_name, context, renderer)
return out + self._get_concurrency_fields()
def __str__(self):
if django.VERSION[:2] >= (4, 0):
return self.render()
else:
return super().__str__()
__html__ = __str__
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
ret = super()._html_output(normal_row, error_row, row_ender, help_text_html, errors_on_separate_row)
return mark_safe("{0}{1}".format(ret, self._get_concurrency_fields()))
class ConcurrentBaseModelFormSet(BaseModelFormSet):
def _management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ConcurrentManagementForm(self.data, auto_id=self.auto_id,
prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ConcurrentManagementForm(auto_id=self.auto_id,
prefix=self.prefix,
initial={TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num},
versions=[(form.instance.pk, get_revision_of_object(form.instance)) for form
in self.initial_forms])
return form
management_form = property(_management_form)
class ConcurrencyListEditableMixin:
list_editable_policy = conf.POLICY
def get_changelist_formset(self, request, **kwargs):
kwargs['formset'] = ConcurrentBaseModelFormSet
return super().get_changelist_formset(request, **kwargs)
def _add_conflict(self, request, obj):
if hasattr(request, '_concurrency_list_editable_errors'):
request._concurrency_list_editable_errors.append(obj.pk)
else:
request._concurrency_list_editable_errors = [obj.pk]
    def _get_conflicts(self, request):
if hasattr(request, '_concurrency_list_editable_errors'):
return request._concurrency_list_editable_errors
else:
return []
def save_model(self, request, obj, form, change):
try:
if change:
version = request.POST.get(f'{concurrency_param_name}_{obj.pk}', None)
if version:
core._set_version(obj, version)
super().save_model(request, obj, form, change)
except RecordModifiedError:
self._add_conflict(request, obj)
            # If the policy is 'silent' the user will be informed via message_user;
            # otherwise re-raise the exception.
# NOTE:
# list_editable_policy MUST have the LIST_EDITABLE_POLICY_ABORT_ALL
# set to work properly
if self.list_editable_policy == CONCURRENCY_LIST_EDITABLE_POLICY_ABORT_ALL:
raise
def log_change(self, request, object, message):
if object.pk in self._get_conflicts(request):
return
return super().log_change(request, object, message)
def log_deletion(self, request, object, object_repr):
if object.pk in self._get_conflicts(request):
return
return super().log_deletion(request, object, object_repr)
def message_user(self, request, message, *args, **kwargs):
# This is ugly but we do not want to touch the changelist_view() code.
opts = self.model._meta
conflicts = self._get_conflicts(request)
if conflicts:
names = force_str(opts.verbose_name), force_str(opts.verbose_name_plural)
pattern = r"(?P<num>\d+) ({0}|{1})".format(*names)
rex = re.compile(pattern)
m = rex.match(message)
            concurrency_errors = len(conflicts)
            if m:
                updated_record = int(m.group('num')) - concurrency_errors
                ids = ",".join(map(str, conflicts))
                messages.error(request,
                               ngettext("Record with pk `{0}` has been modified and was not updated",
                                        "Records `{0}` have been modified and were not updated",
                                        concurrency_errors).format(ids))
if updated_record == 1:
name = force_str(opts.verbose_name)
else:
name = force_str(opts.verbose_name_plural)
message = None
if updated_record > 0:
message = ngettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
updated_record) % {'count': updated_record,
'name': name}
return super().message_user(request, message, *args, **kwargs)
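# Illustrative sketch (not part of the original module): the count rewrite done
# in message_user above, as plain `re` arithmetic on hypothetical data.
def _demo_rewrite_count(message="3 books were changed successfully.", conflicts=(7, 9)):
    m = re.match(r"(?P<num>\d+) (book|books)", message)
    if not m:
        return message
    updated = int(m.group('num')) - len(conflicts)
    return "%s updated, %s skipped (pks: %s)" % (updated, len(conflicts), ",".join(map(str, conflicts)))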
class ConcurrentModelAdmin(ConcurrencyActionMixin,
                           ConcurrencyListEditableMixin,
                           admin.ModelAdmin):
    form = ConcurrentForm
    formfield_overrides = {forms.VersionField: {'widget': VersionWidget}}
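# Hypothetical wiring sketch (illustration only; MyDemoModel and its app are
# placeholders, IntegerVersionField is django-concurrency's version field):
#
#     from concurrency.fields import IntegerVersionField
#
#     class MyDemoModel(models.Model):
#         version = IntegerVersionField()
#         name = models.CharField(max_length=100)
#
#     admin.site.register(MyDemoModel, ConcurrentModelAdmin)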
soldev.datatable.js
/* get list of filters for datatable */
$.get("${pageContext.request.contextPath}/dt/filtergroup",{groupId:properties.filterid}, function(returned){
    if (returned.status == 1){
        $.each(returned.data, function(key, filter) {
            $checkbox_wrapper = $("<div />",{"class":"checkbox"});
            $label = $('<label />');
            $input = $('<input />',{"type":"checkbox","class":"select_filter_"+filter.id,"value":filter.id});
$input_wrapper = $label.append($input).append(filter.label);
$wrapper = $checkbox_wrapper.append($input_wrapper);
$('.add_filter_content > form').append($wrapper);
});
}
else {
alert('failed');
}
});
/* --get list of filters for datatable */
}
var dom ='lfrtip';
var canSelect = true
var selectProperties = {};
var ajax = {};
//by default, if "dom" is not set it stays 'lfrtip'; to e.g. remove the search box, pass a custom dom in the parameters
if(typeof(properties.dom)!=='undefined' && properties.dom != null && properties.dom.length != 0){
dom = properties.dom;
}
//extra property: if you don't want the search text box, just add the inputSearch property with value false
if(typeof(properties.inputSearch)!=='undefined' && properties.inputSearch != null && properties.inputSearch.length != 0){
if(properties.inputSearch == false){
dom = 'lrtip';
}
}
//extra request data
if(typeof(properties.filterid)!=='undefined' && properties.filterid != null && properties.filterid.length != 0){
ajax.data = filterData;
if (typeof(properties.ajax.url)!=='undefined' && properties.ajax.url != null && properties.ajax.url.length != 0){
ajax.url = properties.ajax.url;
}
else {
ajax.url = properties.ajax;
}
}
else {
ajax = properties.ajax
}
var dtTblProperties =
{
"processing": true,
"serverSide": true,
"responsive":true,
"ajax": ajax,
"ordering":true,
"columns" : properties.columns,
"dom": dom,
"stateSave": true
};
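    // Added notes on the options above: "serverSide": true makes DataTables send
    // paging/sorting/search parameters to the ajax endpoint and expect
    // draw/recordsTotal/recordsFiltered back in the JSON reply; "dom": 'lfrtip'
    // is the control layout (length menu, filter box, processing indicator,
    // table, info, pagination); "stateSave": true keeps paging/sorting between visits.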
    if(typeof(properties.fnCreatedRow)!=='undefined' && typeof(properties.fnCreatedRow) == 'function'){
        dtTblProperties.fnCreatedRow = properties.fnCreatedRow;
    }
    if(typeof(properties.fnDrawCallback)!=='undefined' && typeof(properties.fnDrawCallback) == 'function'){
        dtTblProperties.fnDrawCallback = properties.fnDrawCallback;
    }
    if(typeof(properties.columnDefs)!=='undefined' && properties.columnDefs != null && properties.columnDefs.length != 0){
        dtTblProperties.columnDefs = properties.columnDefs;
    }
    if(typeof(properties.height)!=='undefined' && properties.height != null && properties.height != 0){
        dtTblProperties.scrollY = properties.height;
        dtTblProperties.scrollCollapse = true;
    }
if(typeof(properties.colReorder)!=='undefined' && properties.colReorder != null && properties.colReorder !=0){
dtTblProperties.colReorder = properties.colReorder
}
var lang = '${user.language}';
if(lang == 'in'){
var language = dt_lang_in;
dtTblProperties.oLanguage = language;
}
    //if the page passes a select property and it is truthy
if(typeof(properties.select)!=='undefined' && properties.select != null && properties.select.length != 0){
if(properties.select == true){
selectProperties.style = 'os';
}
else if(properties.select =='single') {
selectProperties.style = 'single';
}
else if(properties.select == 'multi'){
selectProperties.style ='multi';
}
}else selectProperties.style = 'os';
selectProperties.info = false;
dtTblProperties.select = selectProperties;
    if(typeof(properties.destroy)!=='undefined' && properties.destroy != null && properties.destroy.length != 0){
        dtTblProperties.destroy = properties.destroy;
    }
    if(typeof(properties.responsive)!=='undefined' && properties.responsive != null && typeof(properties.responsive) == 'boolean'){
        console.log('responsive exists and the value is : ',properties.responsive);
        dtTblProperties.responsive = properties.responsive;
    }
    if(typeof(properties.sScrollX)!=='undefined' && properties.sScrollX != null && properties.sScrollX != 0){
        dtTblProperties.sScrollX = properties.sScrollX;
    }
    if(typeof(properties.sScrollXInner)!=='undefined' && properties.sScrollXInner != null && properties.sScrollXInner != 0){
        dtTblProperties.sScrollXInner = properties.sScrollXInner;
    }
this.dataTable(dtTblProperties);
this.addClass('table-bordered');
this.find('td').css('word-wrap','break-word');
this.css('width','100%');
var oTable = this;
/*---------START INITIALIZING DATATABLE FILTERS--------*/
if(typeof(properties.filterid)!=='undefined' && properties.filterid != null && properties.filterid.length != 0){
$('.add_filter_content').on('change','input:checkbox', function(){
var isAddingNewFilter = $(this).prop('checked');
var filterId = $(this).val();
//if adding new filter
if (isAddingNewFilter){
console.log('isAddingNewFilter');
$.get("${pageContext.request.contextPath}/dt/filter",{filterId:filterId}, function(returned){
if (returned.status == 1){
$('.filters_wrapper').addNewFilter(returned.data);
}
else {
alert('failed');
}
});
}
//if removing filter
else {
$('#filter_' + filterId).removeFilter();
}
});
$.fn.addNewFilter = function(filter){
console.log('addFilter');
this.append(
'<div class="filter" id="filter_'+ filter.id +'">' +
'<div class="filter_trigger">' +
filter.label + '<a class="filter_delete">×</a>' +
'</div>' +
'<div class="filter_content"><div class="filter_content_inside"></div></div>' +
'</div>');
var $filterElm = $('#filter_' + filter.id);
$filterElm.data('name',filter.name);
$filterElm.data('type',filter.type);
var wrapper = $('.filter_content_inside',$filterElm);
if (filter.type == 'text'){
wrapper.append(
'<div class="form-inline">' +
'<div class="form-group">' +
'<input type="text" class="form-control dt-filter-text" placeholder="'+filter.label+'">' +
'</div>' +
'</div>');
}
else if (filter.type == 'select'){
var options = filter.options;
var $select = $('<select/>',{'class':'dt-filter-select'});
$select.attr('multiple','multiple');
for (var i=0; i < options.length;i++){
$select.append('<option value="'+options[i].value+'">'+ options[i].label +'</option>')
}
wrapper.append($select);
//apply chosen.js
$('select',wrapper).chosen();
}
else if (filter.type == 'daterange'){
wrapper.append(
'<div class="form-inline">' +
'<div class="form-group">' +
'<input type="text" class="form-control dt-filter-date" name="from" placeholder="From">' +
'</div>' +
'<div class="form-group">' +
'<input type="text" class="form-control dt-filter-date" name="to" placeholder="Until">' +
'</div>' +
'</div>'
)
//apply datepicker
$('.dt-filter-date',wrapper).datepicker({
format: "yyyy-mm-dd",
todayBtn: 'linked',
todayHighlight: true
});
}
else if (filter.type == 'range'){
var rangeSetting = filter.options[0];
wrapper.append('<div class="dt-filter-range" style="width:200px"></div>');
$minLabel = $('<span/>',{class:'minLabel'});
$maxLabel = $('<span/>',{class:'maxLabel'});
$rangeLabel = $('<div/>',{class:'rangeLabel'});
            $minLabel.text(rangeSetting.min);
            $maxLabel.text(rangeSetting.max);
            $rangeLabel.append($minLabel);
            $rangeLabel.append(' - ');
            $rangeLabel.append($maxLabel);
wrapper.append($rangeLabel);
//apply jquery ui slider
$('.dt-filter-range',wrapper).slider({
range: true,
min: rangeSetting.min,
max: rangeSetting.max,
values : [rangeSetting.min,rangeSetting.max],
step: rangeSetting.step,
slide: function( event, ui ) {
$minLabel.text(ui.values[0]);
$maxLabel.text(ui.values[1]);
}
});
}
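            // Added summary: addNewFilter renders one widget per filter.type --
            // 'text' -> a plain input, 'select' -> a multi-select enhanced by chosen.js,
            // 'daterange' -> two bootstrap-datepicker inputs (from/to),
            // 'range' -> a jQuery UI slider with live min/max labels.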
            //add buttons
        }
    /* function data for datatable */
    var filterData = function(d){
        // reconstructed opening (the function name comes from "ajax.data = filterData"
        // above; the selector and the d['filter'] init are assumptions) -- the body below is original
        d['filter'] = [];
        var numOfFilter = 0;
        $('.filters_wrapper .filter').each(function(){
            var filterElm = $(this);
            var value;
            if (filterElm.data('type') == 'range' ){
value = {
min: $('.dt-filter-range',filterElm).slider("values",0),
max: $('.dt-filter-range',filterElm).slider("values",1),
}
}
else if (filterElm.data('type') == 'daterange' ){
value = {
'from': $('[name=from]',filterElm).val(),
'to' : $('[name=to]',filterElm).val(),
}
}
else {
value = ($(':input',filterElm).val() == null)?'':$(':input',filterElm).val().toString();
}
var filter = {
key : filterElm.data('name'),
type : filterElm.data('type'),
value : value
}
d['filter'].push(filter);
numOfFilter = numOfFilter + 1;
});
d.numOfFilter = numOfFilter;
}
/* --function data for datatable */
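    // Added example of the extra request data built above (hypothetical values):
    //   d.filter = [{key:"price", type:"range", value:{min:0, max:100}},
    //               {key:"created", type:"daterange", value:{from:"2020-01-01", to:"2020-01-31"}}];
    //   d.numOfFilter = 2;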
main.js
var $currentInput = $usernameInput.focus();
var socket = io();
var userList = {};
var silentMode = false;
var quietMode = false;
var blockList = [];
// ------------ //
// private info //
// ------------ //
function MyAspect(name, shortening, level, quantity, maxsize) {
this.name = name;
this.shortening = shortening;
this.level = level;
this.quantity = quantity;
this.maxsize = maxsize;
}
// ------------ //
// private info //
// ------------ //
function MyEmpire() {
this.aspects = {
'army' : new MyAspect('Army','{S}',0,0,10),
'science' : new MyAspect('Science','[D]',0,0,10),
'production' : new MyAspect('Production','(G)',0,0,10),
'diplomacy' : new MyAspect('Diplomacy','#A#',0,0,10),
'growth' : new MyAspect('Growth','!P!',0,0,10),
'development' : new MyAspect('Development','>P>',0,0,10)
};
}
var myEmpire = new MyEmpire();
const addParticipantsMessage = (data) => {
var message = '';
if (data.numUsers === 1) {
message += "there's 1 participant";
} else {
message += "there are " + data.numUsers + " participants";
}
log(message);
}
// Sets the client's username
const setUsername = () => {
username = cleanInput($usernameInput.val().trim());
// If the username is valid
if (username) {
$loginPage.fadeOut();
$chatPage.show();
$loginPage.off('click');
$currentInput = $inputMessage.focus();
// Tell the server your username
socket.emit('add user', username);
/*
updateAspects({'army': [0,5],
'science': [0,10],
'production': [0,0],
'diplomacy': [0,0],
'growth': [0,0],
'development': [0,0]});
*/
}
}
// Sends a chat messages
const sendMessage = () => {
var message = $inputMessage.val();
// Prevent markup from being injected into the message
message = cleanInput(message);
// if there is a non-empty message and a socket connection
if (message && connected) {
$inputMessage.val('');
/*
addChatMessage({
username: username,
message: message
});*/
// tell server to execute 'new message' and send along one parameter
socket.emit('new message', {username, message});
}
}
// Log a message
const log = (message, options) => {
var $el = $('<li>').addClass('log').text(message);
addMessageElement($el, options);
}
// Adds the visual chat message to the message list
const addChatMessage = (data, options) => {
if(arrayContains(data.username,blockList)) return;
// Don't fade the message in if there is an 'X was typing'
//var $typingMessages = getTypingMessages(data);
options = options || {};
//if ($typingMessages.length !== 0) {
// options.fade = false;
// $typingMessages.remove();
// }
var $usernameDiv = $('<span class="username"/>')
.text(timeNow() + " " + data.username)
.css('color', getUsernameColor(data.username));
var $messageBodyDiv = $('<span class="messageBody">')
.text(data.message);
var $messageDiv = $('<li class="message"/>')
.data('username', data.username)
//.addClass(typingClass)
.append($usernameDiv, $messageBodyDiv);
addMessageElement($messageDiv, options);
}
// Adds the visual chat message to the message list
const addNotificationsMessage = (data, options) => {
var $usernameDiv = $('<span class="username"/>')
.text(timeNow() + " " + data.username)
.css('color', getUsernameColor(data.username));
var $messageBodyDiv = $('<span class="notificationsBody">')
.html(data.message);
var $messageDiv = $('<li class="notification "/>')
.data('username', data.username)
//.addClass(typingClass)
.append($usernameDiv, $messageBodyDiv);
addNotificationsElement($messageDiv, options);
}
// Removes the visual chat typing message
const removeChatTyping = (data) => {
getTypingMessages(data).fadeOut(function () {
$(this).remove();
});
}
// Adds a message element to the messages and scrolls to the bottom
// el - The element to add as a message
// options.fade - If the element should fade-in (default = true)
// options.prepend - If the element should prepend
// all other messages (default = false)
const addMessageElement = (el, options) => {
var $el = $(el);
// Setup default options
if (!options) {
options = {};
}
if (typeof options.fade === 'undefined') {
options.fade = true;
}
if (typeof options.prepend === 'undefined') {
options.prepend = false;
}
// Apply options
if (options.fade) {
$el.hide().fadeIn(FADE_TIME);
}
if (options.prepend) {
$messages.prepend($el);
} else {
$messages.append($el);
}
$messages[0].scrollTop = $messages[0].scrollHeight;
}
// Adds a message element to the notifications and scrolls to the bottom
// el - The element to add as a message
// options.fade - If the element should fade-in (default = true)
// options.prepend - If the element should prepend
// all other notifications (default = false)
const addNotificationsElement = (el, options) => {
var $el = $(el);
// Setup default options
if (!options) {
options = {};
}
if (typeof options.fade === 'undefined') {
options.fade = true;
}
options.prepend = true;
// Apply options
if (options.fade) {
$el.hide().fadeIn(FADE_TIME);
}
if (options.prepend) {
$notifications.prepend($el);
} else {
$notifications.append($el);
}
//$notifications[0].scrollTop = $notifications[0].scrollHeight;
}
// Prevents input from having injected markup
const cleanInput = (input) => {
return $('<div/>').text(input).html();
}
// Gets the color of a username through our hash function
const getUsernameColor = (username) => {
// Compute hash code
var hash = 7;
for (var i = 0; i < username.length; i++) {
hash = username.charCodeAt(i) + (hash << 5) - hash;
}
// Calculate color
var index = Math.abs(hash % COLORS.length);
return COLORS[index];
}
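    // Added note: the loop above is a djb2-style rolling hash --
    // (hash << 5) - hash equals hash * 31, plus the char code -- and
    // Math.abs(hash % COLORS.length) maps it to a stable palette index,
    // so the same username always gets the same color.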
const updateBtnActionList = () =>
{
//hide actions the user cannot do
if(myEmpire.aspects['army'].quantity > 0 )
{
$('.fight').show();
showUserActions = true;
}
else $('.fight').hide();
    if(myEmpire.aspects['production'].quantity >= 10 &&
        myEmpire.aspects['diplomacy'].quantity >= 10 &&
        myEmpire.aspects['growth'].quantity >= 10) {
        $('.tradedeal').show();
        showUserActions = true;
    } else {
        $('.tradedeal').hide();
    }
for(user in userList) {
if(myEmpire.aspects['diplomacy'].quantity >= userList[user].territory)
{
$('.treaty').show();
showUserActions = true;
}
else
$('.treaty').hide();
}
if(myEmpire.aspects['army'].quantity > 0 ||
myEmpire.aspects['science'].quantity > 0 ||
myEmpire.aspects['production'].quantity > 0 ||
myEmpire.aspects['diplomacy'].quantity > 0 ||
myEmpire.aspects['growth'].quantity > 0 ||
myEmpire.aspects['development'].quantity > 0)
{
$('.sendResources').show();
showUserActions = true;
console.log('HERE');
}
else
$('.sendResources').hide();
if(!showUserActions) $('.btn-action-list').hide();
else $('.btn-action-list').show();
if(myEmpire.aspects['growth'].quantity >= 10) $('#conquer').show();
else $('#conquer').hide();
}
const updateAspects = (stats) => {
console.log(stats.data);
showUserActions = false;
for(i=0;i<ASPECT_NAMES.length;i++)
{
aspect = ASPECT_NAMES[i];
        //console.log(aspect); console.log(stats.data['army'].level);
        myEmpire.aspects[aspect].name = stats.data[aspect].name;
        myEmpire.aspects[aspect].shortening = stats.data[aspect].shortening;
        myEmpire.aspects[aspect].level = stats.data[aspect].level;
        myEmpire.aspects[aspect].quantity = stats.data[aspect].quantity;
        myEmpire.aspects[aspect].maxsize = stats.data[aspect].maxsize;
        $('#'+aspect+'_lvl').text(" " + myEmpire.aspects[aspect].level);
        console.log(myEmpire.aspects);
        $('#'+aspect+'_progress')
            //.html(numberWithCommas(myEmpire.aspects[aspect].quantity)+'/'+numberWithCommas(myEmpire.aspects[aspect].maxsize));
            .attr('data-progress',Math.floor(100*myEmpire.aspects[aspect].quantity/myEmpire.aspects[aspect].maxsize))
            .attr('data-value', myEmpire.aspects[aspect].quantity);
        $('#'+aspect+'_maxsize').html(myEmpire.aspects[aspect].maxsize);
    }
    for(i=0;i<ASPECT_NAMES.length;i++)
    {
        var discCost = Math.pow(5, myEmpire.aspects[ASPECT_NAMES[i]].level - 1) * 10;
        var builCost = myEmpire.aspects[ASPECT_NAMES[i]].maxsize;
        aspect = ASPECT_NAMES[i];
if(myEmpire.aspects['science'].quantity >= discCost)
$('#lvlup_'+aspect).show();
else
$('#lvlup_'+aspect).hide();
if(myEmpire.aspects['development'].quantity >= builCost)
$('#upgrade_'+aspect).show();
else
$('#upgrade_'+aspect).hide();
}
// background opacity
console.log(myEmpire.aspects['production'].quantity);
if(myEmpire.aspects['production'].quantity > 0)
{
$('.army_div').css('opacity', 1);
$('.science_div').css('opacity', 1);
$('.development_div').css('opacity', 1);
} else {
$('.army_div').css('opacity', 0.4);
$('.science_div').css('opacity', 0.4);
$('.development_div').css('opacity', 0.4);
}
console.log('HELLO!!!!????!!!');
updateBtnActionList();
}
const addToInput = (msg) =>
{
$('#input').val($('#input').val() + msg);
$currentInput.focus();
}
function updateUserTable(users)
{
userList = users[0];
$('#users').empty();
for(user in userList) { console.log(user);
if(userList[user].username == username) {
var row = '';
row += '<tr class="leaderboard-user-row"><td></td>';
row += '<td>'+userList[user].age+'</td>';
row += '<td>'+userList[user].territory+'</td>';
row += '<td>'+userList[user].level+'</td>';
row += '<td>'+userList[user].username+'</td>';
row += '</tr>';
$('#users').append(row);
}
}
for(user in userList) {
if(userList[user].username != username) {
var row = '';
row += '<tr>';
row += '<td><button type="button" class="btn btn-success btn-action-list">'+'<i class="fas fa-caret-down"></i>'+'</button>';
row += '<div class="dropdown-content action-nav"><ul class="action-nav-div">' +
'<li class="fight" data-target="'+user+'">Fight</li>' +
'<li class="tradedeal" data-target="'+user+'">Trade Deal</li>' +
'<li class="treaty" data-target="'+user+'">Treaty</li>' +
'<li class="sendResources" data-target="'+user+'">Send Resources</li>' +
'</ul></div></td>';
row += '<td>'+userList[user].age+'</td>';
row += '<td>'+userList[user].territory+'</td>';
row += '<td>'+userList[user].level+'</td>';
row += '<td class="userName" id="'+user+'">'+userList[user].username+'</td>';
row += '</tr>';
$('#users').append(row);
}
}
$('.userName').click(
function(){
addToInput('/whisper ' + $(this).attr('id') + ' ');
});
$('.fight').click(
function(){
message = '#figh ' + $(this).attr('data-target');
socket.emit('new message', {username, message });
});
$('.tradedeal').click(
function(){
message = '#trad ' + $(this).attr('data-target');
socket.emit('new message', {username, message });
});
$('.treaty').click(
function(){
message = '#trea ' + $(this).attr('data-target');
socket.emit('new message', {username, message });
});
$('.sendResources').click(
function(){
target = $(this).attr('data-target');
console.log("sendin' to " + target ); // DEBUG
$chatPage.fadeOut();
$sendGoodsPage.fadeIn();
$('#sendResources').click({target},
function() {
console.log(target);
message = '/give ' + target + ' ' +
$('#sendgoodsType option:selected').attr('data-value') + ' ' +
$('#sendgoodsQty').val();
console.log(message);
socket.emit('new message', {username, message });
$sendGoodsPage.fadeOut();
$chatPage.fadeIn();
}
)
}
);
updateBtnActionList();
}
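// Added note: the action handlers are (re)bound after every table rebuild
// because the <tr> rows and their buttons are recreated above; handlers bound
// to the previous DOM nodes are discarded along with them.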
// Keyboard events
$window.keydown(event => {
// Auto-focus the current input when a key is typed
if (!(event.ctrlKey || event.metaKey || event.altKey)) {
$currentInput.focus();
}
// When the client hits ENTER on their keyboard
if (event.which === 13) {
if (username) {
sendMessage();
socket.emit('stop typing');
typing = false;
} else {
setUsername();
}
}
});
// $inputMessage.on('input', () => {
// updateTyping();
// });
// Click events
// Focus input when clicking anywhere on login page
$loginPage.click(() => {
$currentInput.focus();
});
// Focus input when clicking on the message input's border
$inputMessage.click(() => {
$inputMessage.focus();
});
// Socket events
socket.on('loadads', (data) => {
console.log(data);
});
// Whenever the server emits 'login', log the login message
socket.on('login', (data) => {
connected = true;
// Display the welcome message
var message = "<#> Welcome to HexWorld <#>";
log(message, {
prepend: true
});
updateUserTable(data.userList);
addParticipantsMessage(data);
});
socket.on('login failed', () => {
connected = false;
$chatPage.hide();
$chatPage.off('click');
$loginPage.fadeIn();
$loginPage.on('click');
username = "";
$currentInput = $usernameInput.focus();
$('#loginerror').removeAttr("hidden");
});
// Whenever the server emits 'new message', update the chat body
socket.on('new message', (data) => {
if(!silentMode && !quietMode) addChatMessage(data);
});
socket.on('notifications', (data) => {
console.log(data);
addNotificationsMessage(data);
});
socket.on('silent', (data) => {
silentMode = true;
addNotificationsMessage(data)
});
socket.on('quiet', (data) => {
quietMode = true;
addNotificationsMessage(data)
});
socket.on('listen', (data) => {
silentMode = false;
quietMode = false;
addNotificationsMessage(data);
});
socket.on('whisper', (data) => {
if(!silentMode) addChatMessage(data);
});
socket.on('block', (data) => {
if(!arrayContains(data.blockUser,blockList)) blockList.push(data.blockUser);
addNotificationsMessage(data);
});
socket.on('unblock', (data) => {
    if(arrayContains(data.blockUser,blockList)) blockList.splice(blockList.indexOf(data.blockUser), 1);
addNotificationsMessage(data);
});
socket.on('adminmsg', (data) => {
addNotificationsMessage(data);
});
// Whenever the server emits 'user joined', log it in the chat body
socket.on('user joined', (data) => {
log(data.username + ' joined');
addParticipantsMessage(data);
});
// Whenever the server emits 'user left', log it in the chat body
socket.on('user left', (data) => {
log(data.username + ' left');
addParticipantsMessage(data);
removeChatTyping(data);
});
socket.on('disconnect', () => {
log('you have been disconnected');
});
socket.on('forceDisconnect', function(){
log('you have been kicked from the server by admin');
socket.disconnect();
});
socket.on('reconnect', () => {
log('you have been reconnected');
if (username) {
socket.emit('add user', username);
}
});
socket.on('reconnect_error', () => {
log('attempt to reconnect has failed');
});
socket.on('update', (data) => { console.log(data);
updateUserTable(data);
});
socket.on('update empire', (stats) => {
updateAspects(stats);
});
socket.on('dead', (data) => {
switch(data.method)
{
case "fight":
msg = "Your empire was destroyed by " + data.bywhom;
break;
case "buy":
msg = "Your empire was bought by " + data.bywhom;
break;
default:
msg = "You got killed by a random event...";
break;
}
$('#killer').html(msg);
$chatPage.fadeOut(4000);
$deadPage.fadeIn(4000);
} );
function arrayContains(needle, arrhaystack)
{
return (arrhaystack.indexOf(needle) > -1);
}
function capitalizeFirstLetter(string) {
return string.charAt(0).toUpperCase() + string.slice(1);
}
function numberWithCommas(x) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
}
function addActionDiv
main.py
#region 4-web-scraping-example6_lab.py
def example6_lab():
# Create top_items as empty list
all_links = []
    # Extract and store in top_items according to instructions on the left
    links = soup.select("a")
    print("Number of links = ", len(links))
    for ahref in links:
        text = ahref.text
        text = text.strip() if text is not None else ""
        href = ahref.get("href")
        href = href.strip() if href is not None else ""
        all_links.append({"href": href, "text": text})
    print("menu:\n1.Class example\n2.Open a chosen link")
    wybor = input("your choice:")
    if wybor == "1":
        print(all_links)
    elif wybor == "2":
        licznik = 0
        domena = "https://codedamn-classrooms.github.io"
        while licznik<len(links):
if all_links[licznik]["href"].count("https") > 0:
adres = all_links[licznik]["href"]
elif all_links[licznik]["href"].count("#") > 0:
adres = domena + "/" + all_links[licznik]["href"]
else:
adres = domena + all_links[licznik]["href"]
print(licznik,".",adres)
licznik += 1
wybor = input("podaj id:")
if all_links[int(wybor)]["href"].count("https") > 0:
adres = all_links[int(wybor)]["href"]
elif all_links[int(wybor)]["href"].count("#") > 0:
adres = domena + "/" + all_links[int(wybor)]["href"]
else:
adres = domena + all_links[int(wybor)]["href"]
webbrowser.open(adres, new=2)
#endregion
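# A compact helper equivalent to the URL branching above (added sketch;
# the default domena matches the one used in example6_lab):
def make_absolute(href, domena="https://codedamn-classrooms.github.io"):
    if href.count("https") > 0:
        return href
    if href.count("#") > 0:
        return domena + "/" + href
    return domena + href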
#region 4-web-scraping-example7.py
def example7():
# Create top_items as empty list
all_products = []
# Extract and store in top_items according to instructions on the left
products = soup.select('div.thumbnail')
for product in products:
name = product.select('h4 > a')[0].text.strip()
description = product.select('p.description')[0].text.strip()
price = product.select('h4.price')[0].text.strip()
reviews = product.select('div.ratings')[0].text.strip()
image = product.select('img')[0].get('src')
all_products.append({
"name": name,
"description": description,
"price": price,
"reviews": reviews,
"image": image
})
keys = all_products[0].keys()
print("keys = ", keys)
with open('products.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_products)
#endregion
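# Round-trip sketch (added): reading back the products.csv written by example7.
# Assumes the file exists in the working directory.
def read_products(path="products.csv"):
    import csv
    with open(path, newline="") as f:
        return list(csv.DictReader(f))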
#region
def example8():
odpowiedz = requests.get("https://pl.wikipedia.org/wiki/Zygmunt_III_Waza")
print("odpowiedź = \n", odpowiedz)
print(odpowiedz.status_code)
html_text: str = odpowiedz.text
# print("Strona o Wazie = \n", html_text)
user1 = requests.get("https://jsonplaceholder.typicode.com/users/1")
json_text: dict = user1.json()
print("url = ", user1.url)
print("json_text = ", json_text)
print("history = ", user1.history)
# print("odpowiedź tekstowa = \n", html_text)
html_doc = """<html>
<head>
<title>Moja pierwsza strona!</title>
</head>
<body>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec posuere elit at malesuada tempor. Donec eget ligula in ante auctor luctus. Phasellus iaculis porttitor gravida. Donec eget sem lorem. Morbi a libero imperdiet, viverra tellus ac, consequat tortor. Suspendisse nibh massa, accumsan non neque a, vestibulum commodo dui.</p>
<p>Phasellus vestibulum ut <br>erat sit amet ullamcorper. Nam at elit feugiat, dapibus ante vitae, ullamcorper dui. Nunc rutrum at nibh tincidunt mattis. In finibus sed ante vel mollis. Donec at semper metus. Aenean quis consectetur risus. Sed suscipit felis sed ex pretium euismod. In fermentum mi a odio porttitor, dapibus aliquet leo accumsan. Suspendisse pretium augue et faucibus euismod. Quisque risus metus, ultricies nec tortor at, efficitur convallis nunc.</p>
<ul>
<li>Pierwszy punkt</li>
<li>Drugi punkt</li>
<li>Trzeci punkt</li>
</ul>
<ol>
<li>Pierwszy punkt</li>
<li>Drugi punkt</li>
<li>Trzeci punkt</li>
</ol>
<table border="3" bgcolor="#ff00ff" class="tabela blog">
<tr><th>Naglowek 1</th><th>Naglowek 2</th></tr>
<tr><td>komorka 11</td><td>komorka 12</td></tr>
            <tr><td>komorka 21</td><td>komorka 22</td></tr>
        </table>
    </body>
</html>"""
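    # Added sketch (the original tail of example8 is not preserved): parse the
    # html_doc fixture above with BeautifulSoup, mirroring the soup.select()
    # usage in the other examples. The BeautifulSoup import is assumed at module top.
    demo_soup = BeautifulSoup(html_doc, "html.parser")
    print("title =", demo_soup.title.text)
    print("table rows =", len(demo_soup.select("table tr")))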
|
n top_items according to instructions on the left
links = soup.select("a")
print("Liczba linków = ", len(links))
for ahref in links:
text = ahref.text
text = text.strip() if text is not None else ""
href = ahref.get("href")
href = href.strip() if href is not None else ""
all_links.append({"href": href, "text": text})
print("menu:\n1.Przykład z zajęć\n2.Otwórz wybrany link")
wybor = input("twój wybór:")
if wybor == "1":
print(all_links)
elif wybor == "2":
licznik = 0
domena = "https://codedamn-classrooms.github.io"
while licznik<len(links):
|
identifier_body
|
main.py
|
():
# Create all_h1_tags as empty list
all_h1_tags = []
# Set all_h1_tags to all h1 tags of the soup
for element in soup.select("h1"):
all_h1_tags.append(element.text)
# Create seventh_p_text and set it to 7th p element text of the page
seventh_p_text = soup.select("p")[6].text
all_p = []
for element in soup.select("p"):
all_p.append(element.text)
print("Menu example 4:")
print("1.Przykład z zajęć\n2.Pokaż wszystkie\n3.Wyszukaj")
wybor = input("Twój wybór:")
if wybor == "1":
print("Przykład z zajęć: ",all_h1_tags, seventh_p_text)
elif wybor == "2":
print("A teraz w pętli sprawdźmy:", len(all_p))
licznik = 0
while licznik<len(all_p):
print(licznik+1, all_p[licznik])
licznik +=1
elif wybor == "3":
tekst = "który 'p' (max " + str(len(all_p)-1) + " element wyświetlić?:"
ktoryP = input(tekst)
print(ktoryP ," element: ",all_h1_tags, soup.select("p")[int(ktoryP)].text)
#endregion
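#region 4-web-scraping-example4 defensive variant (editor's sketch)
# Hedged addition: soup.select("p")[6] in example4 raises an IndexError on
# pages with fewer than seven paragraphs; this small variant degrades gracefully.
def seventh_paragraph(soup):
    paragraphs = soup.select("p")
    # Return the text of the 7th <p>, or an empty string on shorter pages.
    return paragraphs[6].text.strip() if len(paragraphs) > 6 else ""
#endregion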
#region 4-web-scraping-example5.py
def example5():
# Create top_items as empty list
top_items = []
# Extract and store in top_items according to instructions on the left
products = soup.select("div.thumbnail")
print("Liczba top items = ", len(products))
for elem in products:
title = elem.select("h4 > a.title")[0].text
review_label = elem.select("div.ratings")[0].text
info = {"title": title.strip(), "review": review_label.strip()}
top_items.append(info)
print("menu:\n1.przykła z zajęć\n2.Wyświetl tytuły produktów\n3.Znajdź produkt po tytule")
wybor = input("twój wybór:")
if wybor == "1":
print(top_items)
elif wybor == "2":
print("sprawdźmy jakie produkty są: ")
licznik = 0
while licznik<len(top_items):
print(top_items[licznik]["title"])
licznik+=1
elif wybor =="3":
fragmentTekstu = input("podaj tekst jakiego szukasz:")
licznik = 0
jest = False
while licznik<len(top_items):
napis = top_items[licznik]["title"]
if napis.count(fragmentTekstu) != 0:
print(top_items[licznik]["title"])
jest = True
licznik+=1
if jest == False:
print("Nie znaleniono produktu")
print("-----------------------------------------")
#endregion
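#region 4-web-scraping-example5 case-insensitive search (editor's sketch)
# Hedged addition: the title search in example5 is case-sensitive, so "Dress"
# will not match "dress". A minimal case-insensitive variant over the same
# top_items list of {"title": ..., "review": ...} dicts:
def find_products(top_items, fragment):
    fragment = fragment.lower()
    return [item for item in top_items if fragment in item["title"].lower()]
#endregion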
#region 4-web-scraping-example6.py
def example6():
# Create top_items as empty list
image_data = []
domena = "https://codedamn-classrooms.github.io"
# Extract and store in top_items according to instructions on the left
images = soup.select("img")
print("Liczba obrazków =", len(images))
for image in images:
# print(type(image))
src = image.get("src")
alt = image.get("alt")
image_data.append({"src": src, "alt": alt})
print("menu:\n1.Przykład z zajęć\n2.Wyświetl obraz")
wybor = input("twój wybór:")
if wybor == "1":
print(image_data)
elif wybor == "2":
print("Który obraz wyświetlić:")
licznik = 0
while licznik<len(images):
print(licznik,".",image_data[licznik]["alt"])
licznik += 1
wybor = input("podaj id:")
adres_obrazu = domena + image_data[int(wybor)]["src"]
print("obraz powinnien się wyświetlić")
# dobrze jest wiedzieć co pobieramy, wyświetlmy wieć obraz na ekranie:
print(adres_obrazu)
im = Image.open(requests.get(adres_obrazu, stream=True).raw)
im.show()
#endregion
#region 4-web-scraping-example6_lab.py
def example6_lab():
# Create top_items as empty list
all_links = []
# Extract and store in top_items according to instructions on the left
links = soup.select("a")
print("Liczba linków = ", len(links))
for ahref in links:
text = ahref.text
text = text.strip() if text is not None else ""
href = ahref.get("href")
href = href.strip() if href is not None else ""
all_links.append({"href": href, "text": text})
print("menu:\n1.Przykład z zajęć\n2.Otwórz wybrany link")
wybor = input("twój wybór:")
if wybor == "1":
print(all_links)
elif wybor == "2":
licznik = 0
domena = "https://codedamn-classrooms.github.io"
while licznik<len(links):
if all_links[licznik]["href"].count("https") > 0:
adres = all_links[licznik]["href"]
elif all_links[licznik]["href"].count("#") > 0:
adres = domena + "/" + all_links[licznik]["href"]
else:
adres = domena + all_links[licznik]["href"]
print(licznik,".",adres)
licznik += 1
wybor = input("podaj id:")
if all_links[int(wybor)]["href"].count("https") > 0:
adres = all_links[int(wybor)]["href"]
elif all_links[int(wybor)]["href"].count("#") > 0:
adres = domena + "/" + all_links[int(wybor)]["href"]
else:
adres = domena + all_links[int(wybor)]["href"]
webbrowser.open(adres, new=2)
#endregion
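#region 4-web-scraping-example6_lab with urljoin (editor's sketch)
# Hedged addition: the https/# branching in example6_lab hand-rolls URL
# resolution; urllib.parse.urljoin from the standard library covers absolute
# links, fragments and relative paths in one call, against the same base.
from urllib.parse import urljoin

def resolve_link(href, base="https://codedamn-classrooms.github.io"):
    # Absolute URLs pass through unchanged; relative ones resolve against base.
    return urljoin(base + "/", href)
#endregion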
#region 4-web-scraping-example7.py
def example7():
# Create top_items as empty list
all_products = []
# Extract and store in top_items according to instructions on the left
products = soup.select('div.thumbnail')
for product in products:
name = product.select('h4 > a')[0].text.strip()
description = product.select('p.description')[0].text.strip()
price = product.select('h4.price')[0].text.strip()
reviews = product.select('div.ratings')[0].text.strip()
image = product.select('img')[0].get('src')
all_products.append({
"name": name,
"description": description,
"price": price,
"reviews": reviews,
"image": image
})
keys = all_products[0].keys()
print("keys = ", keys)
with open('products.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_products)
#endregion
#region
def example8():
odpowiedz = requests.get("https://pl.wikipedia.org/wiki/Zygmunt_III_Waza")
print("odpowiedź = \n", odpowiedz)
print(odpowiedz.status_code)
html_text: str = odpowiedz.text
# print("Strona o Wazie = \n", html_text)
user1 = requests.get("https://jsonplaceholder.typicode.com/users/1")
json_text: dict = user1.json()
print("url = ", user1.url)
print("json_text = ", json_text)
print("history = ", user1.history)
# print("odpowiedź tekstowa = \n", html_text)
html_doc = """<html>
<head>
<title>Moja pierwsza strona!</title>
</head>
<body>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec posuere elit at malesuada tempor. Donec eget ligula in ante auctor luctus. Phasellus iaculis porttitor gravida. Donec eget sem lorem. Morbi a libero imperdiet, viverra tellus ac, consequat tortor. Suspendisse nibh massa, accumsan non neque a, vestibulum commodo dui.</p>
<p>Phasellus vestibulum ut <br>erat sit amet ullamcorper. Nam at elit feugiat, dapibus ante vitae, ullamcorper dui. Nunc rutrum at nibh tincidunt mattis. In finibus sed ante vel mollis. Donec at semper metus
|
example4
|
identifier_name
|
|
main.py
|
1
elif wybor == "3":
tekst = "który 'p' (max " + str(len(all_p)-1) + " element wyświetlić?:"
ktoryP = input(tekst)
print(ktoryP ," element: ",all_h1_tags, soup.select("p")[int(ktoryP)].text)
#endregion
#region 4-web-scraping-example5.py
def example5():
# Create top_items as empty list
top_items = []
# Extract and store in top_items according to instructions on the left
products = soup.select("div.thumbnail")
print("Liczba top items = ", len(products))
for elem in products:
title = elem.s
|
nu:\n1.przykład z zajęć\n2.Wyświetl tytuły produktów\n3.Znajdź produkt po tytule")
wybor = input("twój wybór:")
if wybor == "1":
print(top_items)
elif wybor == "2":
print("sprawdźmy jakie produkty są: ")
licznik = 0
while licznik<len(top_items):
print(top_items[licznik]["title"])
licznik+=1
elif wybor =="3":
fragmentTekstu = input("podaj tekst jakiego szukasz:")
licznik = 0
jest = False
while licznik<len(top_items):
napis = top_items[licznik]["title"]
if napis.count(fragmentTekstu) != 0:
print(top_items[licznik]["title"])
jest = True
licznik+=1
if jest == False:
print("Nie znaleniono produktu")
print("-----------------------------------------")
#endregion
#region 4-web-scraping-example6.py
def example6():
# Create top_items as empty list
image_data = []
domena = "https://codedamn-classrooms.github.io"
# Extract and store in top_items according to instructions on the left
images = soup.select("img")
print("Liczba obrazków =", len(images))
for image in images:
# print(type(image))
src = image.get("src")
alt = image.get("alt")
image_data.append({"src": src, "alt": alt})
print("menu:\n1.Przykład z zajęć\n2.Wyświetl obraz")
wybor = input("twój wybór:")
if wybor == "1":
print(image_data)
elif wybor == "2":
print("Który obraz wyświetlić:")
licznik = 0
while licznik<len(images):
print(licznik,".",image_data[licznik]["alt"])
licznik += 1
wybor = input("podaj id:")
adres_obrazu = domena + image_data[int(wybor)]["src"]
print("obraz powinnien się wyświetlić")
# dobrze jest wiedzieć co pobieramy, wyświetlmy wieć obraz na ekranie:
print(adres_obrazu)
im = Image.open(requests.get(adres_obrazu, stream=True).raw)
im.show()
#endregion
#region 4-web-scraping-example6_lab.py
def example6_lab():
# Create top_items as empty list
all_links = []
# Extract and store in top_items according to instructions on the left
links = soup.select("a")
print("Liczba linków = ", len(links))
for ahref in links:
text = ahref.text
text = text.strip() if text is not None else ""
href = ahref.get("href")
href = href.strip() if href is not None else ""
all_links.append({"href": href, "text": text})
print("menu:\n1.Przykład z zajęć\n2.Otwórz wybrany link")
wybor = input("twój wybór:")
if wybor == "1":
print(all_links)
elif wybor == "2":
licznik = 0
domena = "https://codedamn-classrooms.github.io"
while licznik<len(links):
if all_links[licznik]["href"].count("https") > 0:
adres = all_links[licznik]["href"]
elif all_links[licznik]["href"].count("#") > 0:
adres = domena + "/" + all_links[licznik]["href"]
else:
adres = domena + all_links[licznik]["href"]
print(licznik,".",adres)
licznik += 1
wybor = input("podaj id:")
if all_links[int(wybor)]["href"].count("https") > 0:
adres = all_links[int(wybor)]["href"]
elif all_links[int(wybor)]["href"].count("#") > 0:
adres = domena + "/" + all_links[int(wybor)]["href"]
else:
adres = domena + all_links[int(wybor)]["href"]
webbrowser.open(adres, new=2)
#endregion
#region 4-web-scraping-example7.py
def example7():
# Create top_items as empty list
all_products = []
# Extract and store in top_items according to instructions on the left
products = soup.select('div.thumbnail')
for product in products:
name = product.select('h4 > a')[0].text.strip()
description = product.select('p.description')[0].text.strip()
price = product.select('h4.price')[0].text.strip()
reviews = product.select('div.ratings')[0].text.strip()
image = product.select('img')[0].get('src')
all_products.append({
"name": name,
"description": description,
"price": price,
"reviews": reviews,
"image": image
})
keys = all_products[0].keys()
print("keys = ", keys)
with open('products.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_products)
#endregion
#region
def example8():
odpowiedz = requests.get("https://pl.wikipedia.org/wiki/Zygmunt_III_Waza")
print("odpowiedź = \n", odpowiedz)
print(odpowiedz.status_code)
html_text: str = odpowiedz.text
# print("Strona o Wazie = \n", html_text)
user1 = requests.get("https://jsonplaceholder.typicode.com/users/1")
json_text: dict = user1.json()
print("url = ", user1.url)
print("json_text = ", json_text)
print("history = ", user1.history)
# print("odpowiedź tekstowa = \n", html_text)
html_doc = """<html>
<head>
<title>Moja pierwsza strona!</title>
</head>
<body>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec posuere elit at malesuada tempor. Donec eget ligula in ante auctor luctus. Phasellus iaculis porttitor gravida. Donec eget sem lorem. Morbi a libero imperdiet, viverra tellus ac, consequat tortor. Suspendisse nibh massa, accumsan non neque a, vestibulum commodo dui.</p>
<p>Phasellus vestibulum ut <br>erat sit amet ullamcorper. Nam at elit feugiat, dapibus ante vitae, ullamcorper dui. Nunc rutrum at nibh tincidunt mattis. In finibus sed ante vel mollis. Donec at semper metus. Aenean quis consectetur risus. Sed suscipit felis sed ex pretium euismod. In fermentum mi a odio porttitor, dapibus aliquet leo accumsan. Suspendisse pretium augue et faucibus euismod. Quisque risus metus, ultricies nec tortor at, efficitur convallis nunc.</p>
<ul>
<li>Pierwszy punkt</li>
<li>Drugi punkt</li>
<li>Trzeci punkt</li>
</ul>
<ol>
<li>Pierwszy punkt</li>
<li>Drugi punkt</li>
<li>Trzeci punkt</li>
</ol>
<table border="3" bgcolor="#ff00ff" class="tabela blog">
<tr><th>Naglowek 1</th><th>Naglowek 2</th></tr>
<tr><td>komorka 11</td><td>komorka 12</td></tr>
<tr><td>komorka
|
elect("h4 > a.title")[0].text
review_label = elem.select("div.ratings")[0].text
info = {"title": title.strip(), "review": review_label.strip()}
top_items.append(info)
print("me
|
conditional_block
|
main.py
|
list
top_items = []
# Extract and store in top_items according to instructions on the left
products = soup.select("div.thumbnail")
print("Liczba top items = ", len(products))
for elem in products:
title = elem.select("h4 > a.title")[0].text
review_label = elem.select("div.ratings")[0].text
info = {"title": title.strip(), "review": review_label.strip()}
top_items.append(info)
print("menu:\n1.przykła z zajęć\n2.Wyświetl tytuły produktów\n3.Znajdź produkt po tytule")
wybor = input("twój wybór:")
if wybor == "1":
print(top_items)
elif wybor == "2":
print("sprawdźmy jakie produkty są: ")
licznik = 0
while licznik<len(top_items):
print(top_items[licznik]["title"])
licznik+=1
elif wybor =="3":
fragmentTekstu = input("podaj tekst jakiego szukasz:")
licznik = 0
jest = False
while licznik<len(top_items):
napis = top_items[licznik]["title"]
if napis.count(fragmentTekstu) != 0:
print(top_items[licznik]["title"])
jest = True
licznik+=1
if jest == False:
print("Nie znaleniono produktu")
print("-----------------------------------------")
#endregion
#region 4-web-scraping-example6.py
def example6():
# Create top_items as empty list
image_data = []
domena = "https://codedamn-classrooms.github.io"
# Extract and store in top_items according to instructions on the left
images = soup.select("img")
print("Liczba obrazków =", len(images))
for image in images:
# print(type(image))
src = image.get("src")
alt = image.get("alt")
image_data.append({"src": src, "alt": alt})
print("menu:\n1.Przykład z zajęć\n2.Wyświetl obraz")
wybor = input("twój wybór:")
if wybor == "1":
print(image_data)
elif wybor == "2":
print("Który obraz wyświetlić:")
licznik = 0
while licznik<len(images):
print(licznik,".",image_data[licznik]["alt"])
licznik += 1
wybor = input("podaj id:")
adres_obrazu = domena + image_data[int(wybor)]["src"]
print("obraz powinnien się wyświetlić")
# dobrze jest wiedzieć co pobieramy, wyświetlmy wieć obraz na ekranie:
print(adres_obrazu)
im = Image.open(requests.get(adres_obrazu, stream=True).raw)
im.show()
#endregion
#region 4-web-scraping-example6_lab.py
def example6_lab():
# Create top_items as empty list
all_links = []
# Extract and store in top_items according to instructions on the left
links = soup.select("a")
print("Liczba linków = ", len(links))
for ahref in links:
text = ahref.text
text = text.strip() if text is not None else ""
href = ahref.get("href")
href = href.strip() if href is not None else ""
all_links.append({"href": href, "text": text})
print("menu:\n1.Przykład z zajęć\n2.Otwórz wybrany link")
wybor = input("twój wybór:")
if wybor == "1":
print(all_links)
elif wybor == "2":
licznik = 0
domena = "https://codedamn-classrooms.github.io"
while licznik<len(links):
if all_links[licznik]["href"].count("https") > 0:
adres = all_links[licznik]["href"]
elif all_links[licznik]["href"].count("#") > 0:
adres = domena + "/" + all_links[licznik]["href"]
else:
adres = domena + all_links[licznik]["href"]
print(licznik,".",adres)
licznik += 1
wybor = input("podaj id:")
if all_links[int(wybor)]["href"].count("https") > 0:
adres = all_links[int(wybor)]["href"]
elif all_links[int(wybor)]["href"].count("#") > 0:
adres = domena + "/" + all_links[int(wybor)]["href"]
else:
adres = domena + all_links[int(wybor)]["href"]
webbrowser.open(adres, new=2)
#endregion
#region 4-web-scraping-example7.py
def example7():
# Create top_items as empty list
all_products = []
# Extract and store in top_items according to instructions on the left
products = soup.select('div.thumbnail')
for product in products:
name = product.select('h4 > a')[0].text.strip()
description = product.select('p.description')[0].text.strip()
price = product.select('h4.price')[0].text.strip()
reviews = product.select('div.ratings')[0].text.strip()
image = product.select('img')[0].get('src')
all_products.append({
"name": name,
"description": description,
"price": price,
"reviews": reviews,
"image": image
})
keys = all_products[0].keys()
print("keys = ", keys)
with open('products.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_products)
#endregion
#region
def example8():
odpowiedz = requests.get("https://pl.wikipedia.org/wiki/Zygmunt_III_Waza")
print("odpowiedź = \n", odpowiedz)
print(odpowiedz.status_code)
html_text: str = odpowiedz.text
# print("Strona o Wazie = \n", html_text)
user1 = requests.get("https://jsonplaceholder.typicode.com/users/1")
json_text: dict = user1.json()
print("url = ", user1.url)
print("json_text = ", json_text)
print("history = ", user1.history)
# print("odpowiedź tekstowa = \n", html_text)
html_doc = """<html>
<head>
<title>Moja pierwsza strona!</title>
</head>
<body>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec posuere elit at malesuada tempor. Donec eget ligula in ante auctor luctus. Phasellus iaculis porttitor gravida. Donec eget sem lorem. Morbi a libero imperdiet, viverra tellus ac, consequat tortor. Suspendisse nibh massa, accumsan non neque a, vestibulum commodo dui.</p>
<p>Phasellus vestibulum ut <br>erat sit amet ullamcorper. Nam at elit feugiat, dapibus ante vitae, ullamcorper dui. Nunc rutrum at nibh tincidunt mattis. In finibus sed ante vel mollis. Donec at semper metus. Aenean quis consectetur risus. Sed suscipit felis sed ex pretium euismod. In fermentum mi a odio porttitor, dapibus aliquet leo accumsan. Suspendisse pretium augue et faucibus euismod. Quisque risus metus, ultricies nec tortor at, efficitur convallis nunc.</p>
<ul>
<li>Pierwszy punkt</li>
<li>Drugi punkt</li>
<li>Trzeci punkt</li>
</ul>
<ol>
<li>Pierwszy punkt</li>
<li>Drugi punkt</li>
<li>Trzeci punkt</li>
</ol>
<table border="3" bgcolor="#ff00ff" class="tabela blog">
<tr><th>Naglowek 1</th><th>Naglowek 2</th></tr>
<tr><td>komorka 11</td><td>komorka 12</td></tr>
<tr><td>komorka 21</td><td>komorka 22</td></tr>
</table>
<a href="http://google.pl">Arcyciekawa strona</a>
</body>
</html>"""
url_amw = "https://www.amw.gdynia.pl/"
# page = requests.get(url_amw)
|
# soup = BeautifulSoup(page.content, "html.parser")
soup = BeautifulSoup(html_doc, "lxml")
# Extract head of page
|
random_line_split
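As a follow-up to example8, the inline html_doc parses cleanly, and the resulting soup exposes the title, the table cells and the link directly. A minimal sketch of what the extraction step might continue with (an editor's illustration, assuming bs4 and lxml are installed, as the surrounding code implies):

from bs4 import BeautifulSoup

def inspect_static_page(html_doc):
    soup = BeautifulSoup(html_doc, "lxml")
    print("title:", soup.title.text)                 # Moja pierwsza strona!
    print("cells:", [td.text for td in soup.select("table.tabela td")])
    print("link:", soup.select("a")[0].get("href"))  # http://google.pl
    return soup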
|
|
upimg.js
|
, id: 17 , name:'天天卤味' },
{parentId: 2, id: 18 , name:'小明烧菜' },
{parentId: 2, id: 19 , name:'麦德豪' },
{parentId: 2, id: 20 , name:'水果捞' },
{parentId: 3, id: 1 , name:'自选快餐' },
{parentId: 3, id: 2 , name:'风味早点' },
{parentId: 4, id: 1 , name:'特色煲仔' },
{parentId: 4, id: 2 , name:'食汇水煮' },
{parentId: 4, id: 3 , name:'手工水饺' },
{parentId: 4, id: 4 , name:'乡村炖品' },
{parentId: 4, id: 5 , name:'饸饹面' },
{parentId: 4, id: 6 , name:'沙茶面' },
{parentId: 4, id: 7 , name:'漳州风味' },
{parentId: 4, id: 8 , name:'闽北特色' },
{parentId: 4, id: 9 , name:'新加坡捞烫' },
{parentId: 5, id: 1 , name:'喔喔手工水饺' },
{parentId: 5, id: 2 , name:'麻辣香锅' },
{parentId: 5, id: 3 , name:'淳百味' },
{parentId: 5, id: 4 , name:'瓦香鸡米饭' },
{parentId: 5, id: 5 , name:'猫叫了只鱼' },
{parentId: 5, id: 6 , name:'港式烧腊' },
{parentId: 5, id: 7 , name:'饭小榜牛肉饭' },
{parentId: 5, id: 8 , name:'品鲜高汤' },
{parentId: 5, id: 9 , name:'老鸭粉面' },
{parentId: 5, id: 10 , name:'李大碗拉面' },
{parentId: 5, id: 11 , name:'廖记瓦罐' },
{parentId: 5, id: 12 , name:'章叔有空' },
{parentId: 5, id: 13 , name:'正粤营养糖水粥' },
{parentId: 6, id: 1 , name:'阿肥发扁食' },
{parentId: 6, id: 2 , name:'阿妈香肉拌饭' },
{parentId: 6, id: 3 , name:'阿兴瓦罐' },
{parentId: 6, id: 4 , name:'典赞花甲粉' },
{parentId: 6, id: 5 , name:'广式煲仔砂锅' },
{parentId: 6, id: 6 , name:'麻辣香锅' },
{parentId: 6, id: 7 , name:'蜀合记' },
{parentId: 6, id: 8 , name:'无骨烤鱼饭' },
{parentId: 6, id: 9 , name:'杨守荣麻辣烫' },
{parentId: 6, id: 10 , name:'旋转小火锅' },
{parentId: 6, id: 11 , name:'壹碗米饭' },
{parentId: 6, id: 12 , name:'自选快餐' },
{parentId: 7, id: 1 , name:'营养快餐' },
{parentId: 7, id: 2 , name:'功夫粥' },
{parentId: 7, id: 3 , name:'腊么香' },
{parentId: 7, id: 4 , name:'新概念瓦罐' },
{parentId: 7, id: 5 , name:'沙县小吃' },
{parentId: 7, id: 6 , name:'小米米' },
{parentId: 7, id: 7 , name:'民族餐厅' },
{parentId: 7, id: 8 , name:'锅先生渔粉' },
{parentId: 7, id: 9 , name:'沙茶面' },
{parentId: 7, id: 10 , name:'水果捞' },
{parentId: 7, id: 11 , name:'鑫龙福麻辣烫' },
{parentId: 7, id: 12 , name:'兰州拉面' },
{parentId: 7, id: 13 , name:'胖子' },
{parentId: 8, id: 1 , name:'迪卡健身餐厅' },
{parentId: 8, id: 2 , name:'肉骨茶' },
{parentId: 8, id: 3 , name:'范青春焖饭' },
{parentId: 8, id: 4 , name:'小猪麻辣烫' },
{parentId: 8, id: 5 , name:'沙茶面' },
{parentId: 8, id: 6 , name:'金翼瓦罐' },
{parentId: 8, id: 7 , name:'石锅鱼' },
{parentId: 8, id: 8 , name:'重庆小面' },
{parentId: 8, id: 9 , name:'一品草膳汤' },
{parentId: 8, id: 10 , name:'老上海混沌' },
{parentId: 8, id: 11 , name:'脆皮鸡米饭' },
{parentId: 8, id: 12 , name:'闽香卤味' },
{parentId: 8, id: 13 , name:'谷田稻乡' },
],
proCityIndex: [0, 0], // indices of the entries currently shown in the two-column picker
},
onLoad: function(){
let proArr = [];
let cityArr = [];
for(var i=0; i<this.data.proObjArr.length; i++){
proArr.push(this.data.proObjArr[i].name)
}
for(var i=0; i<this.data.cityObjArr.length; i++){
if(this.data.cityObjArr[i].parentId == 1){
cityArr.push(this.data.cityObjArr[i].name)
}
}
this.setData({ // seed the currently shown data with defaults from the lists above
[`proCityArr[0]`]: proArr,
[`proCityArr[1]`]: cityArr,
})
},
formSubmit(e) {
if(
this.data.imgsrctest!="../../images/upimg/update.png"&&
this.data.proCityArr[1][this.data.proCityIndex[1]]!=''&& //判定数据不得为空
this.data.inputvaule!=''&&
this.data.details!=''
)
{
console.log('form发生了submit事件,携带数据为:', e.detail.value)
console.log('form发生了submit事件,食堂序号为:', e.detail.value.picker[0])
console.log('form发生了submit事件,店名为:',this.data.proCityArr[1][this.data.proCityIndex[1]])
console.log('form发生了submit事件,提交数据:菜名:',this.data.inputvaule,"介绍:",this.data.details)
console.log('form发生了submit事件,提交图片为:',this.data.imgsrctest[0])
this.setData({
chosen: e.detail.value
})
var that=this
wx.cloud.uploadFile({
cloudPath: 'images/userupload/'+getApp().globalData.useropenid+'/'+that.data.proCityArr[1][that.data.proCityIndex[1]]+((e.detail.value.picker[0]+1))+that.data.inputvaule+'.png',
filePath: this.data.imgsrctest[0], // file path
success: res => {
console.log('图片上传到云存储成功,上传图片路径:',res.fileID)
db.collection('userupload').add({
data:
|
{
show2:true,
store:that.data.proCityArr[1][that.data.proCityIndex[1]],
canteennum:e.detail.value.picker[0]+1,
talknum:0,
cainum:0,
isaudit:false,
img_name:that.data.inputvaule,
img_src: 'https://7375-sutest2020-ukd9i-1304294060.tcb.qcloud.la/images/userupload/'+getApp().globalData.useropenid+'/'+that.data.proCityArr[1][that.data.proCityIndex[1]]+((e.detail.value.picker[0]+1))+that.data.inputvaule+'.png',
img_tex2:that.data.details,
talks:[],
zannum:0,
img_dislike:"../../images/menuicon/cai.png",
img_like:"../../images/menuicon/xin1.png",
img_tex1:that.data.inputvaule,
show1:true
},
success:function(res)
{
console.log('上传到userupload成功,云端返回信息:',res)
|
identifier_body
|
|
upimg.js
|
先生渔粉' },
{parentId: 7, id: 9 , name:'沙茶面' },
{parentId: 7, id: 10 , name:'水果捞' },
{parentId: 7, id: 11 , name:'鑫龙福麻辣烫' },
{parentId: 7, id: 12 , name:'兰州拉面' },
{parentId: 7, id: 13 , name:'胖子' },
{parentId: 8, id: 1 , name:'迪卡健身餐厅' },
{parentId: 8, id: 2 , name:'肉骨茶' },
{parentId: 8, id: 3 , name:'范青春焖饭' },
{parentId: 8, id: 4 , name:'小猪麻辣烫' },
{parentId: 8, id: 5 , name:'沙茶面' },
{parentId: 8, id: 6 , name:'金翼瓦罐' },
{parentId: 8, id: 7 , name:'石锅鱼' },
{parentId: 8, id: 8 , name:'重庆小面' },
{parentId: 8, id: 9 , name:'一品草膳汤' },
{parentId: 8, id: 10 , name:'老上海混沌' },
{parentId: 8, id: 11 , name:'脆皮鸡米饭' },
{parentId: 8, id: 12 , name:'闽香卤味' },
{parentId: 8, id: 13 , name:'谷田稻乡' },
],
proCityIndex: [0, 0], // indices of the entries currently shown in the two-column picker
},
onLoad: function(){
let proArr = [];
let cityArr = [];
for(var i=0; i<this.data.proObjArr.length; i++){
proArr.push(this.data.proObjArr[i].name)
}
for(var i=0; i<this.data.cityObjArr.length; i++){
if(this.data.cityObjArr[i].parentId == 1){
cityArr.push(this.data.cityObjArr[i].name)
}
}
this.setData({ // seed the currently shown data with defaults from the lists above
[`proCityArr[0]`]: proArr,
[`proCityArr[1]`]: cityArr,
})
},
formSubmit(e) {
if(
this.data.imgsrctest!="../../images/upimg/update.png"&&
this.data.proCityArr[1][this.data.proCityIndex[1]]!=''&& //判定数据不得为空
this.data.inputvaule!=''&&
this.data.details!=''
)
{
console.log('form发生了submit事件,携带数据为:', e.detail.value)
console.log('form发生了submit事件,食堂序号为:', e.detail.value.picker[0])
console.log('form发生了submit事件,店名为:',this.data.proCityArr[1][this.data.proCityIndex[1]])
console.log('form发生了submit事件,提交数据:菜名:',this.data.inputvaule,"介绍:",this.data.details)
console.log('form发生了submit事件,提交图片为:',this.data.imgsrctest[0])
this.setData({
chosen: e.detail.value
})
var that=this
wx.cloud.uploadFile({
cloudPath: 'images/userupload/'+getApp().globalData.useropenid+'/'+that.data.proCityArr[1][that.data.proCityIndex[1]]+((e.detail.value.picker[0]+1))+that.data.inputvaule+'.png',
filePath: this.data.imgsrctest[0], // file path
success: res => {
console.log('图片上传到云存储成功,上传图片路径:',res.fileID)
db.collection('userupload').add({
data: {
show2:true,
store:that.data.proCityArr[1][that.data.proCityIndex[1]],
canteennum:e.detail.value.picker[0]+1,
talknum:0,
cainum:0,
isaudit:false,
img_name:that.data.inputvaule,
img_src: 'https://7375-sutest2020-ukd9i-1304294060.tcb.qcloud.la/images/userupload/'+getApp().globalData.useropenid+'/'+that.data.proCityArr[1][that.data.proCityIndex[1]]+((e.detail.value.picker[0]+1))+that.data.inputvaule+'.png',
img_tex2:that.data.details,
talks:[],
zannum:0,
img_dislike:"../../images/menuicon/cai.png",
img_like:"../../images/menuicon/xin1.png",
img_tex1:that.data.inputvaule,
show1:true
},
success:function(res)
{
console.log('上传到userupload成功,云端返回信息:',res)
},fail: console.error
})
},
fail: err => {
console.log(err)
console.log(cloudPath)
}
})
wx.showToast({
title: '提交中',
icon: 'loading',
duration: 2000,
success:function(){
setTimeout(function () {
wx.showToast({
title: '感谢您为饭图添砖加瓦',
icon: 'none',
duration: 1500,
})
setTimeout(function (){
wx.switchTab({
url: '../homepage/homepage'
})},1000)
}
,2000)
}
})
}else{
wx.showToast({
title: "抱歉 请勿留空数据",
icon: 'none',
duration: 1000,
})
}
},
formReset(e) {
console.log('form发生了reset事件,携带数据为:', e.detail.value)
this.setData({
inputvaule:'',
details:''
})
},
// fired when the picker selection is confirmed
proCityChange: function(e){
let proCityIndex = e.detail.value
this.setData({
proCityIndex: proCityIndex
})
console.log(this.data.proCityArr[0][proCityIndex[0]],
this.data.proCityArr[1][proCityIndex[1]],);
},
// fired while a picker column is being scrolled
proCityColumnChange: function(e){
let column = e.detail.column;// which column changed
let index = e.detail.value;// which entry it changed to
let proCityIndex = this.data.proCityIndex;
if(column===0){// the first column's selection was changed by scrolling...
proCityIndex[0] = index;
proCityIndex[1] = 0;// when the first column changes, the second always shows its first entry
let currentParentId = this.data.proObjArr[index].id;
let cityArr = [];
for(var i=0; i<this.data.cityObjArr.length; i++){
if(this.data.cityObjArr[i].parentId == currentParentId){
cityArr.push(this.data.cityObjArr[i].name)
}
}
this.setData({
[`proCityArr[1]`]: cityArr, // re-assign the data currently shown
[`proCityIndex`]: proCityIndex // re-assign the index of the data currently shown
})
}else{// the second column's selection was changed by scrolling...
proCityIndex[1] = index;
this.setData({
[`proCityIndex`]: proCityIndex // re-assign the index of the data currently shown
})
}
},
getfoodname(event) { // listen for the dish-name input
var _this=this;
console.log("菜名:", event.detail.value)
this.setData({
inputvaule: event.detail.value
})
},
uploadPhoto(e) { // take a photo or pick one from the album to upload
let that = this;
var tempFilePaths
wx.chooseImage({
count: 1, // default is 9
sizeType: ['compressed'], // original or compressed image; the default allows both
sourceType: ['album', 'camera'], // album or camera; the default allows both
success(res) {
tempFilePaths = res.tempFilePaths; // list of local paths of the selected photos
//that.upload(that, tempFilePaths);
// console.log(tempFilePaths);
that.setData({
imgsrctest:tempFilePaths
})
}
})
console.log(tempFilePaths)
},
previewImg: function (e) {
// get the index of the current image
var imgs = this.data.imgsrctest;
wx.previewImage({
// currently displayed image
//current: imgs[index],
// all images
urls: imgs
})
},
bindTextAreaBlur: function(e) {
console.log("菜的相关介绍:",e.detail.value);
var that = this;
that.setData({
details: e.detail.value
});
let value = e.detail.value;// get the textarea content
let len = value.length;// length of the textarea content
this.setData({
'number': len
})
},
})
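The two-column picker above keeps a flat child list (cityObjArr) and filters it by the selected parent's id whenever the first column moves. The same data model, sketched in Python for clarity; the names mirror the JS fields, and the values are illustrative only:

# Illustrative stand-ins for proObjArr / cityObjArr.
canteens = [{"id": 1, "name": "Canteen 1"}, {"id": 2, "name": "Canteen 2"}]
stores = [
    {"parentId": 1, "id": 1, "name": "Store A"},
    {"parentId": 1, "id": 2, "name": "Store B"},
    {"parentId": 2, "id": 1, "name": "Store C"},
]

def stores_for(canteen_index):
    # Mirrors proCityColumnChange: keep only children of the selected parent.
    parent_id = canteens[canteen_index]["id"]
    return [s["name"] for s in stores if s["parentId"] == parent_id]

print(stores_for(0))  # ['Store A', 'Store B']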
|
identifier_name
|
||
upimg.js
|
{parentId: 6, id: 2 , name:'阿妈香肉拌饭' },
{parentId: 6, id: 3 , name:'阿兴瓦罐' },
{parentId: 6, id: 4 , name:'典赞花甲粉' },
{parentId: 6, id: 5 , name:'广式煲仔砂锅' },
{parentId: 6, id: 6 , name:'麻辣香锅' },
{parentId: 6, id: 7 , name:'蜀合记' },
{parentId: 6, id: 8 , name:'无骨烤鱼饭' },
{parentId: 6, id: 9 , name:'杨守荣麻辣烫' },
{parentId: 6, id: 10 , name:'旋转小火锅' },
{parentId: 6, id: 11 , name:'壹碗米饭' },
{parentId: 6, id: 12 , name:'自选快餐' },
{parentId: 7, id: 1 , name:'营养快餐' },
{parentId: 7, id: 2 , name:'功夫粥' },
{parentId: 7, id: 3 , name:'腊么香' },
{parentId: 7, id: 4 , name:'新概念瓦罐' },
{parentId: 7, id: 5 , name:'沙县小吃' },
{parentId: 7, id: 6 , name:'小米米' },
{parentId: 7, id: 7 , name:'民族餐厅' },
{parentId: 7, id: 8 , name:'锅先生渔粉' },
{parentId: 7, id: 9 , name:'沙茶面' },
{parentId: 7, id: 10 , name:'水果捞' },
{parentId: 7, id: 11 , name:'鑫龙福麻辣烫' },
{parentId: 7, id: 12 , name:'兰州拉面' },
{parentId: 7, id: 13 , name:'胖子' },
{parentId: 8, id: 1 , name:'迪卡健身餐厅' },
{parentId: 8, id: 2 , name:'肉骨茶' },
{parentId: 8, id: 3 , name:'范青春焖饭' },
{parentId: 8, id: 4 , name:'小猪麻辣烫' },
{parentId: 8, id: 5 , name:'沙茶面' },
{parentId: 8, id: 6 , name:'金翼瓦罐' },
{parentId: 8, id: 7 , name:'石锅鱼' },
{parentId: 8, id: 8 , name:'重庆小面' },
{parentId: 8, id: 9 , name:'一品草膳汤' },
{parentId: 8, id: 10 , name:'老上海混沌' },
{parentId: 8, id: 11 , name:'脆皮鸡米饭' },
{parentId: 8, id: 12 , name:'闽香卤味' },
{parentId: 8, id: 13 , name:'谷田稻乡' },
],
proCityIndex: [0, 0], // indices of the entries currently shown in the two-column picker
},
onLoad: function(){
let proArr = [];
let cityArr = [];
for(var i=0; i<this.data.proObjArr.length; i++){
proArr.push(this.data.proObjArr[i].name)
}
for(var i=0; i<this.data.cityObjArr.length; i++){
if(this.data.cityObjArr[i].parentId == 1){
cityArr.push(this.data.cityObjArr[i].name)
}
}
this.setData({ // seed the currently shown data with defaults from the lists above
[`proCityArr[0]`]: proArr,
[`proCityArr[1]`]: cityArr,
})
},
formSubmit(e) {
if(
this.data.imgsrctest!="../../images/upimg/update.png"&&
this.data.proCityArr[1][this.data.proCityIndex[1]]!=''&& //判定数据不得为空
this.data.inputvaule!=''&&
this.data.details!=''
)
{
console.log('form发生了submit事件,携带数据为:', e.detail.value)
console.log('form发生了submit事件,食堂序号为:', e.detail.value.picker[0])
console.log('form发生了submit事件,店名为:',this.data.proCityArr[1][this.data.proCityIndex[1]])
console.log('form发生了submit事件,提交数据:菜名:',this.data.inputvaule,"介绍:",this.data.details)
console.log('form发生了submit事件,提交图片为:',this.data.imgsrctest[0])
this.setData({
chosen: e.detail.value
})
var that=this
wx.cloud.uploadFile({
cloudPath: 'images/userupload/'+getApp().globalData.useropenid+'/'+that.data.proCityArr[1][that.data.proCityIndex[1]]+((e.detail.value.picker[0]+1))+that.data.inputvaule+'.png',
filePath: this.data.imgsrctest[0], // file path
success: res => {
console.log('图片上传到云存储成功,上传图片路径:',res.fileID)
db.collection('userupload').add({
data: {
show2:true,
store:that.data.proCityArr[1][that.data.proCityIndex[1]],
canteennum:e.detail.value.picker[0]+1,
talknum:0,
cainum:0,
isaudit:false,
img_name:that.data.inputvaule,
img_src: 'https://7375-sutest2020-ukd9i-1304294060.tcb.qcloud.la/images/userupload/'+getApp().globalData.useropenid+'/'+that.data.proCityArr[1][that.data.proCityIndex[1]]+((e.detail.value.picker[0]+1))+that.data.inputvaule+'.png',
img_tex2:that.data.details,
talks:[],
zannum:0,
img_dislike:"../../images/menuicon/cai.png",
img_like:"../../images/menuicon/xin1.png",
img_tex1:that.data.inputvaule,
show1:true
},
success:function(res)
{
console.log('上传到userupload成功,云端返回信息:',res)
},fail: console.error
})
},
fail: err => {
console.log(err)
console.log(cloudPath)
}
})
wx.showToast({
title: '提交中',
icon: 'loading',
duration: 2000,
success:function(){
setTimeout(function () {
wx.showToast({
title: '感谢您为饭图添砖加瓦',
icon: 'none',
duration: 1500,
})
setTimeout(function (){
wx.switchTab({
url: '../homepage/homepage'
})},1000)
}
,2000)
}
})
}else{
wx.showToast({
title: "抱歉 请勿留空数据",
icon: 'none',
duration: 1000,
})
}
},
formReset(e) {
console.log('form发生了reset事件,携带数据为:', e.detail.value)
this.setData({
inputvaule:'',
details:''
})
},
// fired when the picker selection is confirmed
proCityChange: function(e){
let proCityIndex = e.detail.value
this.setData({
proCityIndex: proCityIndex
})
console.log(this.data.proCityArr[0][proCityIndex[0]],
this.data.proCityArr[1][proCityIndex[1]],);
},
// fired while a picker column is being scrolled
proCityColumnChange: function(e){
let column = e.detail.column;// which column changed
let index = e.detail.value;// which entry it changed to
let proCityIndex = this.data.proCityIndex;
if(column===0){// the first column's selection was changed by scrolling...
proCityIndex[0] = index;
proCityIndex[1] = 0;// when the first column changes, the second always shows its first entry
let currentParentId = this.data.proObjArr[index].id;
let cityArr = [];
for(var i=0; i<this.data.cityObjArr.length; i++){
if(this.data.cityObjArr[i].parentId == currentParentId){
cityArr.push(this.data.cityObjArr[i].name)
}
}
this.setData({
[`proCityArr[1]`]: cityArr, // re-assign the data currently shown
[`proCityIndex`]: proCityIndex // re-assign the index of the data
|
currently shown
})
}else{// the second column's selection was changed by scrolling...
proCityIndex[1] = index;
this.setData({
[`proCityIndex`]:
|
conditional_block
|
|
upimg.js
|
18 , name:'兰州拉面' },
{parentId: 1, id: 19 , name:'锡纸烤' },
{parentId: 1, id: 20 , name:'嘿米牛肉饭' },
{parentId: 1, id: 21 , name:'猪脚饭' },
{parentId: 1, id: 22 , name:'豪客士' },
{parentId: 2, id: 1 , name:'99自助餐' },
{parentId: 2, id: 2 , name:'壹米阳光' },
{parentId: 2, id: 3 , name:'六号简餐' },
{parentId: 2, id: 4 , name:'酷乐普斯蛋包饭' },
{parentId: 2, id: 5 , name:'担仔面' },
{parentId: 2, id: 6 , name:'墨西哥特色烤肉' },
{parentId: 2, id: 7 , name:'老鱼炒饭' },
{parentId: 2, id: 8 , name:'鱼谷稻烤鱼饭' },
{parentId: 2, id: 9 , name:'大味小煲' },
{parentId: 2, id: 10 , name:'川蜀季' },
{parentId: 2, id: 11 , name:'百味园' },
{parentId: 2, id: 12 , name:'遵义羊肉粉' },
{parentId: 2, id: 13 , name:'兰州拉面' },
{parentId: 2, id: 14 , name:'好妈烫饭' },
{parentId: 2, id: 15 , name:'爱米渔渔粉' },
{parentId: 2, id: 16 , name:'小米米大碗饭' },
{parentId: 2, id: 17 , name:'天天卤味' },
{parentId: 2, id: 18 , name:'小明烧菜' },
{parentId: 2, id: 19 , name:'麦德豪' },
{parentId: 2, id: 20 , name:'水果捞' },
{parentId: 3, id: 1 , name:'自选快餐' },
{parentId: 3, id: 2 , name:'风味早点' },
{parentId: 4, id: 1 , name:'特色煲仔' },
{parentId: 4, id: 2 , name:'食汇水煮' },
{parentId: 4, id: 3 , name:'手工水饺' },
{parentId: 4, id: 4 , name:'乡村炖品' },
{parentId: 4, id: 5 , name:'饸饹面' },
{parentId: 4, id: 6 , name:'沙茶面' },
{parentId: 4, id: 7 , name:'漳州风味' },
{parentId: 4, id: 8 , name:'闽北特色' },
{parentId: 4, id: 9 , name:'新加坡捞烫' },
{parentId: 5, id: 1 , name:'喔喔手工水饺' },
{parentId: 5, id: 2 , name:'麻辣香锅' },
{parentId: 5, id: 3 , name:'淳百味' },
{parentId: 5, id: 4 , name:'瓦香鸡米饭' },
{parentId: 5, id: 5 , name:'猫叫了只鱼' },
{parentId: 5, id: 6 , name:'港式烧腊' },
{parentId: 5, id: 7 , name:'饭小榜牛肉饭' },
{parentId: 5, id: 8 , name:'品鲜高汤' },
{parentId: 5, id: 9 , name:'老鸭粉面' },
{parentId: 5, id: 10 , name:'李大碗拉面' },
{parentId: 5, id: 11 , name:'廖记瓦罐' },
{parentId: 5, id: 12 , name:'章叔有空' },
{parentId: 5, id: 13 , name:'正粤营养糖水粥' },
{parentId: 6, id: 1 , name:'阿肥发扁食' },
{parentId: 6, id: 2 , name:'阿妈香肉拌饭' },
{parentId: 6, id: 3 , name:'阿兴瓦罐' },
{parentId: 6, id: 4 , name:'典赞花甲粉' },
{parentId: 6, id: 5 , name:'广式煲仔砂锅' },
{parentId: 6, id: 6 , name:'麻辣香锅' },
{parentId: 6, id: 7 , name:'蜀合记' },
{parentId: 6, id: 8 , name:'无骨烤鱼饭' },
{parentId: 6, id: 9 , name:'杨守荣麻辣烫' },
{parentId: 6, id: 10 , name:'旋转小火锅' },
{parentId: 6, id: 11 , name:'壹碗米饭' },
{parentId: 6, id: 12 , name:'自选快餐' },
{parentId: 7, id: 1 , name:'营养快餐' },
{parentId: 7, id: 2 , name:'功夫粥' },
{parentId: 7, id: 3 , name:'腊么香' },
{parentId: 7, id: 4 , name:'新概念瓦罐' },
{parentId: 7, id: 5 , name:'沙县小吃' },
{parentId: 7, id: 6 , name:'小米米' },
{parentId: 7, id: 7 , name:'民族餐厅' },
{parentId: 7, id: 8 , name:'锅先生渔粉' },
{parentId: 7, id: 9 , name:'沙茶面' },
{parentId: 7, id: 10 , name:'水果捞' },
{parentId: 7, id: 11 , name:'鑫龙福麻辣烫' },
{parentId: 7, id: 12 , name:'兰州拉面' },
{parentId: 7, id: 13 , name:'胖子' },
{parentId: 8, id: 1 , name:'迪卡健身餐厅' },
{parentId: 8, id: 2 , name:'肉骨茶' },
{parentId: 8, id: 3 , name:'范青春焖饭' },
{parentId: 8, id: 4 , name:'小猪麻辣烫' },
{parentId: 8, id: 5 , name:'沙茶面' },
{parentId: 8, id: 6 , name:'金翼瓦罐' },
{parentId: 8, id: 7 , name:'石锅鱼' },
{parentId: 8, id: 8 , name:'重庆小面' },
{parentId: 8, id: 9 , name:'一品草膳汤' },
{parentId: 8, id: 10 , name:'老上海混沌' },
{parentId: 8, id: 11 , name:'脆皮鸡米饭' },
{parentId: 8, id: 12 , name:'闽香卤味' },
{parentId: 8, id: 13 , name:'谷田稻乡' },
],
proCityIndex: [0, 0], // indices of the entries currently shown in the two-column picker
},
onLoad: function(){
let proArr = [];
let cityArr = [];
for(var i=0; i<this.data.proObjArr.length; i++){
proArr.push(this.data.proObjArr[i].name)
}
for(var i=0; i<this.data.cityObjArr.length; i++){
if(this.data.cityObjArr[i].parentId == 1){
cityArr.push(this.data.cityObjArr[i].name)
}
}
this.setData({ // seed the currently shown data with defaults from the lists above
[`proCityArr[0]`]: proArr,
[`proCityArr[1]`]: cityArr,
})
},
formSubmit(e) {
if(
this.data.imgsrctest!="../../images/upimg/update.png"&&
this.data.proCityArr[1][this.data.proCityIndex[1]]!=''&& //判定数据不得为空
|
)
{
console.log('form发生了submit事件,携带数据为:', e.detail.value)
console.log('form发生了submit事件,食堂序号为:', e.detail.value.picker[0])
console.log('form发生了submit事件,店名为:',this.data.proCityArr[1][this.data.proCityIndex[1]])
|
this.data.inputvaule!=''&&
this.data.details!=''
|
random_line_split
|
train.py
|
0].coef_)))
for i, est in enumerate(estimators):
coeffs[i] = est.coef_
fig = plt.figure(figsize=(5.5, 5))
ax = fig.add_subplot(111)
m1 = ax.imshow(coeffs.T, aspect='auto', origin='lower')
ax.set_title('Visualization of the linear discriminant coefficients')
ax.set_xlabel('Linear discriminant models')
ax.set_ylabel('Coefficients')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(m1, cax=cax, orientation='vertical')
plt.tight_layout()
plt.savefig(filename, dpi=300)
@benchmark
def process_samples_in_network(eeg_sender, aud_sender):
"""
Computes the ECoG and LogMel features using the node based approach
"""
eeg_sender.start_processing()
aud_sender.start_processing()
eeg_sender.wait_for_completion()
aud_sender.wait_for_completion()
@benchmark
def quantization(y_train, nb_intervals=8):
"""
Quantize the logMel spectrogram
"""
medians, borders = compute_borders_logistic(y_train, nb_intervals=nb_intervals)
q_spectrogram = quantize_spectrogram(y_train, borders)
# log a note if a spec bin does not contain samples for an interval
for i in range(q_spectrogram.shape[1]):
diff = np.setdiff1d(np.arange(0, nb_intervals), q_spectrogram[:, i])
if diff.size > 0:
logger.info('Spec_bin "{}" misses samples for interval index/indices "{}"'.format(i, str(diff)))
return medians, borders, q_spectrogram
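# Editor's sketch: compute_borders_logistic and quantize_spectrogram are
# imported from another module and not shown here. An illustrative
# border-based quantizer of the same shape, using np.digitize (an assumption,
# not the project's actual implementation):
def quantize_by_borders(spec, borders):
    # Map each value to the index of the interval it falls into; borders holds
    # the nb_intervals - 1 inner boundaries, sorted ascending.
    return np.digitize(spec, borders)

def dequantize_by_medians(q_spec, medians):
    # Replace each interval index with that interval's median value.
    return np.asarray(medians)[q_spec]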
@benchmark
def feature_selection(x_train, y_train, nb_feats=150):
"""
Feature selection using correlation
"""
cs = np.zeros(x_train.shape[1])
for f in range(x_train.shape[1]):
if np.isclose(np.sum(x_train[:, f]), 0):
|
cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))
select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]
return select
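# Editor's usage sketch for feature_selection on synthetic data (shapes only;
# the real inputs are the stacked sEEG features and the logMel spectrogram,
# and we assume the @benchmark decorator passes the return value through,
# as the call in train() below relies on):
def _feature_selection_demo():
    rng = np.random.default_rng(0)
    x = rng.standard_normal((1000, 600))  # samples x features
    y = rng.standard_normal((1000, 40))   # samples x mel bins
    sel = feature_selection(x, y, nb_feats=150)  # indices of the 150 best features
    return x[:, sel]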
@benchmark
def train_estimators(estimators, x_train, y_train):
for mel_bin in range(len(estimators)):
estimators[mel_bin].fit(x_train, y_train[:, mel_bin])
if (mel_bin + 1) % 5 == 0:
logger.info('{:02d} LDAs fitted so far.'.format(mel_bin + 1))
@benchmark
def compute_features(eeg, sfreq_eeg, audio, audio_sr):
x_train = herff2016_b(eeg, sfreq_eeg, 0.05, 0.01)
# resample audio to 16kHz
audio = decimate(audio, 3)
audio_sr = 16000
y_train = compute_spectrogram(audio, audio_sr, 0.016, 0.01)
return x_train, y_train
def train(eeg, audio, sfreq_eeg, sfreq_audio, bad_channels, nb_mel_bins=40):
# exclude bad channels
if len(bad_channels) > 0:
logger.info('EEG original shape: {} x {}'.format(*eeg.shape))
mask = np.ones(eeg.shape[1], bool)
mask[bad_channels] = False
eeg = eeg[:, mask]
logger.info('EEG truncated shape: {} x {}'.format(*eeg.shape))
else:
logger.info('No bad channels specified.')
x_train, y_train = compute_features(eeg, sfreq_eeg, audio, sfreq_audio)
y_train = y_train[20:-4] # Skip 24 samples to align the neural signals to the audio. 20 frames are needed
# first to have all context for one sample. In addition, the window length is 0.05 sec
# instead of 0.016 as for the audio, resulting in 4 more frames. Cutting off at the
# beginning aligns the audio to the current frame.
# Quantize the logMel spectrogram
medians, borders, q_spectrogram = quantization(y_train, nb_intervals=9)
# Feature selection using correlation
select = feature_selection(x_train, y_train)
x_train = x_train[:, select]
estimators = [LinearDiscriminantAnalysis() for _ in range(nb_mel_bins)]
y_train = q_spectrogram
logger.info('x_train: ' + str(x_train.shape))
logger.info('y_train: ' + str(y_train.shape))
# just in case there is still a difference in the number of samples
minimum = min(len(x_train), len(y_train))
x_train = x_train[0:minimum, :]
y_train = y_train[0:minimum, :]
train_estimators(estimators=estimators, x_train=x_train, y_train=y_train)
return x_train, y_train, medians, estimators, select
def store_training_to_file(config, x_train, y_train, medians, estimators, bad_channels, select):
if config.getboolean('Training', 'draw_plots'):
# visualize train data
filename = '.'.join(['trainset', 'png'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
d_spectrogram = dequantize_spectrogram(y_train, medians)
visualize_train_data(x_train=x_train, y_train=d_spectrogram, filename=filename)
# save model parameters to file
filename = '.'.join(['LDAs', 'pkl'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
pickle.dump(estimators, open(filename, 'wb'))
# Store training features for activation plot
filename = '.'.join(['training_features', 'npy'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
np.save(filename, x_train)
# store model parameters
filename = '.'.join(['params', 'h5'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with h5py.File(filename, 'w') as hf:
hf.create_dataset('bad_channels', data=bad_channels)
hf.create_dataset('medians_array', data=medians)
hf.create_dataset('estimators', data=np.void(pickle.dumps(estimators)))
hf.create_dataset('select', data=select)
# Save used config file
filename = '.'.join(['train', 'ini'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with open(filename, 'w') as configfile:
config.write(configfile)
logger.info('Training configuration written to {}'.format(filename))
logger.info('Training completed.')
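# Editor's sketch: the estimators are stored in params.h5 as an opaque pickled
# blob wrapped in np.void, so reading them back mirrors that trick (assuming
# the file written by store_training_to_file above):
def load_training_from_file(filename):
    with h5py.File(filename, 'r') as hf:
        estimators = pickle.loads(hf['estimators'][()].tobytes())
        medians = hf['medians_array'][:]
        select = hf['select'][:]
        bad_channels = hf['bad_channels'][:]
    return estimators, medians, select, bad_channels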
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train linear discriminant models on aligned neural and audio data.')
parser.add_argument('config', help='Path to config file.')
parser.add_argument('--file', help='Comma separated XDF files containing the sEEG data and time aligned audio.')
parser.add_argument('--session', help='Name of the Session.')
parser.add_argument('--storage_dir', help='Path to the storage_dir.')
parser.add_argument('--channels', help='Specify which channels should be used during training and decoding. '
'Accepts a list of regex expressions. The channels will be selected '
'if they match at least one expression. Each regex expression is '
'enclosed in ^EXPRESSION$ to limit its influence.')
args = parser.parse_args()
# initialize the config parser
if not os.path.exists(args.config):
print('WARNING: File path to the config file is invalid. Please specify a proper path. Script will exit!')
exit(1)
config = configparser.ConfigParser()
config.read(args.config)
# if optional script arguments change arguments set in config, update them
if args.file is not None:
config['Training']['file'] = args.file
if args.session is not None:
config['General']['session'] = args.session
if args.storage_dir is not None:
config['General']['storage_dir'] = args.storage_dir
if args.channels is not None:
config['Training']['channels'] = args.channels
xdf_files = config['Training']['file'].split(',')
# create the directory path for storing the session
session_dir = os.path.join(config['General']['storage_dir'], config['General']['session'])
try:
os.makedirs(session_dir, exist_ok=config['Training']['overwrite_on_rerun'] == 'True')
except FileExistsError:
print('The directory path "{}" could not be created, since it is already present and the parameter '
'"overwrite_on_rerun" in the "Training" section is set to False. '
'Script will exit!'.format(session_dir))
exit(1)
# initialize logging handler
log_file = '.'.join(['train', 'log'])
log_file = os.path.join(config['General']['storage_dir'], config['General']['session'], log
|
cs[f] = 0
continue
|
conditional_block
|
train.py
|
[0].coef_)))
for i, est in enumerate(estimators):
coeffs[i] = est.coef_
fig = plt.figure(figsize=(5.5, 5))
ax = fig.add_subplot(111)
m1 = ax.imshow(coeffs.T, aspect='auto', origin='lower')
ax.set_title('Visualization of the linear discriminant coefficients')
ax.set_xlabel('Linear discriminant models')
ax.set_ylabel('Coefficients')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(m1, cax=cax, orientation='vertical')
plt.tight_layout()
plt.savefig(filename, dpi=300)
@benchmark
def process_samples_in_network(eeg_sender, aud_sender):
"""
Computes the ECoG and LogMel features using the node based approach
"""
eeg_sender.start_processing()
aud_sender.start_processing()
eeg_sender.wait_for_completion()
aud_sender.wait_for_completion()
@benchmark
def quantization(y_train, nb_intervals=8):
"""
Quantize the logMel spectrogram
"""
medians, borders = compute_borders_logistic(y_train, nb_intervals=nb_intervals)
q_spectrogram = quantize_spectrogram(y_train, borders)
# log a note if a spec bin does not contain samples for an interval
for i in range(q_spectrogram.shape[1]):
diff = np.setdiff1d(np.arange(0, nb_intervals), q_spectrogram[:, i])
if diff.size > 0:
logger.info('Spec_bin "{}" misses samples for interval index/indices "{}"'.format(i, str(diff)))
return medians, borders, q_spectrogram
@benchmark
def feature_selection(x_train, y_train, nb_feats=150):
"""
Feature selection using correlation
"""
cs = np.zeros(x_train.shape[1])
for f in range(x_train.shape[1]):
if np.isclose(np.sum(x_train[:, f]), 0):
cs[f] = 0
continue
cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))
select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]
return select
@benchmark
def train_estimators(estimators, x_train, y_train):
for mel_bin in range(len(estimators)):
estimators[mel_bin].fit(x_train, y_train[:, mel_bin])
if (mel_bin + 1) % 5 == 0:
logger.info('{:02d} LDAs fitted so far.'.format(mel_bin + 1))
@benchmark
def compute_features(eeg, sfreq_eeg, audio, audio_sr):
x_train = herff2016_b(eeg, sfreq_eeg, 0.05, 0.01)
# resample audio to 16kHz
audio = decimate(audio, 3)
audio_sr = 16000
y_train = compute_spectrogram(audio, audio_sr, 0.016, 0.01)
return x_train, y_train
def train(eeg, audio, sfreq_eeg, sfreq_audio, bad_channels, nb_mel_bins=40):
# exclude bad channels
if len(bad_channels) > 0:
logger.info('EEG original shape: {} x {}'.format(*eeg.shape))
mask = np.ones(eeg.shape[1], bool)
mask[bad_channels] = False
eeg = eeg[:, mask]
logger.info('EEG truncated shape: {} x {}'.format(*eeg.shape))
else:
|
# instead of 0.016 as for the audio, resulting in 4 more frames. Cutting off at the
# beginning aligns the audio to the current frame.
# Quantize the logMel spectrogram
medians, borders, q_spectrogram = quantization(y_train, nb_intervals=9)
# Feature selection using correlation
select = feature_selection(x_train, y_train)
x_train = x_train[:, select]
estimators = [LinearDiscriminantAnalysis() for _ in range(nb_mel_bins)]
y_train = q_spectrogram
logger.info('x_train: ' + str(x_train.shape))
logger.info('y_train: ' + str(y_train.shape))
# just in case there is still a difference in the number of samples
minimum = min(len(x_train), len(y_train))
x_train = x_train[0:minimum, :]
y_train = y_train[0:minimum, :]
train_estimators(estimators=estimators, x_train=x_train, y_train=y_train)
return x_train, y_train, medians, estimators, select
def store_training_to_file(config, x_train, y_train, medians, estimators, bad_channels, select):
if config.getboolean('Training', 'draw_plots'):
# visualize train data
filename = '.'.join(['trainset', 'png'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
d_spectrogram = dequantize_spectrogram(y_train, medians)
visualize_train_data(x_train=x_train, y_train=d_spectrogram, filename=filename)
# save model parameters to file
filename = '.'.join(['LDAs', 'pkl'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
pickle.dump(estimators, open(filename, 'wb'))
# Store training features for activation plot
filename = '.'.join(['training_features', 'npy'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
np.save(filename, x_train)
# store model parameters
filename = '.'.join(['params', 'h5'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with h5py.File(filename, 'w') as hf:
hf.create_dataset('bad_channels', data=bad_channels)
hf.create_dataset('medians_array', data=medians)
hf.create_dataset('estimators', data=np.void(pickle.dumps(estimators)))
hf.create_dataset('select', data=select)
# Save used config file
filename = '.'.join(['train', 'ini'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with open(filename, 'w') as configfile:
config.write(configfile)
logger.info('Training configuration written to {}'.format(filename))
logger.info('Training completed.')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train linear discriminant models on aligned neural and audio data.')
parser.add_argument('config', help='Path to config file.')
parser.add_argument('--file', help='Comma separated XDF files containing the sEEG data and time aligned audio.')
parser.add_argument('--session', help='Name of the Session.')
parser.add_argument('--storage_dir', help='Path to the storage_dir.')
parser.add_argument('--channels', help='Specify which channels should be used during training and decoding. '
'Accepts a list of regex expressions. The channels will be selected '
'if they match at least one expression. Each regex expression is '
'enclosed in ^EXPRESSION$ to limit its influence.')
args = parser.parse_args()
# initialize the config parser
if not os.path.exists(args.config):
print('WARNING: File path to the config file is invalid. Please specify a proper path. Script will exit!')
exit(1)
config = configparser.ConfigParser()
config.read(args.config)
# if optional script arguments change arguments set in config, update them
if args.file is not None:
config['Training']['file'] = args.file
if args.session is not None:
config['General']['session'] = args.session
if args.storage_dir is not None:
config['General']['storage_dir'] = args.storage_dir
if args.channels is not None:
config['Training']['channels'] = args.channels
xdf_files = config['Training']['file'].split(',')
# create the directory path for storing the session
session_dir = os.path.join(config['General']['storage_dir'], config['General']['session'])
try:
os.makedirs(session_dir, exist_ok=config['Training']['overwrite_on_rerun'] == 'True')
except FileExistsError:
print('The directory path "{}" could not be created, since it is already present and the parameter '
'"overwrite_on_rerun" in the "Training" section is set to False. '
'Script will exit!'.format(session_dir))
exit(1)
# initialize logging handler
log_file = '.'.join(['train', 'log'])
log_file = os.path.join(config['General']['storage_dir'], config['General']['session'], log
|
logger.info('No bad channels specified.')
x_train, y_train = compute_features(eeg, sfreq_eeg, audio, sfreq_audio)
y_train = y_train[20:-4] # Skip 24 samples to align the neural signals to the audio. 20 frames are needed
# first to have all context for one sample. In addition, the window length is 0.05 sec
|
random_line_split
|
train.py
|
0].coef_)))
for i, est in enumerate(estimators):
coeffs[i] = est.coef_
fig = plt.figure(figsize=(5.5, 5))
ax = fig.add_subplot(111)
m1 = ax.imshow(coeffs.T, aspect='auto', origin='lower')
ax.set_title('Visualization of the linear discriminant coefficients')
ax.set_xlabel('Linear discriminant models')
ax.set_ylabel('Coefficients')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(m1, cax=cax, orientation='vertical')
plt.tight_layout()
plt.savefig(filename, dpi=300)
@benchmark
def process_samples_in_network(eeg_sender, aud_sender):
"""
Computes the ECoG and LogMel features using the node based approach
"""
eeg_sender.start_processing()
aud_sender.start_processing()
eeg_sender.wait_for_completion()
aud_sender.wait_for_completion()
@benchmark
def
|
(y_train, nb_intervals=8):
"""
Quantize the logMel spectrogram
"""
medians, borders = compute_borders_logistic(y_train, nb_intervals=nb_intervals)
q_spectrogram = quantize_spectrogram(y_train, borders)
# log a note if a spec bin does not contain samples for an interval
for i in range(q_spectrogram.shape[1]):
diff = np.setdiff1d(np.arange(0, nb_intervals), q_spectrogram[:, i])
if diff.size > 0:
logger.info('Spec_bin "{}" misses samples for interval index/indices "{}"'.format(i, str(diff)))
return medians, borders, q_spectrogram
@benchmark
def feature_selection(x_train, y_train, nb_feats=150):
"""
Feature selection using correlation
"""
cs = np.zeros(x_train.shape[1])
for f in range(x_train.shape[1]):
if np.isclose(np.sum(x_train[:, f]), 0):
cs[f] = 0
continue
cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))
select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]
return select
@benchmark
def train_estimators(estimators, x_train, y_train):
for mel_bin in range(len(estimators)):
estimators[mel_bin].fit(x_train, y_train[:, mel_bin])
if (mel_bin + 1) % 5 == 0:
logger.info('{:02d} LDAs fitted so far.'.format(mel_bin + 1))
@benchmark
def compute_features(eeg, sfreq_eeg, audio, audio_sr):
x_train = herff2016_b(eeg, sfreq_eeg, 0.05, 0.01)
# resample audio to 16kHz
audio = decimate(audio, 3)
audio_sr = 16000
y_train = compute_spectrogram(audio, audio_sr, 0.016, 0.01)
return x_train, y_train
def train(eeg, audio, sfreq_eeg, sfreq_audio, bad_channels, nb_mel_bins=40):
# exclude bad channels
if len(bad_channels) > 0:
logger.info('EEG original shape: {} x {}'.format(*eeg.shape))
mask = np.ones(eeg.shape[1], bool)
mask[bad_channels] = False
eeg = eeg[:, mask]
logger.info('EEG truncated shape: {} x {}'.format(*eeg.shape))
else:
logger.info('No bad channels specified.')
x_train, y_train = compute_features(eeg, sfreq_eeg, audio, sfreq_audio)
y_train = y_train[20:-4] # Skip 24 samples to align the neural signals to the audio. 20 frames are needed
# first to have all context for one sample. In addition, the window length is 0.05 sec
# instead of 0.016 as for the audio, resulting in 4 more frames. Cutting off at the
# beginning aligns the audio to the current frame.
# Quantize the logMel spectrogram
medians, borders, q_spectrogram = quantization(y_train, nb_intervals=9)
# Feature selection using correlation
select = feature_selection(x_train, y_train)
x_train = x_train[:, select]
estimators = [LinearDiscriminantAnalysis() for _ in range(nb_mel_bins)]
y_train = q_spectrogram
logger.info('x_train: ' + str(x_train.shape))
logger.info('y_train: ' + str(y_train.shape))
# just in case there is still a difference in the number of samples
minimum = min(len(x_train), len(y_train))
x_train = x_train[0:minimum, :]
y_train = y_train[0:minimum, :]
train_estimators(estimators=estimators, x_train=x_train, y_train=y_train)
return x_train, y_train, medians, estimators, select
def store_training_to_file(config, x_train, y_train, medians, estimators, bad_channels, select):
if config.getboolean('Training', 'draw_plots'):
# visualize train data
filename = '.'.join(['trainset', 'png'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
d_spectrogram = dequantize_spectrogram(y_train, medians)
visualize_train_data(x_train=x_train, y_train=d_spectrogram, filename=filename)
# save model parameters to file
filename = '.'.join(['LDAs', 'pkl'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
pickle.dump(estimators, open(filename, 'wb'))
# Store training features for activation plot
filename = '.'.join(['training_features', 'npy'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
np.save(filename, x_train)
# store model parameters
filename = '.'.join(['params', 'h5'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with h5py.File(filename, 'w') as hf:
hf.create_dataset('bad_channels', data=bad_channels)
hf.create_dataset('medians_array', data=medians)
hf.create_dataset('estimators', data=np.void(pickle.dumps(estimators)))
hf.create_dataset('select', data=select)
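# Round-trip sketch (an assumption for illustration): np.void stores the raw pickle
# bytes, which can be recovered later via
# with h5py.File(filename, 'r') as hf:
#     estimators = pickle.loads(hf['estimators'][()].tobytes())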
# Save used config file
filename = '.'.join(['train', 'ini'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with open(filename, 'w') as configfile:
config.write(configfile)
logger.info('Training configuration written to {}'.format(filename))
logger.info('Training completed.')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train linear regression models on aligned neural and audio data.')
parser.add_argument('config', help='Path to config file.')
parser.add_argument('--file', help='Comma separated XDF files containing the sEEG data and time aligned audio.')
parser.add_argument('--session', help='Name of the Session.')
parser.add_argument('--storage_dir', help='Path to the storage_dir.')
parser.add_argument('--channels', help='Specify which channels should be used during training and decoding. '
'Accepts a list of regex expressions. A channel is selected '
'if it matches at least one expression. Each regex expression is '
'enclosed in ^EXPRESSION$ to limit its scope.')
args = parser.parse_args()
# initialize the config parser
if not os.path.exists(args.config):
print('WARNING: File path to the config file is invalid. Please specify a proper path. Script will exit!')
exit(1)
config = configparser.ConfigParser()
config.read(args.config)
# if optional script arguments change arguments set in config, update them
if args.file is not None:
config['Training']['file'] = args.file
if args.session is not None:
config['General']['session'] = args.session
if args.storage_dir is not None:
config['General']['storage_dir'] = args.storage_dir
if args.channels is not None:
config['Training']['channels'] = args.channels
xdf_files = config['Training']['file'].split(',')
# create the directory path for storing the session
session_dir = os.path.join(config['General']['storage_dir'], config['General']['session'])
try:
os.makedirs(session_dir, exist_ok=config['Training']['overwrite_on_rerun'] == 'True')
except FileExistsError:
print('The directory path "{}" could not be created, since it is already present and the parameter '
'"overwrite_on_rerun" in the "Training" section is set to False. '
'Script will exit!'.format(session_dir))
exit(1)
# initialize logging handler
log_file = '.'.join(['train', 'log'])
log_file = os.path.join(config['General']['storage_dir'], config['General']['session'],
|
quantization
|
identifier_name
|
train.py
|
0].coef_)))
for i, est in enumerate(estimators):
coeffs[i] = est.coef_
fig = plt.figure(figsize=(5.5, 5))
ax = fig.add_subplot(111)
m1 = ax.imshow(coeffs.T, aspect='auto', origin='lower')
ax.set_title('Visualization of the linear regression coefficients')
ax.set_xlabel('Linear regression models')
ax.set_ylabel('Coefficients')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(m1, cax=cax, orientation='vertical')
plt.tight_layout()
plt.savefig(filename, dpi=300)
@benchmark
def process_samples_in_network(eeg_sender, aud_sender):
"""
Computes the ECoG and LogMel features using the node-based approach
"""
eeg_sender.start_processing()
aud_sender.start_processing()
eeg_sender.wait_for_completion()
aud_sender.wait_for_completion()
@benchmark
def quantization(y_train, nb_intervals=8):
"""
Quantize the logMel spectrogram
"""
medians, borders = compute_borders_logistic(y_train, nb_intervals=nb_intervals)
q_spectrogram = quantize_spectrogram(y_train, borders)
# log a message if a spec bin does not contain samples for an interval
for i in range(q_spectrogram.shape[1]):
diff = np.setdiff1d(np.arange(0, nb_intervals), q_spectrogram[:, i])
if diff.size > 0:
logger.info('Spec_bin "{}" is missing samples for interval index/indices "{}"'.format(i, str(diff)))
return medians, borders, q_spectrogram
@benchmark
def feature_selection(x_train, y_train, nb_feats=150):
"""
Feature selection using correlation
"""
cs = np.zeros(x_train.shape[1])
for f in range(x_train.shape[1]):
if np.isclose(np.sum(x_train[:, f]), 0):
cs[f] = 0
continue
cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))
select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]
return select
@benchmark
def train_estimators(estimators, x_train, y_train):
for mel_bin in range(len(estimators)):
estimators[mel_bin].fit(x_train, y_train[:, mel_bin])
if (mel_bin + 1) % 5 == 0:
logger.info('{:02d} LDAs fitted so far.'.format(mel_bin + 1))
@benchmark
def compute_features(eeg, sfreq_eeg, audio, audio_sr):
|
def train(eeg, audio, sfreq_eeg, sfreq_audio, bad_channels, nb_mel_bins=40):
# exclude bad channels
if len(bad_channels) > 0:
logger.info('EEG original shape: {} x {}'.format(*eeg.shape))
mask = np.ones(eeg.shape[1], bool)
mask[bad_channels] = False
eeg = eeg[:, mask]
logger.info('EEG truncated shape: {} x {}'.format(*eeg.shape))
else:
logger.info('No bad channels specified.')
x_train, y_train = compute_features(eeg, sfreq_eeg, audio, sfreq_audio)
y_train = y_train[20:-4]  # Skip 24 samples to align the neural signals to the audio: 20 frames are needed
# at the start to have the full context for one sample. In addition, the window length is 0.05 sec
# instead of 0.016 sec as for the audio, resulting in (0.05 - 0.016) / 0.01 ~ 4 extra frames.
# Cutting the 20 off at the beginning aligns the audio to the current frame; the 4 extra frames are dropped at the end.
# Quantize the logMel spectrogram
medians, borders, q_spectrogram = quantization(y_train, nb_intervals=9)
# Feature selection using correlation
select = feature_selection(x_train, y_train)
x_train = x_train[:, select]
estimators = [LinearDiscriminantAnalysis() for _ in range(nb_mel_bins)]
y_train = q_spectrogram
logger.info('x_train: ' + str(x_train.shape))
logger.info('y_train: ' + str(y_train.shape))
# just in case there is still a difference in the number of samples
minimum = min(len(x_train), len(y_train))
x_train = x_train[0:minimum, :]
y_train = y_train[0:minimum, :]
train_estimators(estimators=estimators, x_train=x_train, y_train=y_train)
return x_train, y_train, medians, estimators, select
def store_training_to_file(config, x_train, y_train, medians, estimators, bad_channels, select):
if config.getboolean('Training', 'draw_plots'):
# visualize train data
filename = '.'.join(['trainset', 'png'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
d_spectrogram = dequantize_spectrogram(y_train, medians)
visualize_train_data(x_train=x_train, y_train=d_spectrogram, filename=filename)
# save model parameters to file
filename = '.'.join(['LDAs', 'pkl'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
pickle.dump(estimators, open(filename, 'wb'))
# Store training features for activation plot
filename = '.'.join(['training_features', 'npy'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
np.save(filename, x_train)
# store model parameters
filename = '.'.join(['params', 'h5'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with h5py.File(filename, 'w') as hf:
hf.create_dataset('bad_channels', data=bad_channels)
hf.create_dataset('medians_array', data=medians)
hf.create_dataset('estimators', data=np.void(pickle.dumps(estimators)))
hf.create_dataset('select', data=select)
# Save used config file
filename = '.'.join(['train', 'ini'])
filename = os.path.join(config['General']['storage_dir'], config['General']['session'], filename)
with open(filename, 'w') as configfile:
config.write(configfile)
logger.info('Training configuration written to {}'.format(filename))
logger.info('Training completed.')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train linear regression models on aligned neural and audio data.')
parser.add_argument('config', help='Path to config file.')
parser.add_argument('--file', help='Comma separated XDF files containing the sEEG data and time aligned audio.')
parser.add_argument('--session', help='Name of the Session.')
parser.add_argument('--storage_dir', help='Path to the storage_dir.')
parser.add_argument('--channels', help='Specify which channels should be used during training and decoding. '
'Accepts a list of regex expressions. A channel is selected '
'if it matches at least one expression. Each regex expression is '
'enclosed in ^EXPRESSION$ to limit its scope.')
args = parser.parse_args()
# initialize the config parser
if not os.path.exists(args.config):
print('WARNING: File path to the config file is invalid. Please specify a proper path. Script will exit!')
exit(1)
config = configparser.ConfigParser()
config.read(args.config)
# if optional script arguments change arguments set in config, update them
if args.file is not None:
config['Training']['file'] = args.file
if args.session is not None:
config['General']['session'] = args.session
if args.storage_dir is not None:
config['General']['storage_dir'] = args.storage_dir
if args.channels is not None:
config['Training']['channels'] = args.channels
xdf_files = config['Training']['file'].split(',')
# create the directory path for storing the session
session_dir = os.path.join(config['General']['storage_dir'], config['General']['session'])
try:
os.makedirs(session_dir, exist_ok=config['Training']['overwrite_on_rerun'] == 'True')
except FileExistsError:
print('The directory path "{}" could not be created, since it is already present and the parameter '
'"overwrite_on_rerun" in the "Training" section is set to False. '
'Script will exit!'.format(session_dir))
exit(1)
# initialize logging handler
log_file = '.'.join(['train', 'log'])
log_file = os.path.join(config['General']['storage_dir'], config['General']['session'], log
|
x_train = herff2016_b(eeg, sfreq_eeg, 0.05, 0.01)
# resample audio to 16kHz
audio = decimate(audio, 3)
audio_sr = 16000
y_train = compute_spectrogram(audio, audio_sr, 0.016, 0.01)
return x_train, y_train
|
identifier_body
|
mod.rs
|
// allocate some memory to write our instructions
let size = size * PAGE_SIZE;
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
let contents = unsafe {
let raw = alloc(layout);
write_bytes(raw, 0xc3, size);
libc::mprotect(raw as *mut libc::c_void, size, libc::PROT_NONE);
raw
};
Program { contents, size }
}
pub fn into_sliceable(self) -> SliceableProgram {
SliceableProgram::new(self)
}
pub fn into_callable(self) -> CallableProgram {
CallableProgram::new(self)
}
}
impl Drop for Program {
fn drop(&mut self) {
let layout = Layout::from_size_align(self.size, PAGE_SIZE).unwrap();
unsafe {
dealloc(self.contents, layout);
}
}
}
pub struct SliceableProgram {
program: Program,
}
impl SliceableProgram {
pub fn new(program: Program) -> Self {
unsafe {
libc::mprotect(
program.contents as *mut libc::c_void,
program.size,
libc::PROT_READ | libc::PROT_WRITE,
);
}
SliceableProgram { program }
}
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.program.contents, self.program.size) }
}
pub fn as_mut_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.program.contents, self.program.size) }
}
pub fn lock(self) -> Program {
unsafe {
libc::mprotect(
self.program.contents as *mut libc::c_void,
self.program.size,
libc::PROT_NONE,
);
}
self.program
}
}
pub struct CallableProgram {
program: Program,
}
impl CallableProgram {
pub fn new(program: Program) -> Self {
unsafe {
libc::mprotect(
program.contents as *mut libc::c_void,
program.size,
libc::PROT_READ | libc::PROT_EXEC,
);
}
CallableProgram { program }
}
pub fn as_function(
&mut self,
) -> unsafe extern "C" fn(
*mut u8,
*mut c_void,
*mut WriteWrapper,
*mut c_void,
*mut ReadWrapper,
) -> i32 {
unsafe { transmute(self.program.contents) }
}
pub fn lock(self) -> Program {
self.program
}
}
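// Usage sketch (hypothetical caller -- the cell array and wrapper setup are
// assumptions for illustration; `transform` is defined below):
// let program = transform(&instructions);
// let mut callable = program.into_callable();
// let f = callable.as_function();
// let status = unsafe { f(cells.as_mut_ptr(), write_fn, &mut ww, read_fn, &mut rw) };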
#[derive(Debug)]
struct JumpInfo {
asm_offset: usize,
target: usize,
}
pub fn
|
(instructions: &[Instruction]) -> Program {
// we'll emit something that respects x86_64 system-v:
// rdi (1st parameter): pointer to cell array
// rsi (2nd parameter): pointer to output function
// rdx (3rd parameter): pointer to WriteWrapper
// rcx (4th parameter): pointer to input function
// r8 (5th parameter): pointer to ReadWrapper
let program = Program::new(8);
let mut sliceable = program.into_sliceable();
let slice = sliceable.as_mut_slice();
let mut emitter = x86::Emitter::new(slice);
// we receive a stack that's misaligned by 8 bytes at the start of the function
// we always push one argument onto it before each call and that aligns it :)
// move arguments to saved registers
// rsi -> rbp
// rdx -> r12
// rcx -> r13
// r8 -> r14
emitter.push(x86::Register::Rbp);
emitter.push(x86::Register::R12);
emitter.push(x86::Register::R13);
emitter.push(x86::Register::R14);
emitter.mov64_reg(x86::Register::Rbp, x86::Register::Rsi);
emitter.mov64_reg(x86::Register::R12, x86::Register::Rdx);
emitter.mov64_reg(x86::Register::R13, x86::Register::Rcx);
emitter.mov64_reg(x86::Register::R14, x86::Register::R8);
let mut jumps = BTreeMap::new();
for (idx, instr) in instructions.iter().enumerate() {
match instr {
Instruction::IncrementPointer(inc) => {
if inc.is_positive() {
emitter.addu8_reg(x86::Register::Rdi, *inc as u8);
} else if inc.is_negative() {
emitter.subu8_reg(x86::Register::Rdi, -*inc as u8);
}
}
Instruction::IncrementByte(inc) => {
if inc.is_positive() {
emitter.addu8_ptr(x86::Register::Rdi, *inc as u8);
} else if inc.is_negative() {
emitter.subu8_ptr(x86::Register::Rdi, -*inc as u8);
}
}
Instruction::IncrementPointerAndByte(pointer_inc, byte_inc) => {
if byte_inc.is_positive() {
emitter.addu8_ptr_u8disp(
x86::Register::Rdi,
*pointer_inc as u8,
*byte_inc as u8,
);
} else if byte_inc.is_negative() {
emitter.subu8_ptr_u8disp(
x86::Register::Rdi,
*pointer_inc as u8,
-*byte_inc as u8,
);
}
if pointer_inc.is_positive() {
emitter.addu8_reg(x86::Register::Rdi, *pointer_inc as u8);
} else if pointer_inc.is_negative() {
emitter.subu8_reg(x86::Register::Rdi, -*pointer_inc as u8);
}
}
// The way I've implemented jumps is terribly hacky. I should probably find a better solution someday
Instruction::JumpBackwardsIfNotZero(jmp) => {
emitter.cmpu8_ptr(x86::Register::Rdi, 0);
let jumpinfo = JumpInfo {
target: idx - jmp,
asm_offset: emitter.index,
};
jumps.insert(idx, jumpinfo);
// bogus temp value
emitter.jneu32(42);
}
Instruction::JumpForwardsIfZero(jmp) => {
emitter.cmpu8_ptr(x86::Register::Rdi, 0);
let jumpinfo = JumpInfo {
target: idx + jmp,
asm_offset: emitter.index,
};
jumps.insert(idx, jumpinfo);
// bogus temp value
emitter.jeu32(42);
}
Instruction::OutputByte => {
// move ptr to WriteWrapper to Rsi
emitter.mov64_reg(x86::Register::Rsi, x86::Register::R12);
emitter.push(x86::Register::Rdi);
emitter.call64(x86::Register::Rbp);
emitter.pop(x86::Register::Rdi);
}
Instruction::ReadByte => {
// move ptr to ReadWrapper to Rsi
emitter.mov64_reg(x86::Register::Rsi, x86::Register::R14);
emitter.push(x86::Register::Rdi);
emitter.call64(x86::Register::R13);
emitter.pop(x86::Register::Rdi);
}
}
}
emitter.pop(x86::Register::R14);
emitter.pop(x86::Register::R13);
emitter.pop(x86::Register::R12);
emitter.pop(x86::Register::Rbp);
for jumpinfo in jumps.values() {
let target = jumps.get(&jumpinfo.target).unwrap();
// this is kinda nuts, but I'll try to explain
// we encode jumps as x86 *near* (used to be short but brainfuck hates me) jumps,
// which are *six* bytes: two opcode bytes and a four-byte rel32 offset counted from the NEXT instruction
// we do this indexing craziness to rewrite our placeholder offset: both jump sites recorded their
// offset *before* emitting their own 6-byte jump, so the plain difference below is exactly the
// rel32 that lands execution just past the matching bracket's jump
// (MIPS branch offsets are similarly relative to the next instruction)
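// byte layout sketch (a near `jne` as emitted by jneu32, to the best of my reading):
//   slice[o+0..=o+1] = 0F 85 (opcode; 0F 84 for `je`), slice[o+2..=o+5] = rel32, little-endian
// which is why the patch below writes the le_bytes into offsets +2 .. +5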
let offset = (target.asm_offset as isize) - (jumpinfo.asm_offset as isize);
let le_bytes = i32::try_from(offset)
.expect("offset overflowed i32")
.to_le_bytes();
slice[jumpinfo.asm_offset + 2] = le_bytes[0];
slice[jumpinfo.asm_offset + 3] = le_bytes[1];
slice[jumpinfo.asm_offset + 4] = le_bytes[2];
slice[jumpinfo.asm_offset + 5] = le_bytes[3];
}
sliceable.lock()
}
unsafe extern "C" fn write_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut WriteWrapper) {
let wrapper = &*wrapper_ptr;
let output = &mut *wrapper.write;
let byte = *byte_ptr;
output.write_all(&[byte]).unwrap();
}
unsafe extern "C" fn read_trampoline(byte_ptr: *mut u8, wrapper_ptr:
|
transform
|
identifier_name
|
mod.rs
|
allocate some memory to write our instructions
let size = size * PAGE_SIZE;
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
let contents = unsafe {
let raw = alloc(layout);
write_bytes(raw, 0xc3, size);
libc::mprotect(raw as *mut libc::c_void, size, libc::PROT_NONE);
raw
};
Program { contents, size }
}
pub fn into_sliceable(self) -> SliceableProgram {
SliceableProgram::new(self)
}
pub fn into_callable(self) -> CallableProgram {
CallableProgram::new(self)
}
}
impl Drop for Program {
fn drop(&mut self) {
let layout = Layout::from_size_align(self.size, PAGE_SIZE).unwrap();
unsafe {
dealloc(self.contents, layout);
}
}
}
pub struct SliceableProgram {
program: Program,
}
impl SliceableProgram {
pub fn new(program: Program) -> Self {
unsafe {
libc::mprotect(
program.contents as *mut libc::c_void,
program.size,
libc::PROT_READ | libc::PROT_WRITE,
);
}
SliceableProgram { program }
}
pub fn as_slice(&self) -> &[u8]
|
pub fn as_mut_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.program.contents, self.program.size) }
}
pub fn lock(self) -> Program {
unsafe {
libc::mprotect(
self.program.contents as *mut libc::c_void,
self.program.size,
libc::PROT_NONE,
);
}
self.program
}
}
pub struct CallableProgram {
program: Program,
}
impl CallableProgram {
pub fn new(program: Program) -> Self {
unsafe {
libc::mprotect(
program.contents as *mut libc::c_void,
program.size,
libc::PROT_READ | libc::PROT_EXEC,
);
}
CallableProgram { program }
}
pub fn as_function(
&mut self,
) -> unsafe extern "C" fn(
*mut u8,
*mut c_void,
*mut WriteWrapper,
*mut c_void,
*mut ReadWrapper,
) -> i32 {
unsafe { transmute(self.program.contents) }
}
pub fn lock(self) -> Program {
self.program
}
}
#[derive(Debug)]
struct JumpInfo {
asm_offset: usize,
target: usize,
}
pub fn transform(instructions: &[Instruction]) -> Program {
// we'll emit something that respects x86_64 system-v:
// rdi (1st parameter): pointer to cell array
// rsi (2nd parameter): pointer to output function
// rdx (3rd parameter): pointer to WriteWrapper
// rcx (4th parameter): pointer to input function
// r8 (5th parameter): pointer to ReadWrapper
let program = Program::new(8);
let mut sliceable = program.into_sliceable();
let slice = sliceable.as_mut_slice();
let mut emitter = x86::Emitter::new(slice);
// we receive a stack that's misaligned by 8 bytes at the start of the function
// we always push one argument onto it before each call and that aligns it :)
// move arguments to saved registers
// rsi -> rbp
// rdx -> r12
// rcx -> r13
// r8 -> r14
emitter.push(x86::Register::Rbp);
emitter.push(x86::Register::R12);
emitter.push(x86::Register::R13);
emitter.push(x86::Register::R14);
emitter.mov64_reg(x86::Register::Rbp, x86::Register::Rsi);
emitter.mov64_reg(x86::Register::R12, x86::Register::Rdx);
emitter.mov64_reg(x86::Register::R13, x86::Register::Rcx);
emitter.mov64_reg(x86::Register::R14, x86::Register::R8);
let mut jumps = BTreeMap::new();
for (idx, instr) in instructions.iter().enumerate() {
match instr {
Instruction::IncrementPointer(inc) => {
if inc.is_positive() {
emitter.addu8_reg(x86::Register::Rdi, *inc as u8);
} else if inc.is_negative() {
emitter.subu8_reg(x86::Register::Rdi, -*inc as u8);
}
}
Instruction::IncrementByte(inc) => {
if inc.is_positive() {
emitter.addu8_ptr(x86::Register::Rdi, *inc as u8);
} else if inc.is_negative() {
emitter.subu8_ptr(x86::Register::Rdi, -*inc as u8);
}
}
Instruction::IncrementPointerAndByte(pointer_inc, byte_inc) => {
if byte_inc.is_positive() {
emitter.addu8_ptr_u8disp(
x86::Register::Rdi,
*pointer_inc as u8,
*byte_inc as u8,
);
} else if byte_inc.is_negative() {
emitter.subu8_ptr_u8disp(
x86::Register::Rdi,
*pointer_inc as u8,
-*byte_inc as u8,
);
}
if pointer_inc.is_positive() {
emitter.addu8_reg(x86::Register::Rdi, *pointer_inc as u8);
} else if pointer_inc.is_negative() {
emitter.subu8_reg(x86::Register::Rdi, -*pointer_inc as u8);
}
}
// The way I've implemented jumps is terribly hacky. I should probably find a better solution someday
Instruction::JumpBackwardsIfNotZero(jmp) => {
emitter.cmpu8_ptr(x86::Register::Rdi, 0);
let jumpinfo = JumpInfo {
target: idx - jmp,
asm_offset: emitter.index,
};
jumps.insert(idx, jumpinfo);
// bogus temp value
emitter.jneu32(42);
}
Instruction::JumpForwardsIfZero(jmp) => {
emitter.cmpu8_ptr(x86::Register::Rdi, 0);
let jumpinfo = JumpInfo {
target: idx + jmp,
asm_offset: emitter.index,
};
jumps.insert(idx, jumpinfo);
// bogus temp value
emitter.jeu32(42);
}
Instruction::OutputByte => {
// move ptr to WriteWrapper to Rsi
emitter.mov64_reg(x86::Register::Rsi, x86::Register::R12);
emitter.push(x86::Register::Rdi);
emitter.call64(x86::Register::Rbp);
emitter.pop(x86::Register::Rdi);
}
Instruction::ReadByte => {
// move ptr to ReadWrapper to Rsi
emitter.mov64_reg(x86::Register::Rsi, x86::Register::R14);
emitter.push(x86::Register::Rdi);
emitter.call64(x86::Register::R13);
emitter.pop(x86::Register::Rdi);
}
}
}
emitter.pop(x86::Register::R14);
emitter.pop(x86::Register::R13);
emitter.pop(x86::Register::R12);
emitter.pop(x86::Register::Rbp);
for jumpinfo in jumps.values() {
let target = jumps.get(&jumpinfo.target).unwrap();
// this is kinda nuts, but I'll try to explain
// we encode jumps as x86 *near* (used to be short but brainfuck hates me) jumps,
// which are *six* bytes: two opcode bytes and a four-byte rel32 offset counted from the NEXT instruction
// we do this indexing craziness to rewrite our placeholder offset: both jump sites recorded their
// offset *before* emitting their own 6-byte jump, so the plain difference below is exactly the
// rel32 that lands execution just past the matching bracket's jump
// (MIPS branch offsets are similarly relative to the next instruction)
let offset = (target.asm_offset as isize) - (jumpinfo.asm_offset as isize);
let le_bytes = i32::try_from(offset)
.expect("offset overflowed i32")
.to_le_bytes();
slice[jumpinfo.asm_offset + 2] = le_bytes[0];
slice[jumpinfo.asm_offset + 3] = le_bytes[1];
slice[jumpinfo.asm_offset + 4] = le_bytes[2];
slice[jumpinfo.asm_offset + 5] = le_bytes[3];
}
sliceable.lock()
}
unsafe extern "C" fn write_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut WriteWrapper) {
let wrapper = &*wrapper_ptr;
let output = &mut *wrapper.write;
let byte = *byte_ptr;
output.write_all(&[byte]).unwrap();
}
unsafe extern "C" fn read_trampoline(byte_ptr: *mut u8, wrapper_ptr:
|
{
unsafe { slice::from_raw_parts(self.program.contents, self.program.size) }
}
|
identifier_body
|
mod.rs
|
// allocate some memory to write our instructions
let size = size * PAGE_SIZE;
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
let contents = unsafe {
let raw = alloc(layout);
write_bytes(raw, 0xc3, size);
libc::mprotect(raw as *mut libc::c_void, size, libc::PROT_NONE);
raw
};
Program { contents, size }
}
pub fn into_sliceable(self) -> SliceableProgram {
SliceableProgram::new(self)
}
pub fn into_callable(self) -> CallableProgram {
CallableProgram::new(self)
}
}
impl Drop for Program {
fn drop(&mut self) {
let layout = Layout::from_size_align(self.size, PAGE_SIZE).unwrap();
unsafe {
dealloc(self.contents, layout);
}
}
}
pub struct SliceableProgram {
program: Program,
}
impl SliceableProgram {
pub fn new(program: Program) -> Self {
unsafe {
libc::mprotect(
program.contents as *mut libc::c_void,
program.size,
libc::PROT_READ | libc::PROT_WRITE,
);
}
SliceableProgram { program }
}
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.program.contents, self.program.size) }
|
unsafe { slice::from_raw_parts_mut(self.program.contents, self.program.size) }
}
pub fn lock(self) -> Program {
unsafe {
libc::mprotect(
self.program.contents as *mut libc::c_void,
self.program.size,
libc::PROT_NONE,
);
}
self.program
}
}
pub struct CallableProgram {
program: Program,
}
impl CallableProgram {
pub fn new(program: Program) -> Self {
unsafe {
libc::mprotect(
program.contents as *mut libc::c_void,
program.size,
libc::PROT_READ | libc::PROT_EXEC,
);
}
CallableProgram { program }
}
pub fn as_function(
&mut self,
) -> unsafe extern "C" fn(
*mut u8,
*mut c_void,
*mut WriteWrapper,
*mut c_void,
*mut ReadWrapper,
) -> i32 {
unsafe { transmute(self.program.contents) }
}
pub fn lock(self) -> Program {
self.program
}
}
#[derive(Debug)]
struct JumpInfo {
asm_offset: usize,
target: usize,
}
pub fn transform(instructions: &[Instruction]) -> Program {
// we'll emit something that respects x86_64 system-v:
// rdi (1st parameter): pointer to cell array
// rsi (2nd parameter): pointer to output function
// rdx (3rd parameter): pointer to WriteWrapper
// rcx (4th parameter): pointer to input function
// r8 (5th parameter): pointer to ReadWrapper
let program = Program::new(8);
let mut sliceable = program.into_sliceable();
let slice = sliceable.as_mut_slice();
let mut emitter = x86::Emitter::new(slice);
// we receive a stack that's misaligned by 8 bytes at the start of the function
// we always push one argument onto it before each call and that aligns it :)
// move arguments to saved registers
// rsi -> rbp
// rdx -> r12
// rcx -> r13
// r8 -> r14
emitter.push(x86::Register::Rbp);
emitter.push(x86::Register::R12);
emitter.push(x86::Register::R13);
emitter.push(x86::Register::R14);
emitter.mov64_reg(x86::Register::Rbp, x86::Register::Rsi);
emitter.mov64_reg(x86::Register::R12, x86::Register::Rdx);
emitter.mov64_reg(x86::Register::R13, x86::Register::Rcx);
emitter.mov64_reg(x86::Register::R14, x86::Register::R8);
let mut jumps = BTreeMap::new();
for (idx, instr) in instructions.iter().enumerate() {
match instr {
Instruction::IncrementPointer(inc) => {
if inc.is_positive() {
emitter.addu8_reg(x86::Register::Rdi, *inc as u8);
} else if inc.is_negative() {
emitter.subu8_reg(x86::Register::Rdi, -*inc as u8);
}
}
Instruction::IncrementByte(inc) => {
if inc.is_positive() {
emitter.addu8_ptr(x86::Register::Rdi, *inc as u8);
} else if inc.is_negative() {
emitter.subu8_ptr(x86::Register::Rdi, -*inc as u8);
}
}
Instruction::IncrementPointerAndByte(pointer_inc, byte_inc) => {
if byte_inc.is_positive() {
emitter.addu8_ptr_u8disp(
x86::Register::Rdi,
*pointer_inc as u8,
*byte_inc as u8,
);
} else if byte_inc.is_negative() {
emitter.subu8_ptr_u8disp(
x86::Register::Rdi,
*pointer_inc as u8,
-*byte_inc as u8,
);
}
if pointer_inc.is_positive() {
emitter.addu8_reg(x86::Register::Rdi, *pointer_inc as u8);
} else if pointer_inc.is_negative() {
emitter.subu8_reg(x86::Register::Rdi, -*pointer_inc as u8);
}
}
// The way I've implemented jumps is terribly hacky. I should probably find a better solution someday
Instruction::JumpBackwardsIfNotZero(jmp) => {
emitter.cmpu8_ptr(x86::Register::Rdi, 0);
let jumpinfo = JumpInfo {
target: idx - jmp,
asm_offset: emitter.index,
};
jumps.insert(idx, jumpinfo);
// bogus temp value
emitter.jneu32(42);
}
Instruction::JumpForwardsIfZero(jmp) => {
emitter.cmpu8_ptr(x86::Register::Rdi, 0);
let jumpinfo = JumpInfo {
target: idx + jmp,
asm_offset: emitter.index,
};
jumps.insert(idx, jumpinfo);
// bogus temp value
emitter.jeu32(42);
}
Instruction::OutputByte => {
// move ptr to WriteWrapper to Rsi
emitter.mov64_reg(x86::Register::Rsi, x86::Register::R12);
emitter.push(x86::Register::Rdi);
emitter.call64(x86::Register::Rbp);
emitter.pop(x86::Register::Rdi);
}
Instruction::ReadByte => {
// move ptr to ReadWrapper to Rsi
emitter.mov64_reg(x86::Register::Rsi, x86::Register::R14);
emitter.push(x86::Register::Rdi);
emitter.call64(x86::Register::R13);
emitter.pop(x86::Register::Rdi);
}
}
}
emitter.pop(x86::Register::R14);
emitter.pop(x86::Register::R13);
emitter.pop(x86::Register::R12);
emitter.pop(x86::Register::Rbp);
for jumpinfo in jumps.values() {
let target = jumps.get(&jumpinfo.target).unwrap();
// this is kinda nuts, but I'll try to explain
// we encode jumps as x86 *near* (used to be short but brainfuck hates me) jumps,
// which are *six* bytes: two opcode bytes and a four-byte rel32 offset counted from the NEXT instruction
// we do this indexing craziness to rewrite our placeholder offset: both jump sites recorded their
// offset *before* emitting their own 6-byte jump, so the plain difference below is exactly the
// rel32 that lands execution just past the matching bracket's jump
// (MIPS branch offsets are similarly relative to the next instruction)
let offset = (target.asm_offset as isize) - (jumpinfo.asm_offset as isize);
let le_bytes = i32::try_from(offset)
.expect("offset overflowed i32")
.to_le_bytes();
slice[jumpinfo.asm_offset + 2] = le_bytes[0];
slice[jumpinfo.asm_offset + 3] = le_bytes[1];
slice[jumpinfo.asm_offset + 4] = le_bytes[2];
slice[jumpinfo.asm_offset + 5] = le_bytes[3];
}
sliceable.lock()
}
unsafe extern "C" fn write_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut WriteWrapper) {
let wrapper = &*wrapper_ptr;
let output = &mut *wrapper.write;
let byte = *byte_ptr;
output.write_all(&[byte]).unwrap();
}
unsafe extern "C" fn read_trampoline(byte_ptr: *mut u8, wrapper_ptr: *
|
}
pub fn as_mut_slice(&mut self) -> &mut [u8] {
|
random_line_split
|
csv_orders_with_feedback.py
|
:return:
"""
# self.logger.debug('get tick data: %s', md_dic)
symbol = md_dic['symbol']
# update the latest price
close_cur = md_dic['close']
self.symbol_latest_price_dic[symbol] = close_cur
# decide whether a position adjustment is needed
if self.symbol_target_position_dic is None or symbol not in self.symbol_target_position_dic:
# self.logger.debug("当前 symbol='%s' 无操作", symbol)
return
if self.datetime_last_update_position is None:
self.logger.debug("尚未获取持仓数据,跳过")
return
target_currency = self.trade_agent.get_currency(symbol)
# self.logger.debug('target_position_dic[%s]: %s', symbol, self.target_position_dic[symbol])
# If the current contract has a recent trade confirmation, the confirmation time must precede the position
# query time: this prevents out-of-sync data when a filled order is not yet reflected in the position info
if symbol in self.datetime_last_rtn_trade_dic:
if target_currency not in self.datetime_last_update_position_dic:
logging.debug("持仓数据中没有包含当前合约,最近一次成交回报时间:%s,跳过",
self.datetime_last_rtn_trade_dic[symbol])
self.get_position(symbol, force_refresh=True)
return
if self.datetime_last_rtn_trade_dic[symbol] > self.datetime_last_update_position_dic[target_currency]:
logging.debug("持仓数据尚未更新完成,最近一次成交回报时间:%s 晚于 最近一次持仓更新时间:%s",
self.datetime_last_rtn_trade_dic[symbol],
self.datetime_last_update_position_dic[target_currency])
self.get_position(symbol, force_refresh=True)
return
# executing too frequently can lead to duplicate orders
if symbol in self.symbol_last_deal_datetime:
last_deal_datetime = self.symbol_last_deal_datetime[symbol]
if last_deal_datetime + self.timedelta_between_deal > datetime.now():
# logging.debug("最近一次交易时间:%s,防止交易密度过大,跳过", last_deal_datetime)
return
with self._mutex:
target_position = self.symbol_target_position_dic[symbol]
target_position.check_stop_loss(close_cur)
# self.logger.debug("当前持仓目标:%r", target_position)
# 撤销所有相关订单
self.cancel_order(symbol)
# compute the target direction and trade volume
position_date_pos_info_dic = self.get_position(symbol)
if position_date_pos_info_dic is None:
# no current position but a target exists: open a position straight to the target
# target_direction, target_currency, target_position, symbol, target_price, \
# stop_loss_price, has_stop_loss, gap_threshold_vol = self.get_target_position(symbol)
if not target_position.has_stop_loss:
self.do_order(md_dic, symbol, target_position.position, target_position.price,
target_position.direction, target_position.stop_loss_price, msg='no current position')
else:
# If there is an existing position, two kinds of action are taken:
# 1) if the current position does not match the target, rebalance accordingly
# 2) if the price has crossed the stop-loss level, liquidate the position
position_holding = sum(
[pos_info_dic['balance'] for pos_info_dic in position_date_pos_info_dic.values()])
self.logger.debug('current %s position %f price %.6f', target_position.currency, position_holding, close_cur)
# compare the total current holdings with the target position
# if both the current and the target position are non-empty, a finer comparison is needed
# target_direction, target_currency, target_position, symbol, target_price, \
# stop_loss_price, has_stop_loss, gap_threshold_vol = self.get_target_position(symbol)
if target_position.has_stop_loss:
# stop loss already triggered: keep liquidating any remaining position
self.do_order(md_dic, symbol, -position_holding, None,
target_position.direction, msg="stop loss")
else:
# aggregate all same-direction holdings; if they fall short of the target, add to the position
# close out all opposite-direction holdings
# if holdings exceed the target, close the excess; if they fall short, top up the difference
position_gap = target_position.position - position_holding
if position_gap > target_position.gap_threshold_vol:
if position_holding < target_position.gap_threshold_vol:
msg = 'open position'
else:
msg = "补充仓位"
# top up the shortfall
self.do_order(md_dic, symbol, position_gap, target_position.price,
target_position.direction, target_position.stop_loss_price, msg=msg)
elif position_gap < - target_position.gap_threshold_vol:
if target_position.position == 0:
msg = 'liquidate'
else:
msg = "持仓超过目标仓位,减仓 %.4f" % position_gap
# holdings exceed the target position: close the excess
self.do_order(md_dic, symbol, position_gap, target_position.price,
target_position.direction, target_position.stop_loss_price, msg=msg)
else:
self.logger.debug('current position %f vs target position %f: gap %f is too small, ignoring this adjustment',
position_holding, target_position.position, position_gap)
# update the time of the last execution
self.symbol_last_deal_datetime[symbol] = datetime.now()
def get_symbol_by_currency(self, currency):
"""目前暂时仅支持currency 与 usdt 之间转换"""
return currency + 'usdt'
def calc_vol_and_stop_loss_price(self, symbol, weight, stop_loss_rate=None, gap_threshold_precision=0.01):
"""
Given a weight and the account's total market value, compute the vol for this symbol, and derive the stop-loss price from stop_loss_rate (only long positions are considered for now)
:param symbol:
:param weight:
:param stop_loss_rate:
:param gap_threshold_precision:
:return:
"""
holding_currency_dic = self.get_holding_currency(exclude_usdt=False)
# tot_value = sum([dic['balance'] * self.symbol_latest_price_dic[self.get_symbol_by_currency(currency)]
# for currency, dic in holding_currency_dic.items()])
if symbol not in self.symbol_latest_price_dic or self.symbol_latest_price_dic[symbol] == 0:
self.logger.error('no valid latest price found for %s', symbol)
weight_vol = None
gap_threshold_vol = None
stop_loss_price = None
else:
tot_value = 0
for currency, dic in holding_currency_dic.items():
for pos_date_type, dic_sub in dic.items():
if currency == 'usdt':
tot_value += dic_sub['balance']
else:
tot_value += dic_sub['balance'] * self.symbol_latest_price_dic[
self.get_symbol_by_currency(currency)]
price_latest = self.symbol_latest_price_dic[symbol]
weight_vol = tot_value * weight / price_latest
gap_threshold_vol = tot_value * gap_threshold_precision / price_latest
stop_loss_price = price_latest * (1 + stop_loss_rate)
return weight_vol, gap_threshold_vol, stop_loss_price
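# Worked example (illustrative numbers): with tot_value = 10000 usdt, weight = 0.5 and
# price_latest = 200, weight_vol = 10000 * 0.5 / 200 = 25; with stop_loss_rate = -0.03
# the stop-loss price is 200 * (1 - 0.03) = 194.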
def get_target_position(self, symbol):
dic = self.symbol_target_position_dic[symbol]
return dic['direction'], dic['currency'], dic['position'], dic['symbol'], \
dic['price'], dic['stop_loss_price'], dic.setdefault('has_stop_loss', False), \
dic.setdefault('gap_threshold_vol', None)
def backup_feedback_files(self):
"""
Back up all feedback files
:return:
"""
# get the file list
file_name_list = os.listdir(self._folder_path)
if file_name_list is None:
# self.logger.info('No file')
return
for file_name in file_name_list:
# only process feedback*.csv files
if PATTERN_FEEDBACK_FILE_NAME.search(file_name) is None:
continue
file_base_name, file_extension = os.path.splitext(file_name)
file_path = os.path.join(self._folder_path, file_name)
# back up the file
backup_file_name = f"{file_base_name} {datetime.now().strftime('%Y-%m-%d %H_%M_%S')}" \
f"{file_extension}.bak"
os.rename(file_path, os.path.join(self._folder_path, backup_file_name))
self.logger.info('backed up feedback file %s -> %s', file_name, backup_file_name)
def create_feedback_file(self):
"""
Create a feedback file from symbol_target_position_dic
:return:
"""
symbol_target_position_dic = self.symbol_target_position_dic
data_dic = {}
for key, val in symbol_target_position_dic.items():
val_dic = val.to_dict()
val_dic['direction'] = int(val_dic['direction'])
data_dic[key] = val_dic
file_name = f"feedback_{datetime.now().strftime('%Y-%m-%d %H_%M_%S')}.json"
file_path = os.path.join(self._folder_path, file_name)
with open(file_path, 'w') as file:
json.dump(data_dic, file)
self.logger.info('generated feedback file: %s', file_name)
return file_path
def load_feedback_file(self):
"""
Load the feedback file and update self.symbol_target_position_dic
:return:
"""
|
random_line_split
|
||
csv_orders_with_feedback.py
|
target_currency_set = set(list(position_df['currency']))
holding_currency_dic = self.get_holding_currency()
# check whether all holdings satisfy the target configuration file
is_all_fit_target = True
# sell any currency that is not in the target holdings list
for num, (currency, balance_dic) in enumerate(holding_currency_dic.items(), start=1):
# currency is in the target holdings, no need to liquidate
if currency in target_currency_set:
continue
# hc is a fee-token utility of the exchange, not used for trading
# if currency == 'hc':
# continue
# give up liquidation if the balance is below 0.0001
tot_balance = 0
for _, dic in balance_dic.items():
tot_balance += dic['balance']
if tot_balance < 0.0001:
continue
symbol = self.get_symbol_by_currency(currency)
if symbol_list is not None and symbol not in symbol_list:
self.logger.warning('%s holding %.6f is neither in the current subscription list nor in the target holdings; it will not be touched',
symbol, tot_balance)
continue
self.logger.info('planning to sell %s', symbol)
# TODO: the minimum order size is recorded in the database; this should be improved
gap_threshold_vol = 0.1
symbol_target_position_dic[symbol] = TargetPosition(Direction.Long, currency, 0, symbol,
gap_threshold_vol=gap_threshold_vol)
is_all_fit_target = False
# generate buy instructions for the target holdings list
for num, (currency, position_dic) in enumerate(target_holding_dic.items()):
weight = position_dic['weight']
stop_loss_rate = position_dic['stop_loss_rate']
# stop_loss_price = position_dic['stop_loss_rate']
symbol = self.get_symbol_by_currency(currency)
target_vol, gap_threshold_vol, stop_loss_price = self.calc_vol_and_stop_loss_price(symbol, weight, stop_loss_rate)
if target_vol is None:
self.logger.warning('cannot compute a target position size for %s with weight %.2f %%', currency, weight * 100)
continue
# check whether the current position matches the target; skip if it does
# position_date_pos_info_dic = self.get_position(symbol)
# if position_date_pos_info_dic is not None and len(position_date_pos_info_dic) > 0:
# # a position exists: check whether it meets the target, otherwise issue an instruction
# position_cur = sum([pos_info['balance'] for pos_info in position_date_pos_info_dic.values()])
# position_gap = target_vol - position_cur
# # in live trading an exact match is rare; being within a tolerance band is enough
# if position_gap > gap_threshold_vol:
# # cumulative holdings differ from the target: add a target-position task
# is_all_fit_target = False
# else:
# is_all_fit_target = False
# generate a trade instruction regardless of whether a position exists; the comparison happens at
# execution time (the commented code above does not affect whether an opening instruction is produced)
# long target position
self.logger.info('planning to buy %s, target position: %f, stop-loss price: %f', symbol, target_vol, stop_loss_price)
symbol_target_position_dic[symbol] = TargetPosition(Direction.Long, currency, target_vol, symbol,
None, stop_loss_price,
gap_threshold_vol=gap_threshold_vol)
symbol_target_position_dic_len = len(symbol_target_position_dic)
if symbol_target_position_dic_len > 0:
self.symbol_target_position_dic = symbol_target_position_dic
self.logger.info('new target position instructions found:')
self.logger_symbol_target_position_dic()
# generate the feedback file
self.create_feedback_file()
else:
self.symbol_target_position_dic = None
self.logger.debug('no position adjustment instructions')
def logger_symbol_target_position_dic(self):
"""
Display the current target position info
:return:
"""
symbol_target_position_dic_len = len(self.symbol_target_position_dic)
for num, (key, val) in enumerate(self.symbol_target_position_dic.items()):
self.logger.info('%d/%d) %s, %r', num, symbol_target_position_dic_len, key, val)
def on_timer(self):
"""
Check the files every 15 seconds:
1) check Wang Chun's backtest file: if the date inside the latest "TradeBookCryptoCurrency2018-10-08.csv" matches the system date, process it and generate a "trade instruction file"
2) generate the corresponding target position file order_2018-10-08.csv
:return:
"""
self.get_balance()
self.handle_backtest_file()
self.handle_order_file()
def do_order(self, md_dic, instrument_id, order_vol, price=None, direction=Direction.Long, stop_loss_price=0,
msg=""):
# if True:
# self.logger.info("%s %s %f 价格 %f [%s]",
# instrument_id, '买入' if position > 0 else '卖出', position, price, msg)
# return
# position == 0 means there is nothing to do
# execute the trade
if direction == Direction.Long:
if order_vol == 0:
return
elif order_vol > 0:
if price is None or price == 0:
price = md_dic['close']
# TODO: later, place the order at the best ask price from the order book
# if DEBUG:
# # in debug mode, keep the price from actually filling; seeing the open order is enough
# price /= 2
if stop_loss_price is not None and stop_loss_price > 0 and price <= stop_loss_price:
self.logger.warning('%s current price %.6f has already crossed the stop-loss price %.6f, aborting the buy',
instrument_id, price, stop_loss_price)
return
self.open_long(instrument_id, price, order_vol)
self.logger.info("%s %s -> 开多 %.4f 价格:%.4f", instrument_id, msg, order_vol, price)
elif order_vol < 0:
if price is None or price == 0:
price = md_dic['close']
# TODO: later, place the order at the best ask price from the order book
# if DEBUG:
# # in debug mode, keep the price from actually filling; seeing the open order is enough
# price += price
order_vol_net = -order_vol
self.close_long(instrument_id, price, order_vol_net)
self.logger.info("%s %s -> 平多 %.4f 价格:%.4f", instrument_id, msg, order_vol_net, price)
else:
raise ValueError('short selling is not supported yet')
self.instrument_lastest_order_datetime_dic[instrument_id] = datetime.now()
def on_tick(self, md_dic, context):
"""
Trade on tick-level data
:param md_dic:
:param context:
:return:
"""
# self.logger.debug('get tick data: %s', md_dic)
symbol = md_dic['symbol']
# update the latest price
close_cur = md_dic['close']
self.symbol_latest_price_dic[symbol] = close_cur
# decide whether a position adjustment is needed
if self.symbol_target_position_dic is None or symbol not in self.symbol_target_position_dic:
# self.logger.debug("当前 symbol='%s' 无操作", symbol)
return
if self.datetime_last_update_position is None:
self.logger.debug("尚未获取持仓数据,跳过")
return
target_currency = self.trade_agent.get_currency(symbol)
# self.logger.debug('target_position_dic[%s]: %s', symbol, self.target_position_dic[symbol])
# If the current contract has a recent trade confirmation, the confirmation time must precede the position
# query time: this prevents out-of-sync data when a filled order is not yet reflected in the position info
if symbol in self.datetime_last_rtn_trade_dic:
if target_currency not in self.datetime_last_update_positio
|
n_dic:
logging.debug("持仓数据中没有包含当前合约,最近一次成交回报时间:%s,跳过",
self.datetime_last_rtn_trade_dic[symbol])
self.get_position(symbol, force_refresh=True)
return
if self.datetime_last_rtn_trade_dic[symbol] > self.datetime_last_update_position_dic[target_currency]:
logging.debug("持仓数据尚未更新完成,最近一次成交回报时间:%s 晚于 最近一次持仓更新时间:%s",
self.datetime_last_rtn_trade_dic[symbol],
self.datetime_last_update_position_dic[target_currency])
self.get_position(symbol, force_refresh=True)
return
# executing too frequently can lead to duplicate orders
if symbol in self.symbol_last_deal_datetime:
last_deal_datetime = self.symbol_last_deal_datetime[symbol]
if last_deal_datetime + self.timedelta_between_deal > datetime.now():
# logging.debug("最近一次交易时间:%s,防止交易密度过大,跳过", last_deal_datetime)
return
with self._mutex:
|
identifier_body
|
|
csv_orders_with_feedback.py
|
hold_vol
def check_stop_loss(self, close):
"""
Determine from the current price whether the stop-loss level has been reached.
If the stop-loss level was reached earlier, no further comparison is done and the state is not reset.
:param close:
:return:
"""
# if the stop-loss level was reached earlier, skip the comparison; the state is not reset
if self.stop_loss_price is None or self.has_stop_loss:
return
self.has_stop_loss = (self.direction == Direction.Long and close < self.stop_loss_price) or (
self.direction == Direction.Short and close > self.stop_loss_price)
if self.has_stop_loss:
logging.warning('%s is in stop-loss state. stop-loss price %f, current price %f', self.symbol, self.stop_loss_price, close)
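# Example: a long position with stop_loss_price = 100 enters (and stays in)
# stop-loss state as soon as a close below 100 is observed.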
def get_target_position(self):
return self.direction, self.currency, self.position, self.symbol, \
self.price, self.stop_loss_price, self.has_stop_loss, \
self.gap_threshold_vol
def to_dict(self):
return {attr: getattr(self, attr) for attr in dir(self)
if attr.find('_') != 0 and not callable(getattr(self, attr))}
def __repr__(self):
return f'<TargetPosition(symbol={self.symbol}, direction={int(self.direction)}, ' \
f'position={self.position}, price={self.price}, stop_loss_price={self.stop_loss_price}, ' \
f'has_stop_loss={self.has_stop_loss}, gap_threshold_vol={self.gap_threshold_vol})>'
class ReadFileStg(StgBase):
_folder_path = os.path.abspath(os.path.join(os.path.curdir, r'file_order'))
def __init__(self, symbol_list=None):
super().__init__()
self.symbol_list = symbol_list
self._mutex = threading.Lock()
self._last_check_datetime = datetime.now() - timedelta(minutes=1)
self.interval_timedelta = timedelta(seconds=15)
self.symbol_target_position_dic = {}
# set the parameter type accepted by the event handler of the corresponding period
self._on_period_event_dic[PeriodType.Tick].param_type = dict
# record the time of the last executed action per contract
self.symbol_last_deal_datetime = {}
# record the time of the last buy/sell request per contract
self.instrument_lastest_order_datetime_dic = {}
# trades execute asynchronously and individual orders are not yet tracked, so too short an interval can leave positions out of sync with requests and cause over-ordering
self.timedelta_between_deal = timedelta(seconds=3)
self.min_order_vol = 0.1
self.symbol_latest_price_dic = defaultdict(float)
self.weight = 1 if not DEBUG else 0.2  # default position weight
self.stop_loss_rate = -0.03
self.logger.info('directory for incoming order files: %s', self._folder_path)
self.load_feedback_file()
def fetch_pos_by_file(self):
"""读取仓位配置csv文件,返回目标仓位DataFrame"""
# check the time of the last file scan to avoid redundant queries
if self._last_check_datetime + self.interval_timedelta > datetime.now():
return
# get the file list
file_name_list = os.listdir(self._folder_path)
if file_name_list is None:
# self.logger.info('No file')
return
# read all csv files
position_df = None
file_path_list = []
for file_name in file_name_list:
# only process order*.csv files
if PATTERN_ORDER_FILE_NAME.search(file_name) is None:
continue
self.logger.debug('processing order file: %s', file_name)
file_base_name, file_extension = os.path.splitext(file_name)
file_path = os.path.join(self._folder_path, file_name)
file_path_list.append(file_path)
position_df_tmp = pd.read_csv(file_path)
if position_df is None:
position_df = position_df_tmp
else:
is_ok = True
for col_name in ('currency', 'symbol', 'weight', 'stop_loss_rate'):
if col_name not in position_df_tmp.columns:
is_ok = False
self.logger.error('file %s has an invalid format: missing column %s', file_name, col_name)
break
if not is_ok:
continue
position_df = position_df.append(position_df_tmp)
# during debugging, skip the rename/backup for now; it does not affect the program
if not DEBUG:
# back up the file
backup_file_name = f"{file_base_name} {datetime.now().strftime('%Y-%m-%d %H_%M_%S')}" \
f"{file_extension}.bak"
os.rename(file_path, os.path.join(self._folder_path, backup_file_name))
self.logger.info('backed up order file %s -> %s', file_name, backup_file_name)
return position_df, file_path_list
def handle_backtest_file(self):
"""
Process Wang Chun's backtest file and generate the corresponding trade instruction file
:return:
"""
with self._mutex:
# get the file list
file_name_list = os.listdir(self._folder_path)
if file_name_list is None:
# self.logger.info('No file')
return
# read all csv files
for file_name in file_name_list:
file_base_name, file_extension = os.path.splitext(file_name)
# only process files matching the backtest file-name pattern
m = PATTERN_BACKTEST_FILE_NAME.search(file_name)
if m is None:
continue
file_date_str = m.group()
file_date = str_2_date(file_date_str)
if file_date != date.today():
self.logger.warning('file %s: date does not match the current system date %s, skipping', file_name, date.today())
continue
self.logger.debug('processing file %s, file date: %s', file_name, file_date_str)
file_path = os.path.join(self._folder_path, file_name)
data_df = pd.read_csv(file_path)
if data_df is None or data_df.shape[0] == 0:
continue
if str_2_date(data_df.iloc[-1]['Date']) != file_date:
self.logger.warning('file %s: the latest date in the backtest records does not match the file date %s, skipping', file_name, file_date)
continue
# generate the trade instruction file
currency = data_df.iloc[-1]['InstruLong'].lower()
order_dic = {
'currency': [currency],
'symbol': [f'{currency}usdt'],
'weight': [self.weight],
'stop_loss_rate': [self.stop_loss_rate],
}
order_file_name = f'order_{file_date_str}.csv'
order_file_path = os.path.join(self._folder_path, order_file_name)
order_df = pd.DataFrame(order_dic)
order_df.to_csv(order_file_path)
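# Illustrative result (assumed values) for a backtest whose last long instrument is eos:
# order_2018-10-08.csv would hold one row with currency 'eos', symbol 'eosusdt',
# and the default weight and stop_loss_rate set in __init__.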
# during debugging, skip the rename/backup for now; it does not affect the program
if not DEBUG:
# back up the file
backup_file_name = f"{file_base_name} {datetime.now().strftime('%Y-%m-%d %H_%M_%S')}" \
f"{file_extension}.bak"
os.rename(file_path, os.path.join(self._folder_path, backup_file_name))
def handle_order_file(self):
"""
Get the target holding currencies, weights, and stop-loss levels,
and generate the corresponding trade instructions.
Additionally, when new order files are found, back up all feedback files (orders are placed based on the new orders, generating new feedback files)
:return:
"""
with self._mutex:
position_df, file_path_list = self.fetch_pos_by_file()
if position_df is None or position_df.shape[0] == 0:
return
# if new order instructions exist, back up all feedback files (orders are placed based on the new orders, generating new feedback files)
self.backup_feedback_files()
self.logger.debug('position adjustment targets:\n%s', position_df)
target_holding_dic = position_df.set_index('currency').dropna().to_dict('index')
if len(self.symbol_latest_price_dic) == 0:
self.logger.warning('no valid latest price data cached yet; deferring the trade instructions')
return
# {currency: (Direction, currency, target_position, symbol, target_price, stop_loss_price)
symbol_target_position_dic = {}
# check whether the current holdings match the target positions; trade accordingly if not
target_currency_set = set(list(position_df['currency']))
holding_currency_dic = self.get_holding_currency()
# check whether all holdings satisfy the target configuration file
is_all_fit_target = True
# sell any currency that is not in the target holdings list
for num, (currency, balance_dic) in enumerate(holding_currency_dic.items(), start=1):
# currency is in the target holdings, no need to liquidate
if currency in target_currency_set:
continue
# hc is a fee-token utility of the exchange, not used for trading
# if currency == 'hc':
# continue
# give up liquidation if the balance is below 0.0001
tot_balance = 0
for _, dic in balance_dic.items():
tot_balance += dic['balance']
if tot_balance <
|
ol = gap_thres
|
identifier_name
|
|
csv_orders_with_feedback.py
|
fee-token utility of the exchange, not used for trading
# if currency == 'hc':
# continue
# give up liquidation if the balance is below 0.0001
tot_balance = 0
for _, dic in balance_dic.items():
tot_balance += dic['balance']
if tot_balance < 0.0001:
continue
symbol = self.get_symbol_by_currency(currency)
if symbol_list is not None and symbol not in symbol_list:
self.logger.warning('%s holding %.6f is neither in the current subscription list nor in the target holdings; it will not be touched',
symbol, tot_balance)
continue
self.logger.info('planning to sell %s', symbol)
# TODO: the minimum order size is recorded in the database; this should be improved
gap_threshold_vol = 0.1
symbol_target_position_dic[symbol] = TargetPosition(Direction.Long, currency, 0, symbol,
gap_threshold_vol=gap_threshold_vol)
is_all_fit_target = False
# generate buy instructions for the target holdings list
for num, (currency, position_dic) in enumerate(target_holding_dic.items()):
weight = position_dic['weight']
stop_loss_rate = position_dic['stop_loss_rate']
# stop_loss_price = position_dic['stop_loss_rate']
symbol = self.get_symbol_by_currency(currency)
target_vol, gap_threshold_vol, stop_loss_price = self.calc_vol_and_stop_loss_price(symbol, weight, stop_loss_rate)
if target_vol is None:
self.logger.warning('cannot compute a target position size for %s with weight %.2f %%', currency, weight * 100)
continue
# check whether the current position matches the target; skip if it does
# position_date_pos_info_dic = self.get_position(symbol)
# if position_date_pos_info_dic is not None and len(position_date_pos_info_dic) > 0:
# # a position exists: check whether it meets the target, otherwise issue an instruction
# position_cur = sum([pos_info['balance'] for pos_info in position_date_pos_info_dic.values()])
# position_gap = target_vol - position_cur
# # in live trading an exact match is rare; being within a tolerance band is enough
# if position_gap > gap_threshold_vol:
# # cumulative holdings differ from the target: add a target-position task
# is_all_fit_target = False
# else:
# is_all_fit_target = False
# generate a trade instruction regardless of whether a position exists; the comparison happens at
# execution time (the commented code above does not affect whether an opening instruction is produced)
# long target position
self.logger.info('planning to buy %s, target position: %f, stop-loss price: %f', symbol, target_vol, stop_loss_price)
symbol_target_position_dic[symbol] = TargetPosition(Direction.Long, currency, target_vol, symbol,
None, stop_loss_price,
gap_threshold_vol=gap_threshold_vol)
symbol_target_position_dic_len = len(symbol_target_position_dic)
if symbol_target_position_dic_len > 0:
self.symbol_target_position_dic = symbol_target_position_dic
self.logger.info('new target position instructions found:')
self.logger_symbol_target_position_dic()
# generate the feedback file
self.create_feedback_file()
else:
self.symbol_target_position_dic = None
self.logger.debug('no position adjustment instructions')
def logger_symbol_target_position_dic(self):
"""
Display the current target position info
:return:
"""
symbol_target_position_dic_len = len(self.symbol_target_position_dic)
for num, (key, val) in enumerate(self.symbol_target_position_dic.items()):
self.logger.info('%d/%d) %s, %r', num, symbol_target_position_dic_len, key, val)
def on_timer(self):
"""
Check the files every 15 seconds:
1) check Wang Chun's backtest file: if the date inside the latest "TradeBookCryptoCurrency2018-10-08.csv" matches the system date, process it and generate a "trade instruction file"
2) generate the corresponding target position file order_2018-10-08.csv
:return:
"""
self.get_balance()
self.handle_backtest_file()
self.handle_order_file()
def do_order(self, md_dic, instrument_id, order_vol, price=None, direction=Direction.Long, stop_loss_price=0,
msg=""):
# if True:
# self.logger.info("%s %s %f 价格 %f [%s]",
# instrument_id, '买入' if position > 0 else '卖出', position, price, msg)
# return
# position == 0 means there is nothing to do
# execute the trade
if direction == Direction.Long:
if order_vol == 0:
return
elif order_vol > 0:
if price is None or price == 0:
price = md_dic['close']
# TODO: later, place the order at the best ask price from the order book
# if DEBUG:
# # in debug mode, keep the price from actually filling; seeing the open order is enough
# price /= 2
if stop_loss_price is not None and stop_loss_price > 0 and price <= stop_loss_price:
self.logger.warning('%s current price %.6f has already crossed the stop-loss price %.6f, aborting the buy',
instrument_id, price, stop_loss_price)
return
self.open_long(instrument_id, price, order_vol)
self.logger.info("%s %s -> 开多 %.4f 价格:%.4f", instrument_id, msg, order_vol, price)
elif order_vol < 0:
if price is None or price == 0:
price = md_dic['close']
# TODO: later, place the order at the best ask price from the order book
# if DEBUG:
# # in debug mode, keep the price from actually filling; seeing the open order is enough
# price += price
order_vol_net = -order_vol
self.close_long(instrument_id, price, order_vol_net)
self.logger.info("%s %s -> 平多 %.4f 价格:%.4f", instrument_id, msg, order_vol_net, price)
else:
raise ValueError('short selling is not supported yet')
self.instrument_lastest_order_datetime_dic[instrument_id] = datetime.now()
def on_tick(self, md_dic, context):
"""
Trade on tick-level data
:param md_dic:
:param context:
:return:
"""
# self.logger.debug('get tick data: %s', md_dic)
symbol = md_dic['symbol']
# update the latest price
close_cur = md_dic['close']
self.symbol_latest_price_dic[symbol] = close_cur
# decide whether a position adjustment is needed
if self.symbol_target_position_dic is None or symbol not in self.symbol_target_position_dic:
# self.logger.debug("当前 symbol='%s' 无操作", symbol)
return
if self.datetime_last_update_position is None:
self.logger.debug("尚未获取持仓数据,跳过")
return
target_currency = self.trade_agent.get_currency(symbol)
# self.logger.debug('target_position_dic[%s]: %s', symbol, self.target_position_dic[symbol])
# If the current contract has a recent trade confirmation, the confirmation time must precede the position
# query time: this prevents out-of-sync data when a filled order is not yet reflected in the position info
if symbol in self.datetime_last_rtn_trade_dic:
if target_currency not in self.datetime_last_update_position_dic:
logging.debug("持仓数据中没有包含当前合约,最近一次成交回报时间:%s,跳过",
self.datetime_last_rtn_trade_dic[symbol])
self.get_position(symbol, force_refresh=True)
return
if self.datetime_last_rtn_trade_dic[symbol] > self.datetime_last_update_position_dic[target_currency]:
logging.debug("持仓数据尚未更新完成,最近一次成交回报时间:%s 晚于 最近一次持仓更新时间:%s",
self.datetime_last_rtn_trade_dic[symbol],
self.datetime_last_update_position_dic[target_currency])
self.get_position(symbol, force_refresh=True)
return
# executing too frequently can lead to duplicate orders
if symbol in self.symbol_last_deal_datetime:
last_deal_datetime = self.symbol_last_deal_datetime[symbol]
if last_deal_datetime + self.timedelta_between_deal > datetime.now():
# logging.debug("最近一次交易时间:%s,防止交易密度过大,跳过", last_deal_datetime)
|
return
with self._mutex:
target_position = self.symbol_target_position_dic[symbol]
target_position.check_stop_loss(close_cur)
# self.logger.debug("当前持仓目标:%r", target_position)
# 撤销所有相关订单
self.cancel_order(symbol)
# compute the target direction and trade volume
position_date_pos_info_dic = self.get_position(symbol)
if position_date_pos_info_dic is None:
# no current position but a target exists: open a position straight to the target
# target_direction, target_currency, target
|
conditional_block
|
|
types.go
|
",
TypeLongBlob: "longBlob",
TypeBlob: "blob",
TypeVarString: "varString",
TypeString: "string",
TypeGeometry: "geometry",
}
func (t ColumnType) isNumeric() bool {
switch t {
case TypeTiny, TypeShort, TypeInt24, TypeLong, TypeLongLong,
TypeFloat, TypeDouble, TypeDecimal, TypeNewDecimal:
return true
}
return false
}
func (t ColumnType) isString() bool {
switch t {
case TypeVarchar, TypeBlob, TypeVarString, TypeString:
return true
}
return false
}
func (t ColumnType) isEnumSet() bool {
return t == TypeEnum || t == TypeSet
}
func (t ColumnType) String() string {
if s, ok := typeNames[t]; ok {
return s
}
return fmt.Sprintf("0x%02x", uint8(t))
}
func (col Column) decodeValue(r *reader) (interface{}, error) {
switch col.Type {
case TypeTiny:
if col.Unsigned {
return r.int1(), r.err
}
return int8(r.int1()), r.err
case TypeShort:
if col.Unsigned {
return r.int2(), r.err
}
return int16(r.int2()), r.err
case TypeInt24:
v := r.int3()
if col.Unsigned {
return v, r.err
}
if v&0x00800000 != 0 {
// negative number
v |= 0xFF000000
}
return int32(v), r.err
case TypeLong:
if col.Unsigned {
return r.int4(), r.err
}
return int32(r.int4()), r.err
case TypeLongLong:
if col.Unsigned {
return r.int8(), r.err
}
return int64(r.int8()), r.err
case TypeNewDecimal:
precision := int(byte(col.Meta))
scale := int(byte(col.Meta >> 8))
buff := r.bytes(decimalSize(precision, scale))
if r.err != nil {
return nil, r.err
}
return decodeDecimal(buff, precision, scale)
case TypeFloat:
return math.Float32frombits(r.int4()), r.err
case TypeDouble:
return math.Float64frombits(r.int8()), r.err
case TypeVarchar, TypeString:
var size int
if col.Meta < 256 {
size = int(r.int1())
} else {
size = int(r.int2())
}
return r.string(size), r.err
case TypeEnum:
switch col.Meta {
case 1:
return Enum{uint16(r.int1()), col.Values}, r.err
case 2:
return Enum{r.int2(), col.Values}, r.err
default:
return nil, fmt.Errorf("binlog.decodeValue: invalid enum length %d", col.Meta)
}
case TypeSet:
n := col.Meta // == length
if n == 0 || n > 8 {
return nil, fmt.Errorf("binlog.decodeValue: invalid num bits in set %d", n)
}
return Set{r.intFixed(int(n)), col.Values}, r.err
case TypeBit:
nbits := ((col.Meta >> 8) * 8) + (col.Meta & 0xFF)
buf := r.bytesInternal(int(nbits+7) / 8)
return bigEndian(buf), r.err
case TypeBlob, TypeGeometry:
size := r.intFixed(int(col.Meta))
v := r.bytes(int(size))
if col.Charset == 0 || col.Charset == 63 {
return v, r.err
}
return string(v), r.err
case TypeJSON:
size := r.intFixed(int(col.Meta))
buf := r.bytesInternal(int(size))
if r.err != nil {
return nil, r.err
}
v, err := new(jsonDecoder).decodeValue(buf)
return JSON{v}, err
case TypeDate:
v := r.int3()
var year, month, day uint32
if v != 0 {
year, month, day = v/(16*32), v/32%16, v%32
}
return time.Date(int(year), time.Month(month), int(day), 0, 0, 0, 0, time.UTC), r.err
case TypeDateTime2:
buf := r.bytesInternal(5)
if r.err != nil {
return nil, r.err
}
dt := bigEndian(buf)
ym := bitSlice(dt, 40, 1, 17)
year, month := ym/13, ym%13
day := bitSlice(dt, 40, 18, 5)
hour := bitSlice(dt, 40, 23, 5)
min := bitSlice(dt, 40, 28, 6)
sec := bitSlice(dt, 40, 34, 6)
frac, err := fractionalSeconds(col.Meta, r)
if err != nil {
return nil, err
}
return time.Date(year, time.Month(month), day, hour, min, sec, frac*1000, time.UTC), r.err
case TypeTimestamp2:
buf := r.bytesInternal(4)
if r.err != nil {
return nil, r.err
}
sec := binary.BigEndian.Uint32(buf)
frac, err := fractionalSeconds(col.Meta, r)
if err != nil {
return nil, err
}
return time.Unix(int64(sec), int64(frac)*1000), r.err
case TypeTime2:
// https://github.com/debezium/debezium/blob/master/debezium-connector-mysql/src/main/java/io/debezium/connector/mysql/RowDeserializers.java#L314
//
// (in big endian)
//
// 1 bit sign (1= non-negative, 0= negative)
// 1 bit unused (reserved for future extensions)
// 10 bits hour (0-838)
// 6 bits minute (0-59)
// 6 bits second (0-59)
//
// (3 bytes in total)
//
// + fractional-seconds storage (size depends on meta)
buf := r.bytesInternal(3)
if r.err != nil {
return nil, r.err
}
t := bigEndian(buf)
sign := bitSlice(t, 24, 0, 1)
hour := bitSlice(t, 24, 2, 10)
min := bitSlice(t, 24, 12, 6)
sec := bitSlice(t, 24, 18, 6)
var frac int
var err error
if sign == 0 {
// -ve
hour = ^hour & mask(10)
hour = hour & unsetSignMask(10) // unset sign bit
min = ^min & mask(6)
min = min & unsetSignMask(6) // unset sign bit
sec = ^sec & mask(6)
sec = sec & unsetSignMask(6) // unset sign bit
frac, err = fractionalSecondsNegative(col.Meta, r)
if err != nil {
return nil, err
}
if frac == 0 && sec < 59 { // weird duration behavior
sec++
}
} else {
frac, err = fractionalSeconds(col.Meta, r)
if err != nil {
return nil, err
}
}
v := time.Duration(hour)*time.Hour +
time.Duration(min)*time.Minute +
time.Duration(sec)*time.Second +
time.Duration(frac)*time.Microsecond
if sign == 0 {
v = -v
}
return v, r.err
case TypeYear:
v := int(r.int1())
if v == 0 {
return 0, r.err
}
return 1900 + v, r.err
}
return nil, fmt.Errorf("decode of mysql type %s is not implemented", col.Type)
}
func bitSlice(v uint64, bits, off, len int) int {
v >>= bits - (off + len)
return int(v & ((1 << len) - 1))
}
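// Worked example: bitSlice(0xB4, 8, 2, 3) == 6 — 0xB4 is 1011_0100; skipping the
// two most-significant bits and taking the next three yields 110 (decimal 6).
// The DATETIME2/TIME2 decoding above slices 40- and 24-bit big-endian values
// the same way.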
func fractionalSeconds(meta uint16, r *reader) (int, error) {
	n := (meta + 1) / 2
	v := bigEndian(r.bytesInternal(int(n)))
	return int(v * uint64(math.Pow(100, float64(3-n)))), r.err
}
func fractionalSecondsNegative(meta uint16, r *reader) (int, error) {
	n := (meta + 1) / 2
	v := int(bigEndian(r.bytesInternal(int(n))))
	if v != 0 {
		bits := int(n * 8)
		v = ^v & mask(bits)
		v = (v & unsetSignMask(bits)) + 1
	}
	return v * int(math.Pow(100, float64(3-n))), r.err
}
func mask(bits int) int {
return (1 << bits) - 1
}
func unsetSignMask(bits int) int {
return ^(1 << bits)
}
func (col Column) valueLiteral(v interface{}) string {
if v == nil {
return "NULL"
}
switch col.Type {
case TypeEnum:
v := v.(Enum)
if len(v.Values) > 0 {
return strconv.Quote(v.String())
}
case TypeSet:
v := v.(Set)
if len(v.Values) > 0 {
return strconv.Quote(v.String())
}
case TypeJSON:
var buf bytes.Buffer
_ = json.NewEncoder(&buf).Encode(v)
s := buf.String()
return strconv.Quote(s[:len(s)-1]) // remove trailing newline
case TypeBlob:
if v, ok := v.([]byte); ok { // 63 = binary charset
return fmt.Sprintf(`x"%s"`, hex.EncodeToString(v))
}
}
switch v := v.(type) {
case time.Time:
return strconv.Quote(v.String())
}
return fmt.Sprintf("%#v", v)
}
// Decimal ---
const digitsPerInteger int = 9
var compressedBytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4}
func decodeDecimalDecompressValue(compIndex int, data []byte, mask uint8) (size int, value uint32) {
size = compressedBytes[compIndex]
buff := make([]byte, size)
for i := 0; i < size; i++ {
buff[i] = data[i] ^ mask
}
value = uint32(bigEndian(buff))
return
}
func decimalSize(precision int, scale int) int {
integral := precision - scale
uncompIntegral := integral / digitsPerInteger
uncompFractional := scale / digitsPerInteger
compIntegral := integral - (uncompIntegral * digitsPerInteger)
compFractional := scale - (uncompFractional * digitsPerInteger)
return uncompIntegral*4 + compressedBytes[compIntegral] +
uncompFractional*4 + compressedBytes[compFractional]
}
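// Worked example: for DECIMAL(10,4) the integral part has 6 digits and the
// fractional part 4. 6 digits = 0 full 9-digit groups + 6 leftover digits →
// compressedBytes[6] = 3 bytes; 4 digits = 0 full groups + 4 leftover →
// compressedBytes[4] = 2 bytes. So decimalSize(10, 4) == 5, matching MySQL's
// documented storage requirement for DECIMAL(10,4).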
func decodeDecimal(data []byte, precision int, scale int) (Decimal, error) {
integral := precision - scale
uncompIntegral := integral / digitsPerInteger
uncompFractional := scale / digitsPerInteger
compIntegral := integral - (uncompIntegral * digitsPerInteger)
compFractional := scale - (uncompFractional * digitsPerInteger)
binSize := uncompIntegral*4 + compressedBytes[compIntegral] +
uncompFractional*4 + compressedBytes[compFractional]
buf := make([]byte, binSize)
copy(buf, data[:binSize])
//must copy the data for later change
data = buf
	// Support negative numbers.
	// The sign is encoded in the high bit of the first byte,
	// but that bit is also part of the value, so it is masked off below.
value := uint32(data[0])
var res bytes.Buffer
var mask uint32 = 0
if value&0x80 == 0 {
mask = uint32((1 << 32) - 1)
res.WriteString("-")
}
//clear sign
data[0] ^= 0x80
pos, value := decodeDecimalDecompressValue(compIntegral, data, uint8(mask))
res.WriteString(fmt.Sprintf("%d", value))
for i := 0; i < uncompIntegral; i++ {
value = binary.BigEndian.Uint32(data[pos:]) ^ mask
pos += 4
res.WriteString(fmt.Sprintf("%09d", value))
}
res.WriteString(".")
for i := 0; i < uncompFractional; i++ {
value = binary.BigEndian.Uint32(data[pos:]) ^ mask
pos += 4
res.WriteString(fmt.Sprintf("%09d", value))
}
if size, value := decodeDecimalDecompressValue(compFractional, data[pos:], uint8(mask)); size > 0 {
res.WriteString(fmt.Sprintf("%0*d", compFractional, value))
pos += size
}
// remove leading zeros & trailing dot
s := res.String()
res.Reset()
if s[0] == '-' {
res.WriteString("-")
s = s[1:]
}
for len(s) > 1 && s[0] == '0' && s[1] != '.' {
s = s[1:]
}
if len(s) > 0 && s[len(s)-1] == '.' {
s = s[:len(s)-1]
}
res.WriteString(s)
return Decimal(res.String()), nil
}
func bigEndian(buf []byte) uint64 {
var num uint64 = 0
for i, b := range buf {
num |= uint64(b) << (uint(len(buf)-i-1) * 8)
}
return num
}
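// Example: bigEndian([]byte{0x12, 0x34}) == 0x1234 (4660) — the first byte is
// the most significant, mirroring how these binlog fields are laid out on the wire.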
// Enum represents value of TypeEnum.
//
// https://dev.mysql.com/doc/refman/8.0/en/enum.html
type Enum struct {
	// index value. refers to a position in the list of permitted values,
	// beginning with 1.
	// 0 means the empty string, i.e. an invalid value.
Val uint16
// list of permitted values.
// will be populated only if system
// variable binlog_row_metadata==FULL
Values []string
}
func (e Enum) String() string {
if len(e.Values) > 0 {
if e.Val == 0 {
return ""
}
return e.Values[e.Val-1]
}
return fmt.Sprintf("%d", e.Val)
}
func (e Enum) MarshalJSON() ([]byte, error) {
if len(e.Values) > 0 {
return []byte(strconv.Quote(e.String())), nil
}
return []byte(e.String()), nil
}
// Set represents value of TypeSet.
//
// https://dev.mysql.com/doc/refman/8.0/en/set.html
type Set struct {
// set's numerical value with bits set corresponding
// to the set members that make up the column value.
	// 0 means the empty string, i.e. an invalid value.
Val uint64
// list of permitted values.
// will be populated only if system
// variable binlog_row_metadata==FULL
Values []string
}
// Members returns the values in this set.
func (s Set) Members() []string {
var m []string
if len(s.Values) > 0 {
for i, val := range s.Values {
if s.Val&(1<<i) != 0 {
m = append(m, val)
}
}
}
return m
}
func (s Set) String() string {
if len(s.Values) > 0 {
if s.Val == 0 {
return ""
}
var buf strings.Builder
for i, val := range s.Values {
if s.Val&(1<<i) != 0 {
if buf.Len() > 0 {
buf.WriteByte(',')
}
buf.WriteString(val)
}
}
return buf.String()
}
return fmt.Sprintf("%d", s.Val)
}
func (s Set) MarshalJSON() ([]byte, error) {
	if len(s.Values) > 0 {
		var buf bytes.Buffer
		err := json.NewEncoder(&buf).Encode(s.Members())
		return buf.Bytes(), err
	}
	return []byte(s.String()), nil
}
// A Decimal represents a MySQL Decimal/Numeric literal.
//
// https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html
type Decimal string
func (d Decimal) String() string { return string(d) }
// Float64 returns the number as a float64.
func (d Decimal) Float64() (float64, error) {
return strconv.ParseFloat(string(d), 64)
}
// BigFloat returns the number as a *big.Float.
// (The body below is a reconstruction/sketch: it assumes the file imports
// math/big — only the doc comment above is present in the source.)
func (d Decimal) BigFloat() (*big.Float, bool) {
	return new(big.Float).SetString(string(d))
}
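// Usage sketch (illustrative only; exampleTypeRendering is not part of the
// original file): how Enum, Set and Decimal render once
// binlog_row_metadata=FULL has populated Values.
func exampleTypeRendering() {
	e := Enum{Val: 2, Values: []string{"red", "green", "blue"}}
	fmt.Println(e.String()) // "green" — Val is a 1-based index into Values

	s := Set{Val: 0b101, Values: []string{"a", "b", "c"}}
	fmt.Println(s.String()) // "a,c" — bit i set selects Values[i]

	d := Decimal("12.34")
	f, _ := d.Float64()
	fmt.Println(f) // 12.34
}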
raft.go
func (rf *Raft) RaftStateSize() int {
return rf.persister.RaftStateSize()
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here.
// Example:
w := new(bytes.Buffer)
e := gob.NewEncoder(w)
e.Encode(rf.CurrentTerm)
e.Encode(rf.VotedFor)
e.Encode(rf.Log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
// Your code here.
// Example:
r := bytes.NewBuffer(data)
d := gob.NewDecoder(r)
rf.mu.Lock()
d.Decode(&rf.CurrentTerm)
d.Decode(&rf.VotedFor)
d.Decode(&rf.Log)
rf.mu.Unlock()
}
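// Note on persist/readPersist: gob encoding is positional, so the three fields
// must be decoded in exactly the order they were encoded (CurrentTerm,
// VotedFor, Log); adding a field on one side without the other silently
// corrupts crash recovery.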
type AppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []LogEntry
LeaderCommit int
}
type AppendEntriesReply struct {
Term int
Success bool
NextIndex int
}
type InstallSnapshotArgs struct {
Term int
LeaderId int
LastIncludedIndex int
LastIncludedTerm int
Snapshot []byte
}
type InstallSnapshotReply struct {
Term int
}
func (rf *Raft) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) {
rf.mu.Lock()
defer rf.mu.Unlock()
reply.Term = rf.CurrentTerm
if args.Term < rf.CurrentTerm { // not a legal leader
reply.Success = false
return
}
if rf.identity == FOLLOWER {
rf.hasAppended <- true
} else {
if args.Term > rf.CurrentTerm {
rf.identity = FOLLOWER
}
}
var lastIndex int
if len(rf.Log) == 0 {
lastIndex = rf.lastIncludedIndex
} else {
lastIndex = rf.Log[len(rf.Log)-1].Index
}
if args.PrevLogIndex > lastIndex || args.PrevLogIndex < rf.lastIncludedIndex {
reply.Success = false
reply.NextIndex = lastIndex + 1
return
}
if args.PrevLogIndex+len(args.Entries) <= rf.CommitIndex {
reply.Success = false
reply.NextIndex = rf.CommitIndex + 1
return
}
var lastTerm int
if len(rf.Log) == 0 || args.PrevLogIndex < rf.Log[0].Index {
lastTerm = rf.lastIncludedTerm
} else {
lastTerm = rf.Log[args.PrevLogIndex-rf.Log[0].Index].Term
}
if args.PrevLogTerm != lastTerm {
reply.Success = false
if len(rf.Log) == 0 {
return
}
for i := args.PrevLogIndex - 1; i >= rf.Log[0].Index; i-- {
if rf.Log[i-rf.Log[0].Index].Term != rf.Log[args.PrevLogIndex-rf.Log[0].Index].Term {
reply.NextIndex = i + 1
break
}
}
return
}
reply.Success = true
//if len(args.Entries) > 0 {
// fmt.Println("peer", rf.me, "receives entry", args.PrevLogIndex+1, "-", args.PrevLogIndex+len(args.Entries), "from leader", args.LeaderId)
//}
if len(rf.Log) > 0 {
rf.Log = rf.Log[:args.PrevLogIndex-rf.Log[0].Index+1]
}
rf.Log = append(rf.Log, args.Entries...)
rf.persist()
//fmt.Println("log of peer", rf.me, ":", rf.Log)
if len(rf.Log) > 0 && args.LeaderCommit > rf.CommitIndex {
//fmt.Println("args.LeaderCommit of", rf.me, ":", args.LeaderCommit)
//fmt.Println("rf.Log[len(rf.Log)-1].Index of", rf.me, ":", rf.Log[len(rf.Log)-1].Index)
if args.LeaderCommit < rf.Log[len(rf.Log)-1].Index {
rf.CommitIndex = args.LeaderCommit
} else {
rf.CommitIndex = rf.Log[len(rf.Log)-1].Index
}
rf.commitNow <- true
}
}
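// Note on the conflict path above: when PrevLogTerm mismatches, the follower
// walks backwards past every entry of the conflicting term and returns the
// first index of that term as NextIndex. Example: with follower entry terms
// [1 1 2 2 2] and a conflict at the last term-2 entry, NextIndex lands on the
// first term-2 entry, so the leader skips the whole term in one round trip
// instead of decrementing nextIndex one entry at a time.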
func (rf *Raft) sendAppendEntries(server int, args AppendEntriesArgs, reply *AppendEntriesReply) bool {
//if len(args.Entries) > 0 {
//fmt.Println("leader", rf.me, "send entry", args.PrevLogIndex+1, "-", args.PrevLogIndex+len(args.Entries), "to peer", server)
//}
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
if ok {
if rf.identity != LEADER {
return ok
}
if reply.Term > rf.CurrentTerm {
rf.CurrentTerm = reply.Term
rf.identity = FOLLOWER
return ok
}
if reply.Success == true {
rf.nextIndex[server] = args.PrevLogIndex + len(args.Entries) + 1
rf.matchIndex[server] = args.PrevLogIndex + len(args.Entries)
//fmt.Println("rf.matchIndex[", server, "]:", rf.matchIndex[server])
} else {
rf.nextIndex[server] = reply.NextIndex
}
}
return ok
}
func (rf *Raft) sendAppendEntriesToAll() {
rf.mu.Lock()
defer rf.mu.Unlock()
if len(rf.Log) > 0 {
for N := rf.Log[len(rf.Log)-1].Index; N > rf.CommitIndex; N-- {
count := 0
for j := range rf.peers {
if rf.matchIndex[j] >= N {
count++
}
}
//fmt.Println("count:", count)
//fmt.Println("len(rf.peers)/2:", len(rf.peers)/2)
//fmt.Println("rf.Log[N-rf.Log[0].Index].Term:", rf.Log[N-rf.Log[0].Index].Term)
//fmt.Println("rf.CurrentTerm:", rf.CurrentTerm)
			if count > len(rf.peers)/2 && rf.Log[N-rf.Log[0].Index].Term == rf.CurrentTerm { // a leader may only commit entries from its own term (Raft §5.4.2); '<=' here would be unsafe
rf.CommitIndex = N
rf.commitNow <- true
break
}
}
}
for i := range rf.peers {
if i != rf.me && rf.identity == LEADER {
if len(rf.Log) > 0 && rf.nextIndex[i] >= rf.Log[0].Index || len(rf.Log) == 0 && rf.nextIndex[i] > rf.lastIncludedIndex {
var args AppendEntriesArgs
args.Term = rf.CurrentTerm
args.LeaderId = rf.me
if len(rf.Log) == 0 || rf.nextIndex[i] == rf.Log[0].Index {
args.PrevLogIndex = rf.lastIncludedIndex
args.PrevLogTerm = rf.lastIncludedTerm
} else {
args.PrevLogIndex = rf.nextIndex[i] - 1
args.PrevLogTerm = rf.Log[rf.nextIndex[i]-1-rf.Log[0].Index].Term
}
if len(rf.Log) == 0 {
args.Entries = []LogEntry{}
} else {
args.Entries = rf.Log[rf.nextIndex[i]-rf.Log[0].Index:]
}
args.LeaderCommit = rf.CommitIndex
go func(index int, args AppendEntriesArgs) {
var reply AppendEntriesReply
rf.sendAppendEntries(index, args, &reply)
}(i, args)
} else {
var args InstallSnapshotArgs
args.Term = rf.CurrentTerm
args.LeaderId = rf.me
args.LastIncludedIndex = rf.lastIncludedIndex
args.LastIncludedTerm = rf.lastIncludedTerm
args.Snapshot = rf.persister.ReadSnapshot()
go func(index int, args InstallSnapshotArgs) {
var reply InstallSnapshotReply
rf.sendInstallSnapshot(index, args, &reply)
}(i, args)
}
}
}
}
func (rf *Raft) sendInstallSnapshot(server int, args InstallSnapshotArgs, reply *InstallSnapshotReply) bool {
ok := rf.peers[server].Call("Raft.InstallSnapshot", args, reply)
if ok {
if reply.Term > rf.CurrentTerm {
rf.CurrentTerm = reply.Term
rf.identity = FOLLOWER
return ok
}
rf.nextIndex[server] = args.LastIncludedIndex + 1
rf.matchIndex[server] = args.LastIncludedIndex
}
return ok
}
// GetState returns CurrentTerm and whether this peer believes it is the
// leader. (The name and signature are reconstructed from the standard
// MIT 6.824 Raft interface; only the body appears in the source.)
func (rf *Raft) GetState() (int, bool) {
	return rf.CurrentTerm, rf.identity == LEADER
}
func (rf *Raft) InstallSnapshot(args InstallSnapshotArgs, reply *InstallSnapshotReply) {
rf.mu.Lock()
defer rf.mu.Unlock()
reply.Term = rf.CurrentTerm
if args.Term < rf.CurrentTerm {
return
}
rf.persister.SaveSnapshot(args.Snapshot)
rf.lastIncludedIndex = args.LastIncludedIndex
rf.lastIncludedTerm = args.LastIncludedTerm
rf.CommitIndex = args.LastIncludedIndex
rf.lastApplied = args.LastIncludedIndex
var i int
for i = 0; i <= len(rf.Log)-1; i++ {
if rf.Log[i].Index == args.LastIncludedIndex && rf.Log[i].Term == args.LastIncludedTerm {
break
}
}
if i > len(rf.Log)-1 {
i = len(rf.Log) - 1
}
rf.Log = rf.Log[i+1:]
rf.persist()
msg := ApplyMsg{UseSnapshot: true, Snapshot: args.Snapshot}
rf.ApplyChan <- msg
}
//
// example RequestVote RPC arguments structure.
//
type RequestVoteArgs struct {
// Your data here.
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
//
type RequestVoteReply struct {
// Your data here.
Term int
VoteGranted bool
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {
// Your code here.
rf.mu.Lock()
defer rf.persist()
defer rf.mu.Unlock()
reply.Term = rf.CurrentTerm
if args.Term < rf.CurrentTerm {
reply.VoteGranted = false
return
}
if args.Term > rf.CurrentTerm {
rf.VotedFor = -1
rf.CurrentTerm = args.Term
rf.identity = FOLLOWER
}
if rf.VotedFor != -1 && rf.VotedFor != args.CandidateId {
reply.VoteGranted = false
return
}
var rfLogIndex int
var rfLogTerm int
if len(rf.Log) > 0 {
rfLogIndex = rf.Log[len(rf.Log)-1].Index
rfLogTerm = rf.Log[len(rf.Log)-1].Term
} else {
rfLogIndex = rf.lastIncludedIndex
rfLogTerm = rf.lastIncludedTerm
}
if args.LastLogTerm > rfLogTerm || args.LastLogTerm == rfLogTerm && args.LastLogIndex >= rfLogIndex {
reply.VoteGranted = true
rf.VotedFor = args.CandidateId
rf.identity = FOLLOWER
rf.hasVoted <- true
} else {
reply.VoteGranted = false
}
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// returns true if labrpc says the RPC was delivered.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply, once *sync.Once) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
if ok {
if rf.identity != CANDIDATE {
return ok
}
if reply.Term > rf.CurrentTerm {
rf.CurrentTerm = reply.Term
rf.identity = FOLLOWER
return ok
}
if reply.VoteGranted == true {
rf.votes++
//fmt.Println("peer", server, "vote peer", rf.me, "at term", rf.CurrentTerm)
if rf.votes > len(rf.peers)/2 {
once.Do(func() {
rf.hasBecomeLeader <- true
})
return ok
}
}
}
return ok
}
func (rf *Raft) sendRequestVoteToAll() {
var args RequestVoteArgs
var once sync.Once
args.Term = rf.CurrentTerm
args.CandidateId = rf.me
if len(rf.Log) > 0 {
args.LastLogIndex = rf.Log[len(rf.Log)-1].Index
args.LastLogTerm = rf.Log[len(rf.Log)-1].Term
} else {
args.LastLogIndex = rf.lastIncludedIndex
args.LastLogTerm = rf.lastIncludedTerm
}
for i := range rf.peers {
if i != rf.me && rf.identity == CANDIDATE {
go func(index int) {
var reply RequestVoteReply
//fmt.Println("peer", rf.me, "request vote from peer", index)
rf.sendRequestVote(index, args, &reply, &once)
}(i)
}
}
}
func (rf *Raft) eventloop() {
for rf.alive {
if rf.identity == LEADER { // if this raft peer is a leader now
rf.sendAppendEntriesToAll()
time.Sleep(time.Duration(50) * time.Millisecond)
} else if rf.identity == FOLLOWER { // if as a follower
select {
case <-rf.hasVoted:
case <-rf.hasAppended:
case <-time.After(time.Duration(rand.Intn(100)+500) * time.Millisecond):
rf.identity = CANDIDATE
}
} else if rf.identity == CANDIDATE { // as a candidate
rf.CurrentTerm++
//fmt.Println("peer", rf.me, "stands up as a candidate at term", rf.CurrentTerm)
rf.VotedFor = rf.me
rf.votes = 1
go rf.sendRequestVoteToAll()
select {
case <-rf.hasAppended:
rf.mu.Lock()
rf.identity = FOLLOWER
rf.mu.Unlock()
case <-rf.hasBecomeLeader:
//fmt.Println("peer", rf.me, "becomes leader at term", rf.CurrentTerm)
rf.mu.Lock()
rf.identity = LEADER
rf.nextIndex = make([]int, len(rf.peers))
rf.matchIndex = make([]int, len(rf.peers))
for i := range rf.nextIndex {
var newIndex int
if len(rf.Log) == 0 {
newIndex = rf.lastIncludedIndex + 1
} else {
newIndex = rf.Log[len(rf.Log)-1].Index + 1
}
rf.nextIndex[i] = newIndex
rf.matchIndex[i] = 0
}
rf.mu.Unlock()
case <-time.After(time.Duration(rand.Intn(100)+500) * time.Millisecond):
}
}
}
}
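// Timing note: the leader heartbeats every 50 ms while followers and
// candidates use a randomized 500-599 ms election timeout
// (rand.Intn(100)+500), keeping broadcast time well below the election
// timeout as the Raft paper requires; the randomization breaks ties between
// simultaneous candidates.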
func (rf *Raft) commitloop() {
for rf.alive {
<-rf.commitNow
rf.mu.Lock()
		for i := rf.lastApplied + 1; i <= rf.CommitIndex; i++ {
			//fmt.Println("peer", rf.me, "apply entry", i)
			//fmt.Println("peer", rf.me, "'s commitIndex:", rf.CommitIndex)
			rf.lastApplied = i
			var args ApplyMsg
			args.Index = i
			args.Command = rf.Log[i-rf.Log[0].Index].Command
			rf.ApplyChan <- args
		}
		rf.mu.Unlock()
	}
}
func (rf *Raft) TakeSnapshot
kinematics.py
< 0 :
angle = 2*np.pi - angle
return angle
return None
# Manages distances on a polygonal chain
class Polychain(object) :
# floating point precision
HVERSOR = np.array([1,0])
def set_chain(self, chain) :
'''
chain (array): each row is a point (x,y)
of the polygonal chain
'''
# ensure it is a numpy array
self.chain = np.array(chain[:])
# the length of the chain (number of vertices)
self.ln = len(self.chain)
# the chain must be greater than one point
if self.ln < 2 :
raise ValueError('Polychain initialized with only one point. Minimum required is two.')
# calculate segments lengths
self.seg_lens = [
np.linalg.norm( self.chain[x] - self.chain[x-1] ) \
for x in range(1, self.ln) ]
# calculate angles at the vertices
self.seg_angles = []
for x in range(1, self.ln ) :
if x == 1 :
ab = self.HVERSOR
else :
ab = self.chain[x-1] - self.chain[x-2]
bc = self.chain[x] - self.chain[x-1]
self.seg_angles.append(get_angle(ab, bc))
def autocollision(self, epsilon = 0.1, is_set_collinear=False):
self.intersect = None
(start, end) = self.chain[[1,-1]]
for x in range(1,len(self.chain) ) :
p = self.chain[x-1]
rp = self.chain[x]
r = rp - p
for y in range(1,len(self.chain) ) :
q = self.chain[y-1]
sq = self.chain[y]
s = sq - q
not_junction = np.all(p != sq) and np.all(q != rp)
if x!=y and not_junction :
rxs = np.linalg.norm(np.cross(r,s))
qpxr = np.linalg.norm(np.cross(q-p, r))
qpxs = np.linalg.norm(np.cross(q-p, s))
rxs_zero = abs(rxs) < epsilon
if is_set_collinear:
test_collinear = ( rxs_zero and qpxr < epsilon )
if test_collinear:
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
mint = min(t0, t1)
maxt = max(t0, t1)
if (mint > (0+epsilon) and mint < (1+epsilon)) \
or (maxt > (0+epsilon) and maxt < (1-epsilon)) \
or (mint <= (0+epsilon) and maxt >= (1-epsilon)):
return True
if not rxs_zero :
t = qpxs / rxs
u = qpxr / rxs
                        test_intersect = (0 < t < 1) and (0 < u < 1)
if test_intersect:
self.intersect = p +t*r
return True
return False
def isPointInChain(self, point, epsilon = 0.1 ) :
'''
find out if a point belongs to the chain.
        return: a list of distances corresponding to the line
            intersections with that point. Empty list if
            the point does not belong to the chain
'''
distances = []
        c = np.array(point)
for x in range(1,len(self.chain) ) :
a = self.chain[x-1]
b = self.chain[x]
# check if the point is within the same line
if np.all(c!=a) and np.all(c!=b) :
if np.linalg.norm(np.cross(b-a, c-a)) < epsilon :
abac = np.dot(b-a, c-a)
ab = np.dot(b-a, b-a)
if 0 <= abac <= ab :
distance = np.sum(self.seg_lens[:(x-1)])
distance += np.linalg.norm(point - self.chain[x-1])
distance = distance/sum(self.seg_lens)
distances.append( distance )
return distances
    def get_point(self, distance) :
'''
get a point in the 2D space given a
distance from the first point of the chain
'''
if distance > 1 :
raise ValueError('distance must be a proportion of the polyline length (0,1)')
distance = sum(self.seg_lens)*distance
cum_ln = 0
for l in range(self.ln-1) :
s_ln = self.seg_lens[l]
if cum_ln <= distance <= cum_ln+s_ln :
break
cum_ln += self.seg_lens[l]
rel_ln = distance - cum_ln
return self.chain[l] + \
( rel_ln*np.cos( sum(self.seg_angles[:(l+1)]) ), \
rel_ln*np.sin( sum(self.seg_angles[:(l+1)]) ) )
return -1
def get_dense_chain(self, density) :
tot_len = self.get_length()
curr_len = 0
dense_chain = []
points = density
dense_chain.append(self.get_point( 0 ))
for x in range( density ) :
dense_chain.append(self.get_point( (1+x)/float(density+1) ))
dense_chain.append(self.get_point( 1 ))
return np.vstack(dense_chain)
def get_length(self) :
'''
return: the length of the current polyline
'''
return sum(self.seg_lens)
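    # Usage sketch (assumes numpy is imported as np, as elsewhere in this
    # module): an L-shaped chain made of two unit segments.
    #
    #   chain = Polychain()
    #   chain.set_chain([[0, 0], [1, 0], [1, 1]])
    #   chain.get_length()      # 2.0 (two unit-length segments)
    #   chain.get_point(0.5)    # array([1., 0.]) -- the shared vertex
    #   chain.autocollision()   # False -- the segments meet only at a joint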
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# ARM -----------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
class PID(object) :
def __init__(self, n=1, dt=0.1, Kp=0.1, Ki=0.9, Kd=0.001 ):
self.n = n
self.dt = dt
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
def reset(self):
n = self.n
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
    def step(self, measured_value, setpoint=None):
        if setpoint is not None:
            self.setpoint = np.array(setpoint)
        # use the stored setpoint (the original read the raw argument,
        # which crashes when setpoint is None)
        error = self.setpoint - measured_value
self.integral = self.integral + error*self.dt
self.derivative = (error - self.previous_error)/self.dt
self.output = self.Kp*error + \
self.Ki*self.integral + \
self.Kd*self.derivative
self.previous_error = error
return self.output
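    # Usage sketch: one regulation step per control tick (measured_angles and
    # target_angles are placeholder names for the caller's arrays).
    #
    #   pid = PID(n=3, dt=0.1)   # one controller channel per joint
    #   command = pid.step(measured_angles, setpoint=target_angles)
    #
    # Kp acts on the instantaneous error, Ki on its integral and Kd on its
    # rate; with the defaults the integral term dominates, which smooths the
    # response at the cost of a slower approach to the target.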
class Arm(object):
"""
Kinematics of a number_of_joint-degrees-of-freedom
2-dimensional arm.
Given the increment of joint angles calculate
the current positions of the edges of the
arm segments.
"""
def __init__(self,
number_of_joint = 3,
joint_lims = None,
segment_lengths = None,
origin = [0,0],
mirror = False
):
"""
number_of_joint: (int) number of joints
joint_angles (list): initial joint angles
joint_lims (list): joint angles limits
        segment_lengths (list): length of arm segments
origin (list): origin coords of arm
"""
self.mirror = mirror
self.number_of_joint = number_of_joint
# initialize lengths
if segment_lengths is None:
segment_lengths = np.ones(number_of_joint)
self.segment_lengths = np.array(segment_lengths)
# initialize limits
        if joint_lims is None:
            joint_lims = np.vstack([-np.ones(number_of_joint)*np.pi,
                np.ones(number_of_joint)*np.pi]).T
self.joint_lims = np.array(joint_lims)
# set origin coords
self.origin = np.array(origin)
def get_joint_positions(self, joint_angles ):
"""
Finds the (x, y) coordinates
of each joint
joint_angles (vector): current angles of the joints
return (array): 'number of joint' [x,y] coordinates
"""
# current angles
res_joint_angles = joint_angles.copy()
# detect limits
maskminus= res_joint_angles > self.joint_lims[:,0]
maskplus = res_joint_angles < self.joint_lims[:,1]
res_joint_angles = res_joint_angles*(maskplus*maskminus)
res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )
res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )
# mirror
if self
|
get_point
|
identifier_name
|
kinematics.py
|
< 0 :
angle = 2*np.pi - angle
return angle
return None
# Manages distances on a polynomial chain
class Polychain(object) :
# floating point precision
HVERSOR = np.array([1,0])
def set_chain(self, chain) :
'''
chain (array): each row is a point (x,y)
of the polygonal chain
'''
# ensure it is a numpy array
self.chain = np.array(chain[:])
# the length of the chain (number of vertices)
self.ln = len(self.chain)
# the chain must be greater than one point
if self.ln < 2 :
raise ValueError('Polychain initialized with only one point. Minimum required is two.')
# calculate segments lengths
self.seg_lens = [
np.linalg.norm( self.chain[x] - self.chain[x-1] ) \
for x in range(1, self.ln) ]
# calculate angles at the vertices
self.seg_angles = []
for x in range(1, self.ln ) :
if x == 1 :
ab = self.HVERSOR
else :
ab = self.chain[x-1] - self.chain[x-2]
bc = self.chain[x] - self.chain[x-1]
self.seg_angles.append(get_angle(ab, bc))
def autocollision(self, epsilon = 0.1, is_set_collinear=False):
self.intersect = None
(start, end) = self.chain[[1,-1]]
for x in range(1,len(self.chain) ) :
p = self.chain[x-1]
rp = self.chain[x]
r = rp - p
for y in range(1,len(self.chain) ) :
q = self.chain[y-1]
sq = self.chain[y]
s = sq - q
not_junction = np.all(p != sq) and np.all(q != rp)
if x!=y and not_junction :
rxs = np.linalg.norm(np.cross(r,s))
qpxr = np.linalg.norm(np.cross(q-p, r))
qpxs = np.linalg.norm(np.cross(q-p, s))
rxs_zero = abs(rxs) < epsilon
if is_set_collinear:
test_collinear = ( rxs_zero and qpxr < epsilon )
if test_collinear:
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
mint = min(t0, t1)
maxt = max(t0, t1)
if (mint > (0+epsilon) and mint < (1+epsilon)) \
or (maxt > (0+epsilon) and maxt < (1-epsilon)) \
or (mint <= (0+epsilon) and maxt >= (1-epsilon)):
return True
if not rxs_zero :
t = qpxs / rxs
u = qpxr / rxs
test_intersect = ((0)<t<(1)) and ((0)<u<(1))
if test_intersect:
self.intersect = p +t*r
return True
return False
def isPointInChain(self, point, epsilon = 0.1 ) :
'''
find out if a point belongs to the chain.
return: a list of distances correspoding to the the line
intersection with that point. Empty list if
the point does not belong to the chain
'''
distances = []
c = array(point)
for x in range(1,len(self.chain) ) :
a = self.chain[x-1]
b = self.chain[x]
# check if the point is within the same line
if np.all(c!=a) and np.all(c!=b) :
if np.linalg.norm(np.cross(b-a, c-a)) < epsilon :
abac = np.dot(b-a, c-a)
ab = np.dot(b-a, b-a)
if 0 <= abac <= ab :
distance = np.sum(self.seg_lens[:(x-1)])
distance += np.linalg.norm(point - self.chain[x-1])
distance = distance/sum(self.seg_lens)
distances.append( distance )
return distances
def get_point(self, distance) :
'''
get a point in the 2D space given a
distance from the first point of the chain
'''
if distance > 1 :
raise ValueError('distance must be a proportion of the polyline length (0,1)')
distance = sum(self.seg_lens)*distance
cum_ln = 0
for l in range(self.ln-1) :
s_ln = self.seg_lens[l]
if cum_ln <= distance <= cum_ln+s_ln :
break
cum_ln += self.seg_lens[l]
rel_ln = distance - cum_ln
return self.chain[l] + \
( rel_ln*np.cos( sum(self.seg_angles[:(l+1)]) ), \
rel_ln*np.sin( sum(self.seg_angles[:(l+1)]) ) )
return -1
def get_dense_chain(self, density) :
tot_len = self.get_length()
curr_len = 0
dense_chain = []
points = density
dense_chain.append(self.get_point( 0 ))
for x in range( density ) :
dense_chain.append(self.get_point( (1+x)/float(density+1) ))
dense_chain.append(self.get_point( 1 ))
return np.vstack(dense_chain)
def get_length(self) :
|
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# ARM -----------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
class PID(object) :
def __init__(self, n=1, dt=0.1, Kp=0.1, Ki=0.9, Kd=0.001 ):
self.n = n
self.dt = dt
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
def reset(self):
n = self.n
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
def step(self, measured_value, setpoint=None):
if setpoint is not None:
self.setpoint = np.array(setpoint)
error = self.setpoint - measured_value
self.integral = self.integral + error*self.dt
self.derivative = (error - self.previous_error)/self.dt
self.output = self.Kp*error + \
self.Ki*self.integral + \
self.Kd*self.derivative
self.previous_error = error
return self.output
class Arm(object):
"""
Kinematics of a number_of_joint-degrees-of-freedom
2-dimensional arm.
Given the increment of joint angles calculate
the current positions of the edges of the
arm segments.
"""
def __init__(self,
number_of_joint = 3,
joint_lims = None,
segment_lengths = None,
origin = [0,0],
mirror = False
):
"""
number_of_joint: (int) number of joints
joint_angles (list): initial joint angles
joint_lims (list): joint angles limits
segment_lengths (list): length of arm segments
origin (list): origin coords of arm
"""
self.mirror = mirror
self.number_of_joint = number_of_joint
# initialize lengths
if segment_lengths is None:
segment_lengths = np.ones(number_of_joint)
self.segment_lengths = np.array(segment_lengths)
# initialize limits
if joint_lims is None:
joint_lims = np.vstack([-np.ones(number_of_joint)*np.pi, np.ones(number_of_joint)*np.pi]).T
self.joint_lims = np.array(joint_lims)
# set origin coords
self.origin = np.array(origin)
def get_joint_positions(self, joint_angles ):
"""
Finds the (x, y) coordinates
of each joint
joint_angles (vector): current angles of the joints
return (array): 'number of joint' [x,y] coordinates
"""
# current angles
res_joint_angles = joint_angles.copy()
# detect limits
maskminus= res_joint_angles > self.joint_lims[:,0]
maskplus = res_joint_angles < self.joint_lims[:,1]
res_joint_angles = res_joint_angles*(maskplus*maskminus)
res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )
res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )
# mirror
if self
|
'''
return: the length of the current polyline
'''
return sum(self.seg_lens)
|
identifier_body
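A minimal usage sketch for the Polychain class above (an illustration, not part of the original kinematics.py): it assumes numpy is imported as np and that the get_angle helper referenced by set_chain is defined earlier in the file.

import numpy as np

poly = Polychain()
# an L-shaped polyline: three vertices, two unit segments
poly.set_chain([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
print(poly.get_length())      # 2.0, the summed segment lengths
print(poly.get_point(0.5))    # the point halfway along the polyline (the corner)
print(poly.autocollision())   # False, an L-shape does not self-intersect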
|
kinematics.py
|
< 0 :
angle = 2*np.pi - angle
return angle
return None
# Manages distances on a polygonal chain
class Polychain(object) :
# horizontal unit vector (reference direction for the first segment)
HVERSOR = np.array([1,0])
def set_chain(self, chain) :
'''
chain (array): each row is a point (x,y)
of the polygonal chain
'''
# ensure it is a numpy array
self.chain = np.array(chain[:])
# the length of the chain (number of vertices)
self.ln = len(self.chain)
# the chain must have more than one point
if self.ln < 2 :
raise ValueError('Polychain initialized with only one point. Minimum required is two.')
# calculate segments lengths
self.seg_lens = [
np.linalg.norm( self.chain[x] - self.chain[x-1] ) \
for x in range(1, self.ln) ]
# calculate angles at the vertices
self.seg_angles = []
for x in range(1, self.ln ) :
if x == 1 :
ab = self.HVERSOR
else :
ab = self.chain[x-1] - self.chain[x-2]
bc = self.chain[x] - self.chain[x-1]
self.seg_angles.append(get_angle(ab, bc))
def autocollision(self, epsilon = 0.1, is_set_collinear=False):
self.intersect = None
(start, end) = self.chain[[1,-1]]
for x in range(1,len(self.chain) ) :
p = self.chain[x-1]
rp = self.chain[x]
r = rp - p
for y in range(1,len(self.chain) ) :
q = self.chain[y-1]
sq = self.chain[y]
s = sq - q
not_junction = np.all(p != sq) and np.all(q != rp)
if x!=y and not_junction :
rxs = np.linalg.norm(np.cross(r,s))
qpxr = np.linalg.norm(np.cross(q-p, r))
qpxs = np.linalg.norm(np.cross(q-p, s))
rxs_zero = abs(rxs) < epsilon
if is_set_collinear:
test_collinear = ( rxs_zero and qpxr < epsilon )
if test_collinear:
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
mint = min(t0, t1)
maxt = max(t0, t1)
if (mint > (0+epsilon) and mint < (1-epsilon)) \
or (maxt > (0+epsilon) and maxt < (1-epsilon)) \
or (mint <= (0+epsilon) and maxt >= (1-epsilon)):
return True
if not rxs_zero :
t = qpxs / rxs
u = qpxr / rxs
test_intersect = ((0)<t<(1)) and ((0)<u<(1))
if test_intersect:
self.intersect = p +t*r
return True
return False
def isPointInChain(self, point, epsilon = 0.1 ) :
'''
find out if a point belongs to the chain.
return: a list of distances corresponding to the line
intersection with that point. Empty list if
the point does not belong to the chain
'''
distances = []
c = np.array(point)
for x in range(1,len(self.chain) ) :
a = self.chain[x-1]
b = self.chain[x]
# check if the point is within the same line
if np.all(c!=a) and np.all(c!=b) :
if np.linalg.norm(np.cross(b-a, c-a)) < epsilon :
abac = np.dot(b-a, c-a)
ab = np.dot(b-a, b-a)
if 0 <= abac <= ab :
distance = np.sum(self.seg_lens[:(x-1)])
distance += np.linalg.norm(point - self.chain[x-1])
distance = distance/sum(self.seg_lens)
distances.append( distance )
return distances
def get_point(self, distance) :
'''
get a point in the 2D space given a
distance from the first point of the chain
'''
if not 0 <= distance <= 1 :
raise ValueError('distance must be a proportion of the polyline length (0,1)')
distance = sum(self.seg_lens)*distance
cum_ln = 0
for l in range(self.ln-1) :
s_ln = self.seg_lens[l]
if cum_ln <= distance <= cum_ln+s_ln :
break
cum_ln += self.seg_lens[l]
rel_ln = distance - cum_ln
return self.chain[l] + \
( rel_ln*np.cos( sum(self.seg_angles[:(l+1)]) ), \
rel_ln*np.sin( sum(self.seg_angles[:(l+1)]) ) )
def get_dense_chain(self, density) :
dense_chain = []
dense_chain.append(self.get_point( 0 ))
for x in range( density ) :
dense_chain.append(self.get_point( (1+x)/float(density+1) ))
dense_chain.append(self.get_point( 1 ))
return np.vstack(dense_chain)
def get_length(self) :
'''
return: the length of the current polyline
'''
return sum(self.seg_lens)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# ARM -----------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
class PID(object) :
def __init__(self, n=1, dt=0.1, Kp=0.1, Ki=0.9, Kd=0.001 ):
self.n = n
self.dt = dt
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
def reset(self):
n = self.n
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
def step(self, measured_value, setpoint=None):
if setpoint is not None:
self.setpoint = np.array(setpoint)
error = self.setpoint - measured_value
self.integral = self.integral + error*self.dt
self.derivative = (error - self.previous_error)/self.dt
self.output = self.Kp*error + \
self.Ki*self.integral + \
self.Kd*self.derivative
self.previous_error = error
return self.output
class Arm(object):
"""
Kinematics of a number_of_joint-degrees-of-freedom
2-dimensional arm.
Given the increment of joint angles calculate
the current positions of the edges of the
arm segments.
"""
def __init__(self,
number_of_joint = 3,
joint_lims = None,
segment_lengths = None,
origin = [0,0],
|
"""
number_of_joint: (int) number of joints
joint_angles (list): initial joint angles
joint_lims (list): joint angles limits
segment_lengths (list): length of arm segments
origin (list): origin coords of arm
"""
self.mirror = mirror
self.number_of_joint = number_of_joint
# initialize lengths
if segment_lengths is None:
segment_lengths = np.ones(number_of_joint)
self.segment_lengths = np.array(segment_lengths)
# initialize limits
if joint_lims is None:
joint_lims = np.vstack([-np.ones(number_of_joint)*np.pi, np.ones(number_of_joint)*np.pi]).T
self.joint_lims = np.array(joint_lims)
# set origin coords
self.origin = np.array(origin)
def get_joint_positions(self, joint_angles ):
"""
Finds the (x, y) coordinates
of each joint
joint_angles (vector): current angles of the joints
return (array): 'number of joint' [x,y] coordinates
"""
# current angles
res_joint_angles = joint_angles.copy()
# detect limits
maskminus= res_joint_angles > self.joint_lims[:,0]
maskplus = res_joint_angles < self.joint_lims[:,1]
res_joint_angles = res_joint_angles*(maskplus*maskminus)
res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )
res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )
# mirror
if self.m
|
mirror = False
):
|
random_line_split
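A short usage sketch for the PID class above, assuming numpy as np; the plant, gains, and setpoint are illustrative only. step() falls back to the stored self.setpoint when no setpoint argument is passed.

import numpy as np

pid = PID(n=2, dt=0.1, Kp=0.5, Ki=0.1, Kd=0.01)
state = np.zeros(2)
for _ in range(200):
    # drive a simple integrator plant toward the target (1, -1)
    control = pid.step(state, setpoint=[1.0, -1.0])
    state = state + control * pid.dt
print(state)  # should be close to [1, -1]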
|
kinematics.py
|
# Manages distances on a polygonal chain
class Polychain(object) :
# horizontal unit vector (reference direction for the first segment)
HVERSOR = np.array([1,0])
def set_chain(self, chain) :
'''
chain (array): each row is a point (x,y)
of the polygonal chain
'''
# ensure it is a numpy array
self.chain = np.array(chain[:])
# the length of the chain (number of vertices)
self.ln = len(self.chain)
# the chain must have more than one point
if self.ln < 2 :
raise ValueError('Polychain initialized with only one point. Minimum required is two.')
# calculate segments lengths
self.seg_lens = [
np.linalg.norm( self.chain[x] - self.chain[x-1] ) \
for x in range(1, self.ln) ]
# calculate angles at the vertices
self.seg_angles = []
for x in range(1, self.ln ) :
if x == 1 :
ab = self.HVERSOR
else :
ab = self.chain[x-1] - self.chain[x-2]
bc = self.chain[x] - self.chain[x-1]
self.seg_angles.append(get_angle(ab, bc))
def autocollision(self, epsilon = 0.1, is_set_collinear=False):
self.intersect = None
(start, end) = self.chain[[1,-1]]
for x in range(1,len(self.chain) ) :
p = self.chain[x-1]
rp = self.chain[x]
r = rp - p
for y in range(1,len(self.chain) ) :
q = self.chain[y-1]
sq = self.chain[y]
s = sq - q
not_junction = np.all(p != sq) and np.all(q != rp)
if x!=y and not_junction :
rxs = np.linalg.norm(np.cross(r,s))
qpxr = np.linalg.norm(np.cross(q-p, r))
qpxs = np.linalg.norm(np.cross(q-p, s))
rxs_zero = abs(rxs) < epsilon
if is_set_collinear:
test_collinear = ( rxs_zero and qpxr < epsilon )
if test_collinear:
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
mint = min(t0, t1)
maxt = max(t0, t1)
if (mint > (0+epsilon) and mint < (1-epsilon)) \
or (maxt > (0+epsilon) and maxt < (1-epsilon)) \
or (mint <= (0+epsilon) and maxt >= (1-epsilon)):
return True
if not rxs_zero :
t = qpxs / rxs
u = qpxr / rxs
test_intersect = ((0)<t<(1)) and ((0)<u<(1))
if test_intersect:
self.intersect = p +t*r
return True
return False
def isPointInChain(self, point, epsilon = 0.1 ) :
'''
find out if a point belongs to the chain.
return: a list of distances corresponding to the line
intersection with that point. Empty list if
the point does not belong to the chain
'''
distances = []
c = np.array(point)
for x in range(1,len(self.chain) ) :
a = self.chain[x-1]
b = self.chain[x]
# check if the point is within the same line
if np.all(c!=a) and np.all(c!=b) :
if np.linalg.norm(np.cross(b-a, c-a)) < epsilon :
abac = np.dot(b-a, c-a)
ab = np.dot(b-a, b-a)
if 0 <= abac <= ab :
distance = np.sum(self.seg_lens[:(x-1)])
distance += np.linalg.norm(point - self.chain[x-1])
distance = distance/sum(self.seg_lens)
distances.append( distance )
return distances
def get_point(self, distance) :
'''
get a point in the 2D space given a
distance from the first point of the chain
'''
if not 0 <= distance <= 1 :
raise ValueError('distance must be a proportion of the polyline length (0,1)')
distance = sum(self.seg_lens)*distance
cum_ln = 0
for l in range(self.ln-1) :
s_ln = self.seg_lens[l]
if cum_ln <= distance <= cum_ln+s_ln :
break
cum_ln += self.seg_lens[l]
rel_ln = distance - cum_ln
return self.chain[l] + \
( rel_ln*np.cos( sum(self.seg_angles[:(l+1)]) ), \
rel_ln*np.sin( sum(self.seg_angles[:(l+1)]) ) )
def get_dense_chain(self, density) :
dense_chain = []
dense_chain.append(self.get_point( 0 ))
for x in range( density ) :
dense_chain.append(self.get_point( (1+x)/float(density+1) ))
dense_chain.append(self.get_point( 1 ))
return np.vstack(dense_chain)
def get_length(self) :
'''
return: the length of the current polyline
'''
return sum(self.seg_lens)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# ARM -----------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
class PID(object) :
def __init__(self, n=1, dt=0.1, Kp=0.1, Ki=0.9, Kd=0.001 ):
self.n = n
self.dt = dt
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
def reset(self):
n = self.n
self.previous_error = np.zeros(n)
self.integral = np.zeros(n)
self.derivative = np.zeros(n)
self.setpoint = np.zeros(n)
self.output = np.zeros(n)
def step(self, measured_value, setpoint=None):
if setpoint is not None:
self.setpoint = np.array(setpoint)
error = self.setpoint - measured_value
self.integral = self.integral + error*self.dt
self.derivative = (error - self.previous_error)/self.dt
self.output = self.Kp*error + \
self.Ki*self.integral + \
self.Kd*self.derivative
self.previous_error = error
return self.output
class Arm(object):
"""
Kinematics of a number_of_joint-degrees-of-freedom
2-dimensional arm.
Given the increment of joint angles calculate
the current positions of the edges of the
arm segments.
"""
def __init__(self,
number_of_joint = 3,
joint_lims = None,
segment_lengths = None,
origin = [0,0],
mirror = False
):
"""
number_of_joint: (int) number of joints
joint_angles (list): initial joint angles
joint_lims (list): joint angles limits
segment_lengths (list): length of arm segments
origin (list): origin coords of arm
"""
self.mirror = mirror
self.number_of_joint = number_of_joint
# initialize lengths
if segment_lengths is None:
segment_lengths = np.ones(number_of_joint)
self.segment_lengths = np.array(segment_lengths)
# initialize limits
if joint_lims is None:
joint_lims = np.vstack([-np.ones(number_of_joint)*np.pi, np.ones(number_of_joint)*np.pi]).T
self.joint_lims = np.array(joint_lims)
# set origin coords
self.origin = np.array(origin)
def get_joint_positions(self, joint_angles ):
"""
Finds the (x, y) coordinates
of each joint
joint_angles (vector): current angles of the joints
return (array): 'number of joint' [x,y] coordinates
"""
# current angles
res_joint_angles = joint_angles.copy()
# detect limits
maskminus= res_joint_angles > self.joint_lims[:,0]
maskplus = res_joint_angles < self.joint_lims[:,1]
res_joint_angles = res_joint_angles*(maskplus*maskminus)
res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )
res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )
# mirror
if self.mirror :
|
res_joint_angles = -res_joint_angles
res_joint_angles[0] += np.pi
|
conditional_block
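The mask arithmetic in get_joint_positions above clamps each joint angle into its [lower, upper] interval. A minimal sketch (illustrative only, assuming numpy as np) showing that it behaves like np.clip:

import numpy as np

joint_lims = np.array([[-np.pi, np.pi], [-1.0, 1.0]])
angles = np.array([4.0, -2.0])
clamped = np.clip(angles, joint_lims[:, 0], joint_lims[:, 1])
print(clamped)  # [3.14159..., -1.0], the same result as the mask-based clamping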
|
|
modeltranslator.go
|
.Process = *process
*dbSpans = append(*dbSpans, dbSpan)
}
}
return nil
}
func (c *Translator) spanWithoutProcess(span pdata.Span) (*dbmodel.Span, error) {
if span.IsNil() {
return nil, nil
}
traceID, err := convertTraceID(span.TraceID())
if err != nil {
return nil, err
}
spanID, err := convertSpanID(span.SpanID())
if err != nil {
return nil, err
}
references, err := references(span.Links(), span.ParentSpanID(), traceID)
if err != nil {
return nil, err
}
startTime := toTime(span.StartTime())
startTimeMicros := model.TimeAsEpochMicroseconds(startTime)
tags, tagMap := c.tags(span)
return &dbmodel.Span{
TraceID: traceID,
SpanID: spanID,
References: references,
OperationName: span.Name(),
StartTime: startTimeMicros,
StartTimeMillis: startTimeMicros / 1000,
Duration: model.DurationAsMicroseconds(toTime(span.EndTime()).Sub(startTime)),
Tags: tags,
Tag: tagMap,
Logs: logs(span.Events()),
}, nil
}
func toTime(nano pdata.TimestampUnixNano) time.Time {
return time.Unix(0, int64(nano)).UTC()
}
func references(links pdata.SpanLinkSlice, parentSpanID pdata.SpanID, traceID dbmodel.TraceID) ([]dbmodel.Reference, error) {
parentSpanIDSet := len(parentSpanID.Bytes()) != 0
if !parentSpanIDSet && links.Len() == 0 {
return emptyReferenceList, nil
}
refsCount := links.Len()
if parentSpanIDSet {
refsCount++
}
refs := make([]dbmodel.Reference, 0, refsCount)
// Put the parent span ID first because backends usually look for it
// as the first CHILD_OF item in the model.SpanRef slice.
if parentSpanIDSet {
jParentSpanID, err := convertSpanID(parentSpanID)
if err != nil {
return nil, fmt.Errorf("OC incorrect parent span ID: %v", err)
}
refs = append(refs, dbmodel.Reference{
TraceID: traceID,
SpanID: jParentSpanID,
RefType: dbmodel.ChildOf,
})
}
for i := 0; i < links.Len(); i++ {
link := links.At(i)
if link.IsNil() {
continue
}
traceID, err := convertTraceID(link.TraceID())
if err != nil {
continue // skip invalid link
}
spanID, err := convertSpanID(link.SpanID())
if err != nil {
continue // skip invalid link
}
refs = append(refs, dbmodel.Reference{
TraceID: traceID,
SpanID: spanID,
// Since Jaeger RefType is not captured in internal data,
// use SpanRefType_FOLLOWS_FROM by default.
// SpanRefType_CHILD_OF is supposed to be set only from parentSpanID.
RefType: dbmodel.FollowsFrom,
})
}
return refs, nil
}
func convertSpanID(spanID pdata.SpanID) (dbmodel.SpanID, error) {
spanIDInt, err := tracetranslator.BytesToUInt64SpanID(spanID)
if err != nil {
return "", err
}
if spanIDInt == 0 {
return "", errZeroSpanID
}
return dbmodel.SpanID(fmt.Sprintf("%016x", spanIDInt)), nil
}
func convertTraceID(traceID pdata.TraceID) (dbmodel.TraceID, error) {
high, low, err := tracetranslator.BytesToUInt64TraceID(traceID)
if err != nil {
return "", err
}
if low == 0 && high == 0 {
return "", errZeroTraceID
}
return dbmodel.TraceID(traceIDToString(high, low)), nil
}
func traceIDToString(high, low uint64) string {
if high == 0 {
return fmt.Sprintf("%016x", low)
}
return fmt.Sprintf("%016x%016x", high, low)
}
func (c *Translator) process(resource pdata.Resource) *dbmodel.Process {
if resource.IsNil() || resource.Attributes().Len() == 0 {
return nil
}
p := &dbmodel.Process{}
attrs := resource.Attributes()
attrsCount := attrs.Len()
if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok {
p.ServiceName = serviceName.StringVal()
attrsCount--
}
if attrsCount == 0 {
return p
}
tags := make([]dbmodel.KeyValue, 0, attrsCount)
var tagMap map[string]interface{}
if c.allTagsAsFields || len(c.tagKeysAsFields) > 0 {
tagMap = make(map[string]interface{}, attrsCount)
}
tags, tagMap = c.appendTagsFromAttributes(tags, tagMap, attrs, true)
p.Tags = tags
if len(tagMap) > 0 {
p.Tag = tagMap
}
return p
}
func (c *Translator) tags(span pdata.Span) ([]dbmodel.KeyValue, map[string]interface{}) {
var spanKindTag, statusCodeTag, errorTag, statusMsgTag dbmodel.KeyValue
var spanKindTagFound, statusCodeTagFound, errorTagFound, statusMsgTagFound bool
tagsCount := span.Attributes().Len()
spanKindTag, spanKindTagFound = getTagFromSpanKind(span.Kind())
if spanKindTagFound {
tagsCount++
}
status := span.Status()
if !status.IsNil() {
statusCodeTag, statusCodeTagFound = getTagFromStatusCode(status.Code())
tagsCount++
errorTag, errorTagFound = getErrorTagFromStatusCode(status.Code())
if errorTagFound {
tagsCount++
}
statusMsgTag, statusMsgTagFound = getTagFromStatusMsg(status.Message())
if statusMsgTagFound {
tagsCount++
}
}
if tagsCount == 0 {
return emptyTagList, nil
}
tags := make([]dbmodel.KeyValue, 0, tagsCount)
var tagMap map[string]interface{}
if spanKindTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[spanKindTag.Key] {
tagMap = c.addToTagMap(spanKindTag.Key, spanKindTag.Value, tagMap)
} else {
tags = append(tags, spanKindTag)
}
}
if statusCodeTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusCodeTag.Key] {
tagMap = c.addToTagMap(statusCodeTag.Key, statusCodeTag.Value, tagMap)
} else {
tags = append(tags, statusCodeTag)
}
}
if errorTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[errorTag.Key] {
tagMap = c.addToTagMap(errorTag.Key, errorTag.Value, tagMap)
} else {
tags = append(tags, errorTag)
}
}
if statusMsgTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusMsgTag.Key] {
tagMap = c.addToTagMap(statusMsgTag.Key, statusMsgTag.Value, tagMap)
} else {
tags = append(tags, statusMsgTag)
}
}
return c.appendTagsFromAttributes(tags, tagMap, span.Attributes(), false)
}
func (c *Translator) addToTagMap(key string, val interface{}, tagMap map[string]interface{}) map[string]interface{} {
if tagMap == nil {
tagMap = map[string]interface{}{}
}
tagMap[strings.Replace(key, ".", c.tagDotReplacement, -1)] = val
return tagMap
}
func getTagFromSpanKind(spanKind pdata.SpanKind) (dbmodel.KeyValue, bool) {
var tagStr string
switch spanKind {
case pdata.SpanKindCLIENT:
tagStr = string(tracetranslator.OpenTracingSpanKindClient)
case pdata.SpanKindSERVER:
tagStr = string(tracetranslator.OpenTracingSpanKindServer)
case pdata.SpanKindPRODUCER:
tagStr = string(tracetranslator.OpenTracingSpanKindProducer)
case pdata.SpanKindCONSUMER:
tagStr = string(tracetranslator.OpenTracingSpanKindConsumer)
default:
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagSpanKind,
Type: dbmodel.StringType,
Value: tagStr,
}, true
}
func getTagFromStatusCode(statusCode pdata.StatusCode) (dbmodel.KeyValue, bool) {
return dbmodel.KeyValue{
Key: tracetranslator.TagStatusCode,
// TODO is this ok?
Value: statusCode.String(),
Type: dbmodel.StringType,
}, true
}
func getErrorTagFromStatusCode(statusCode pdata.StatusCode) (dbmodel.KeyValue, bool) {
if statusCode == pdata.StatusCode(0)
|
{
return dbmodel.KeyValue{}, false
}
|
conditional_block
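The ID helpers in modeltranslator.go render each 64-bit word as 16 zero-padded lowercase hex digits and drop the high word when it is zero. A Python sketch of the same formatting rule (illustrative, not part of the Go source):

def trace_id_to_string(high, low):
    # mirrors traceIDToString: 16 hex digits per 64-bit word
    if high == 0:
        return format(low, '016x')
    return format(high, '016x') + format(low, '016x')

print(trace_id_to_string(0, 0xabc))  # 0000000000000abc
print(trace_id_to_string(1, 0xabc))  # 00000000000000010000000000000abc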
|
|
modeltranslator.go
|
// ConvertSpans converts spans from OTEL model to Jaeger Elasticsearch model
func (c *Translator) ConvertSpans(traces pdata.Traces) ([]*dbmodel.Span, error) {
rss := traces.ResourceSpans()
if rss.Len() == 0 {
return nil, nil
}
dbSpans := make([]*dbmodel.Span, 0, traces.SpanCount())
for i := 0; i < rss.Len(); i++ {
// this would correspond to a single batch
err := c.resourceSpans(rss.At(i), &dbSpans)
if err != nil {
return nil, err
}
}
return dbSpans, nil
}
func (c *Translator) resourceSpans(spans pdata.ResourceSpans, dbSpans *[]*dbmodel.Span) error {
ils := spans.InstrumentationLibrarySpans()
process := c.process(spans.Resource())
for i := 0; i < ils.Len(); i++ {
// TODO convert instrumentation library info
//ils.At(i).InstrumentationLibrary()
spans := ils.At(i).Spans()
for j := 0; j < spans.Len(); j++ {
dbSpan, err := c.spanWithoutProcess(spans.At(j))
if err != nil {
return err
}
dbSpan.Process = *process
*dbSpans = append(*dbSpans, dbSpan)
}
}
return nil
}
func (c *Translator) spanWithoutProcess(span pdata.Span) (*dbmodel.Span, error) {
if span.IsNil() {
return nil, nil
}
traceID, err := convertTraceID(span.TraceID())
if err != nil {
return nil, err
}
spanID, err := convertSpanID(span.SpanID())
if err != nil {
return nil, err
}
references, err := references(span.Links(), span.ParentSpanID(), traceID)
if err != nil {
return nil, err
}
startTime := toTime(span.StartTime())
startTimeMicros := model.TimeAsEpochMicroseconds(startTime)
tags, tagMap := c.tags(span)
return &dbmodel.Span{
TraceID: traceID,
SpanID: spanID,
References: references,
OperationName: span.Name(),
StartTime: startTimeMicros,
StartTimeMillis: startTimeMicros / 1000,
Duration: model.DurationAsMicroseconds(toTime(span.EndTime()).Sub(startTime)),
Tags: tags,
Tag: tagMap,
Logs: logs(span.Events()),
}, nil
}
func toTime(nano pdata.TimestampUnixNano) time.Time {
return time.Unix(0, int64(nano)).UTC()
}
func references(links pdata.SpanLinkSlice, parentSpanID pdata.SpanID, traceID dbmodel.TraceID) ([]dbmodel.Reference, error) {
parentSpanIDSet := len(parentSpanID.Bytes()) != 0
if !parentSpanIDSet && links.Len() == 0 {
return emptyReferenceList, nil
}
refsCount := links.Len()
if parentSpanIDSet {
refsCount++
}
refs := make([]dbmodel.Reference, 0, refsCount)
// Put the parent span ID first because backends usually look for it
// as the first CHILD_OF item in the model.SpanRef slice.
if parentSpanIDSet {
jParentSpanID, err := convertSpanID(parentSpanID)
if err != nil {
return nil, fmt.Errorf("OC incorrect parent span ID: %v", err)
}
refs = append(refs, dbmodel.Reference{
TraceID: traceID,
SpanID: jParentSpanID,
RefType: dbmodel.ChildOf,
})
}
for i := 0; i < links.Len(); i++ {
link := links.At(i)
if link.IsNil() {
continue
}
traceID, err := convertTraceID(link.TraceID())
if err != nil {
continue // skip invalid link
}
spanID, err := convertSpanID(link.SpanID())
if err != nil {
continue // skip invalid link
}
refs = append(refs, dbmodel.Reference{
TraceID: traceID,
SpanID: spanID,
// Since Jaeger RefType is not captured in internal data,
// use SpanRefType_FOLLOWS_FROM by default.
// SpanRefType_CHILD_OF is supposed to be set only from parentSpanID.
RefType: dbmodel.FollowsFrom,
})
}
return refs, nil
}
func convertSpanID(spanID pdata.SpanID) (dbmodel.SpanID, error) {
spanIDInt, err := tracetranslator.BytesToUInt64SpanID(spanID)
if err != nil {
return "", err
}
if spanIDInt == 0 {
return "", errZeroSpanID
}
return dbmodel.SpanID(fmt.Sprintf("%016x", spanIDInt)), nil
}
func convertTraceID(traceID pdata.TraceID) (dbmodel.TraceID, error) {
high, low, err := tracetranslator.BytesToUInt64TraceID(traceID)
if err != nil {
return "", err
}
if low == 0 && high == 0 {
return "", errZeroTraceID
}
return dbmodel.TraceID(traceIDToString(high, low)), nil
}
func traceIDToString(high, low uint64) string {
if high == 0 {
return fmt.Sprintf("%016x", low)
}
return fmt.Sprintf("%016x%016x", high, low)
}
func (c *Translator) process(resource pdata.Resource) *dbmodel.Process {
if resource.IsNil() || resource.Attributes().Len() == 0 {
return nil
}
p := &dbmodel.Process{}
attrs := resource.Attributes()
attrsCount := attrs.Len()
if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok {
p.ServiceName = serviceName.StringVal()
attrsCount--
}
if attrsCount == 0 {
return p
}
tags := make([]dbmodel.KeyValue, 0, attrsCount)
var tagMap map[string]interface{}
if c.allTagsAsFields || len(c.tagKeysAsFields) > 0 {
tagMap = make(map[string]interface{}, attrsCount)
}
tags, tagMap = c.appendTagsFromAttributes(tags, tagMap, attrs, true)
p.Tags = tags
if len(tagMap) > 0 {
p.Tag = tagMap
}
return p
}
func (c *Translator) tags(span pdata.Span) ([]dbmodel.KeyValue, map[string]interface{}) {
var spanKindTag, statusCodeTag, errorTag, statusMsgTag dbmodel.KeyValue
var spanKindTagFound, statusCodeTagFound, errorTagFound, statusMsgTagFound bool
tagsCount := span.Attributes().Len()
spanKindTag, spanKindTagFound = getTagFromSpanKind(span.Kind())
if spanKindTagFound {
tagsCount++
}
status := span.Status()
if !status.IsNil() {
statusCodeTag, statusCodeTagFound = getTagFromStatusCode(status.Code())
tagsCount++
errorTag, errorTagFound = getErrorTagFromStatusCode(status.Code())
if errorTagFound {
tagsCount++
}
statusMsgTag, statusMsgTagFound = getTagFromStatusMsg(status.Message())
if statusMsgTagFound {
tagsCount++
}
}
if tagsCount == 0 {
return emptyTagList, nil
}
tags := make([]dbmodel.KeyValue, 0, tagsCount)
var tagMap map[string]interface{}
if spanKindTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[spanKindTag.Key] {
tagMap = c.addToTagMap(spanKindTag.Key, spanKindTag.Value, tagMap)
} else {
tags = append(tags, spanKindTag)
}
}
if statusCodeTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusCodeTag.Key] {
tagMap = c.addToTagMap(statusCodeTag.Key, statusCodeTag.Value, tagMap)
} else {
tags = append(tags, statusCodeTag)
}
}
if errorTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[errorTag.Key] {
tagMap = c.addToTagMap(errorTag.Key, errorTag.Value, tagMap)
} else {
tags = append(tags, errorTag)
}
}
if statusMsgTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusMsgTag.Key] {
tagMap = c.addToTagMap(statusMsgTag.Key, statusMsgTag.Value, tagMap)
} else {
tags = append(tags, statusMsgTag)
}
}
return c.append
|
{
tagsKeysAsFieldsMap := map[string]bool{}
for _, v := range tagsKeysAsFields {
tagsKeysAsFieldsMap[v] = true
}
return &Translator{
allTagsAsFields: allTagsAsFields,
tagKeysAsFields: tagsKeysAsFieldsMap,
tagDotReplacement: tagDotReplacement,
}
}
|
identifier_body
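The constructor body shown as the middle above turns the list of tag keys into a boolean lookup map. A Python sketch of the same pattern (illustrative only; a set plays the role of Go's map[string]bool):

class TranslatorSketch:
    def __init__(self, all_tags_as_fields, tag_keys_as_fields, tag_dot_replacement):
        self.all_tags_as_fields = all_tags_as_fields
        # set membership mirrors the Go map lookup
        self.tag_keys_as_fields = set(tag_keys_as_fields)
        self.tag_dot_replacement = tag_dot_replacement

t = TranslatorSketch(False, ["http.method", "http.url"], "_")
print("http.method" in t.tag_keys_as_fields)  # True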
|
|
modeltranslator.go
|
traceID,
SpanID: spanID,
// Since Jaeger RefType is not captured in internal data,
// use SpanRefType_FOLLOWS_FROM by default.
// SpanRefType_CHILD_OF is supposed to be set only from parentSpanID.
RefType: dbmodel.FollowsFrom,
})
}
return refs, nil
}
func convertSpanID(spanID pdata.SpanID) (dbmodel.SpanID, error) {
spanIDInt, err := tracetranslator.BytesToUInt64SpanID(spanID)
if err != nil {
return "", err
}
if spanIDInt == 0 {
return "", errZeroSpanID
}
return dbmodel.SpanID(fmt.Sprintf("%016x", spanIDInt)), nil
}
func convertTraceID(traceID pdata.TraceID) (dbmodel.TraceID, error) {
high, low, err := tracetranslator.BytesToUInt64TraceID(traceID)
if err != nil {
return "", err
}
if low == 0 && high == 0 {
return "", errZeroTraceID
}
return dbmodel.TraceID(traceIDToString(high, low)), nil
}
func traceIDToString(high, low uint64) string {
if high == 0 {
return fmt.Sprintf("%016x", low)
}
return fmt.Sprintf("%016x%016x", high, low)
}
func (c *Translator) process(resource pdata.Resource) *dbmodel.Process {
if resource.IsNil() || resource.Attributes().Len() == 0 {
return nil
}
p := &dbmodel.Process{}
attrs := resource.Attributes()
attrsCount := attrs.Len()
if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok {
p.ServiceName = serviceName.StringVal()
attrsCount--
}
if attrsCount == 0 {
return p
}
tags := make([]dbmodel.KeyValue, 0, attrsCount)
var tagMap map[string]interface{}
if c.allTagsAsFields || len(c.tagKeysAsFields) > 0 {
tagMap = make(map[string]interface{}, attrsCount)
}
tags, tagMap = c.appendTagsFromAttributes(tags, tagMap, attrs, true)
p.Tags = tags
if len(tagMap) > 0 {
p.Tag = tagMap
}
return p
}
func (c *Translator) tags(span pdata.Span) ([]dbmodel.KeyValue, map[string]interface{}) {
var spanKindTag, statusCodeTag, errorTag, statusMsgTag dbmodel.KeyValue
var spanKindTagFound, statusCodeTagFound, errorTagFound, statusMsgTagFound bool
tagsCount := span.Attributes().Len()
spanKindTag, spanKindTagFound = getTagFromSpanKind(span.Kind())
if spanKindTagFound {
tagsCount++
}
status := span.Status()
if !status.IsNil() {
statusCodeTag, statusCodeTagFound = getTagFromStatusCode(status.Code())
tagsCount++
errorTag, errorTagFound = getErrorTagFromStatusCode(status.Code())
if errorTagFound {
tagsCount++
}
statusMsgTag, statusMsgTagFound = getTagFromStatusMsg(status.Message())
if statusMsgTagFound {
tagsCount++
}
}
if tagsCount == 0 {
return emptyTagList, nil
}
tags := make([]dbmodel.KeyValue, 0, tagsCount)
var tagMap map[string]interface{}
if spanKindTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[spanKindTag.Key] {
tagMap = c.addToTagMap(spanKindTag.Key, spanKindTag.Value, tagMap)
} else {
tags = append(tags, spanKindTag)
}
}
if statusCodeTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusCodeTag.Key] {
tagMap = c.addToTagMap(statusCodeTag.Key, statusCodeTag.Value, tagMap)
} else {
tags = append(tags, statusCodeTag)
}
}
if errorTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[errorTag.Key] {
tagMap = c.addToTagMap(errorTag.Key, errorTag.Value, tagMap)
} else {
tags = append(tags, errorTag)
}
}
if statusMsgTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusMsgTag.Key] {
tagMap = c.addToTagMap(statusMsgTag.Key, statusMsgTag.Value, tagMap)
} else {
tags = append(tags, statusMsgTag)
}
}
return c.appendTagsFromAttributes(tags, tagMap, span.Attributes(), false)
}
func (c *Translator) addToTagMap(key string, val interface{}, tagMap map[string]interface{}) map[string]interface{} {
if tagMap == nil {
tagMap = map[string]interface{}{}
}
tagMap[strings.Replace(key, ".", c.tagDotReplacement, -1)] = val
return tagMap
}
func getTagFromSpanKind(spanKind pdata.SpanKind) (dbmodel.KeyValue, bool) {
var tagStr string
switch spanKind {
case pdata.SpanKindCLIENT:
tagStr = string(tracetranslator.OpenTracingSpanKindClient)
case pdata.SpanKindSERVER:
tagStr = string(tracetranslator.OpenTracingSpanKindServer)
case pdata.SpanKindPRODUCER:
tagStr = string(tracetranslator.OpenTracingSpanKindProducer)
case pdata.SpanKindCONSUMER:
tagStr = string(tracetranslator.OpenTracingSpanKindConsumer)
default:
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagSpanKind,
Type: dbmodel.StringType,
Value: tagStr,
}, true
}
func getTagFromStatusCode(statusCode pdata.StatusCode) (dbmodel.KeyValue, bool) {
return dbmodel.KeyValue{
Key: tracetranslator.TagStatusCode,
// TODO is this ok?
Value: statusCode.String(),
Type: dbmodel.StringType,
}, true
}
func getErrorTagFromStatusCode(statusCode pdata.StatusCode) (dbmodel.KeyValue, bool) {
if statusCode == pdata.StatusCode(0) {
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagError,
Value: "true",
Type: dbmodel.BoolType,
}, true
}
func getTagFromStatusMsg(statusMsg string) (dbmodel.KeyValue, bool) {
if statusMsg == "" {
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagStatusMsg,
Value: statusMsg,
Type: dbmodel.StringType,
}, true
}
func logs(events pdata.SpanEventSlice) []dbmodel.Log {
if events.Len() == 0 {
return emptyLogList
}
logs := make([]dbmodel.Log, 0, events.Len())
for i := 0; i < events.Len(); i++ {
event := events.At(i)
if event.IsNil() {
continue
}
var fields []dbmodel.KeyValue
if event.Attributes().Len() > 0 {
fields = make([]dbmodel.KeyValue, 0, event.Attributes().Len()+1)
if event.Name() != "" {
fields = append(fields, dbmodel.KeyValue{Key: eventNameKey, Value: event.Name(), Type: dbmodel.StringType})
}
event.Attributes().ForEach(func(k string, v pdata.AttributeValue) {
fields = append(fields, attributeToKeyValue(k, v))
})
}
logs = append(logs, dbmodel.Log{
Timestamp: model.TimeAsEpochMicroseconds(toTime(event.Timestamp())),
Fields: fields,
})
}
return logs
}
func (c *Translator) appendTagsFromAttributes(tags []dbmodel.KeyValue, tagMap map[string]interface{}, attrs pdata.AttributeMap, skipService bool) ([]dbmodel.KeyValue, map[string]interface{}) {
attrs.ForEach(func(key string, attr pdata.AttributeValue) {
if skipService && key == conventions.AttributeServiceName {
return
}
if c.allTagsAsFields || c.tagKeysAsFields[key] {
tagMap = c.addToTagMap(key, attributeValueToInterface(attr), tagMap)
} else {
tags = append(tags, attributeToKeyValue(key, attr))
}
})
return tags, tagMap
}
func attributeToKeyValue(key string, attr pdata.AttributeValue) dbmodel.KeyValue {
tag := dbmodel.KeyValue{
Key: key,
}
switch attr.Type() {
case pdata.AttributeValueSTRING:
tag.Type = dbmodel.StringType
tag.Value = attr.StringVal()
case pdata.AttributeValueBOOL:
tag.Type = dbmodel.BoolType
if attr.BoolVal() {
tag.Value = "true"
} else {
tag.Value = "false"
}
case pdata.AttributeValueINT:
tag.Type = dbmodel.Int64Type
|
tag.Value = strconv.FormatInt(attr.IntVal(), 10)
case pdata.AttributeValueDOUBLE:
tag.Type = dbmodel.Float64Type
tag.Value = strconv.FormatFloat(attr.DoubleVal(), 'g', 10, 64)
}
|
random_line_split
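The middle above stringifies int and double attribute values. A Python sketch of the same conversion rules (illustrative; the '.10g' format mirrors strconv.FormatFloat(v, 'g', 10, 64)):

def attribute_value_to_string(value):
    if isinstance(value, bool):       # bool first: bool is a subclass of int in Python
        return "true" if value else "false"
    if isinstance(value, int):
        return str(value)             # strconv.FormatInt(v, 10)
    if isinstance(value, float):
        return format(value, ".10g")  # strconv.FormatFloat(v, 'g', 10, 64)
    return str(value)

print(attribute_value_to_string(3.14159265358979))  # 3.141592654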
|
|
modeltranslator.go
|
}
references, err := references(span.Links(), span.ParentSpanID(), traceID)
if err != nil {
return nil, err
}
startTime := toTime(span.StartTime())
startTimeMicros := model.TimeAsEpochMicroseconds(startTime)
tags, tagMap := c.tags(span)
return &dbmodel.Span{
TraceID: traceID,
SpanID: spanID,
References: references,
OperationName: span.Name(),
StartTime: startTimeMicros,
StartTimeMillis: startTimeMicros / 1000,
Duration: model.DurationAsMicroseconds(toTime(span.EndTime()).Sub(startTime)),
Tags: tags,
Tag: tagMap,
Logs: logs(span.Events()),
}, nil
}
func toTime(nano pdata.TimestampUnixNano) time.Time {
return time.Unix(0, int64(nano)).UTC()
}
func references(links pdata.SpanLinkSlice, parentSpanID pdata.SpanID, traceID dbmodel.TraceID) ([]dbmodel.Reference, error) {
parentSpanIDSet := len(parentSpanID.Bytes()) != 0
if !parentSpanIDSet && links.Len() == 0 {
return emptyReferenceList, nil
}
refsCount := links.Len()
if parentSpanIDSet {
refsCount++
}
refs := make([]dbmodel.Reference, 0, refsCount)
// Put the parent span ID first because backends usually look for it
// as the first CHILD_OF item in the model.SpanRef slice.
if parentSpanIDSet {
jParentSpanID, err := convertSpanID(parentSpanID)
if err != nil {
return nil, fmt.Errorf("OC incorrect parent span ID: %v", err)
}
refs = append(refs, dbmodel.Reference{
TraceID: traceID,
SpanID: jParentSpanID,
RefType: dbmodel.ChildOf,
})
}
for i := 0; i < links.Len(); i++ {
link := links.At(i)
if link.IsNil() {
continue
}
traceID, err := convertTraceID(link.TraceID())
if err != nil {
continue // skip invalid link
}
spanID, err := convertSpanID(link.SpanID())
if err != nil {
continue // skip invalid link
}
refs = append(refs, dbmodel.Reference{
TraceID: traceID,
SpanID: spanID,
// Since Jaeger RefType is not captured in internal data,
// use SpanRefType_FOLLOWS_FROM by default.
// SpanRefType_CHILD_OF is supposed to be set only from parentSpanID.
RefType: dbmodel.FollowsFrom,
})
}
return refs, nil
}
func convertSpanID(spanID pdata.SpanID) (dbmodel.SpanID, error) {
spanIDInt, err := tracetranslator.BytesToUInt64SpanID(spanID)
if err != nil {
return "", err
}
if spanIDInt == 0 {
return "", errZeroSpanID
}
return dbmodel.SpanID(fmt.Sprintf("%016x", spanIDInt)), nil
}
func convertTraceID(traceID pdata.TraceID) (dbmodel.TraceID, error) {
high, low, err := tracetranslator.BytesToUInt64TraceID(traceID)
if err != nil {
return "", err
}
if low == 0 && high == 0 {
return "", errZeroTraceID
}
return dbmodel.TraceID(traceIDToString(high, low)), nil
}
func traceIDToString(high, low uint64) string {
if high == 0 {
return fmt.Sprintf("%016x", low)
}
return fmt.Sprintf("%016x%016x", high, low)
}
func (c *Translator) process(resource pdata.Resource) *dbmodel.Process {
if resource.IsNil() || resource.Attributes().Len() == 0 {
return nil
}
p := &dbmodel.Process{}
attrs := resource.Attributes()
attrsCount := attrs.Len()
if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok {
p.ServiceName = serviceName.StringVal()
attrsCount--
}
if attrsCount == 0 {
return p
}
tags := make([]dbmodel.KeyValue, 0, attrsCount)
var tagMap map[string]interface{}
if c.allTagsAsFields || len(c.tagKeysAsFields) > 0 {
tagMap = make(map[string]interface{}, attrsCount)
}
tags, tagMap = c.appendTagsFromAttributes(tags, tagMap, attrs, true)
p.Tags = tags
if len(tagMap) > 0 {
p.Tag = tagMap
}
return p
}
func (c *Translator) tags(span pdata.Span) ([]dbmodel.KeyValue, map[string]interface{}) {
var spanKindTag, statusCodeTag, errorTag, statusMsgTag dbmodel.KeyValue
var spanKindTagFound, statusCodeTagFound, errorTagFound, statusMsgTagFound bool
tagsCount := span.Attributes().Len()
spanKindTag, spanKindTagFound = getTagFromSpanKind(span.Kind())
if spanKindTagFound {
tagsCount++
}
status := span.Status()
if !status.IsNil() {
statusCodeTag, statusCodeTagFound = getTagFromStatusCode(status.Code())
tagsCount++
errorTag, errorTagFound = getErrorTagFromStatusCode(status.Code())
if errorTagFound {
tagsCount++
}
statusMsgTag, statusMsgTagFound = getTagFromStatusMsg(status.Message())
if statusMsgTagFound {
tagsCount++
}
}
if tagsCount == 0 {
return emptyTagList, nil
}
tags := make([]dbmodel.KeyValue, 0, tagsCount)
var tagMap map[string]interface{}
if spanKindTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[spanKindTag.Key] {
tagMap = c.addToTagMap(spanKindTag.Key, spanKindTag.Value, tagMap)
} else {
tags = append(tags, spanKindTag)
}
}
if statusCodeTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusCodeTag.Key] {
tagMap = c.addToTagMap(statusCodeTag.Key, statusCodeTag.Value, tagMap)
} else {
tags = append(tags, statusCodeTag)
}
}
if errorTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[errorTag.Key] {
tagMap = c.addToTagMap(errorTag.Key, errorTag.Value, tagMap)
} else {
tags = append(tags, errorTag)
}
}
if statusMsgTagFound {
if c.allTagsAsFields || c.tagKeysAsFields[statusMsgTag.Key] {
tagMap = c.addToTagMap(statusMsgTag.Key, statusMsgTag.Value, tagMap)
} else {
tags = append(tags, statusMsgTag)
}
}
return c.appendTagsFromAttributes(tags, tagMap, span.Attributes(), false)
}
func (c *Translator) addToTagMap(key string, val interface{}, tagMap map[string]interface{}) map[string]interface{} {
if tagMap == nil {
tagMap = map[string]interface{}{}
}
tagMap[strings.Replace(key, ".", c.tagDotReplacement, -1)] = val
return tagMap
}
func getTagFromSpanKind(spanKind pdata.SpanKind) (dbmodel.KeyValue, bool) {
var tagStr string
switch spanKind {
case pdata.SpanKindCLIENT:
tagStr = string(tracetranslator.OpenTracingSpanKindClient)
case pdata.SpanKindSERVER:
tagStr = string(tracetranslator.OpenTracingSpanKindServer)
case pdata.SpanKindPRODUCER:
tagStr = string(tracetranslator.OpenTracingSpanKindProducer)
case pdata.SpanKindCONSUMER:
tagStr = string(tracetranslator.OpenTracingSpanKindConsumer)
default:
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagSpanKind,
Type: dbmodel.StringType,
Value: tagStr,
}, true
}
func getTagFromStatusCode(statusCode pdata.StatusCode) (dbmodel.KeyValue, bool) {
return dbmodel.KeyValue{
Key: tracetranslator.TagStatusCode,
// TODO is this ok?
Value: statusCode.String(),
Type: dbmodel.StringType,
}, true
}
func getErrorTagFromStatusCode(statusCode pdata.StatusCode) (dbmodel.KeyValue, bool) {
if statusCode == pdata.StatusCode(0) {
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagError,
Value: "true",
Type: dbmodel.BoolType,
}, true
}
func getTagFromStatusMsg(statusMsg string) (dbmodel.KeyValue, bool) {
if statusMsg == "" {
return dbmodel.KeyValue{}, false
}
return dbmodel.KeyValue{
Key: tracetranslator.TagStatusMsg,
Value: statusMsg,
Type: dbmodel.StringType,
}, true
}
func
|
logs
|
identifier_name
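The masked identifier above is logs, the helper that converts span events into dbmodel logs with microsecond timestamps. A rough Python sketch of the shape of that conversion (illustrative; the dict layout and the "event" field name are assumptions):

def events_to_logs(events):
    logs = []
    for name, timestamp_nanos, attributes in events:
        fields = []
        if attributes:
            if name:
                fields.append({"key": "event", "value": name, "type": "string"})
            for k, v in attributes.items():
                fields.append({"key": k, "value": str(v), "type": "string"})
        # TimeAsEpochMicroseconds: nanoseconds down to microseconds
        logs.append({"timestamp": timestamp_nanos // 1000, "fields": fields})
    return logs

print(events_to_logs([("retry", 1_500_000_000, {"attempt": 2})]))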
|
|
bosh_exporter.go
|
.uaa.client-secret", "",
"BOSH UAA Client Secret ($BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET).",
)
boshLogLevel = flag.String(
"bosh.log-level", "ERROR",
"BOSH Log Level ($BOSH_EXPORTER_BOSH_LOG_LEVEL).",
)
boshCACertFile = flag.String(
"bosh.ca-cert-file", "",
"BOSH CA Certificate file ($BOSH_EXPORTER_BOSH_CA_CERT_FILE).",
)
filterDeployments = flag.String(
"filter.deployments", "",
"Comma separated deployments to filter ($BOSH_EXPORTER_FILTER_DEPLOYMENTS).",
)
filterAZs = flag.String(
"filter.azs", "",
"Comma separated AZs to filter ($BOSH_EXPORTER_FILTER_AZS).",
)
filterCollectors = flag.String(
"filter.collectors", "",
"Comma separated collectors to filter (Deployments,Jobs,ServiceDiscovery) ($BOSH_EXPORTER_FILTER_COLLECTORS).",
)
metricsNamespace = flag.String(
"metrics.namespace", "bosh",
"Metrics Namespace ($BOSH_EXPORTER_METRICS_NAMESPACE).",
)
metricsEnvironment = flag.String(
"metrics.environment", "",
"Environment label to be attached to metrics ($BOSH_EXPORTER_METRICS_ENVIRONMENT).",
)
sdFilename = flag.String(
"sd.filename", "bosh_target_groups.json",
"Full path to the Service Discovery output file ($BOSH_EXPORTER_SD_FILENAME).",
)
sdProcessesRegexp = flag.String(
"sd.processes_regexp", "",
"Regexp to filter Service Discovery processes names ($BOSH_EXPORTER_SD_PROCESSES_REGEXP).",
)
showVersion = flag.Bool(
"version", false,
"Print version information.",
)
listenAddress = flag.String(
"web.listen-address", ":9190",
"Address to listen on for web interface and telemetry ($BOSH_EXPORTER_WEB_LISTEN_ADDRESS).",
)
metricsPath = flag.String(
"web.telemetry-path", "/metrics",
"Path under which to expose Prometheus metrics ($BOSH_EXPORTER_WEB_TELEMETRY_PATH).",
)
authUsername = flag.String(
"web.auth.username", "",
"Username for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_USERNAME).",
)
authPassword = flag.String(
"web.auth.password", "",
"Password for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_PASSWORD).",
)
tlsCertFile = flag.String(
"web.tls.cert_file", "",
"Path to a file that contains the TLS certificate (PEM format). If the certificate is signed by a certificate authority, the file should be the concatenation of the server's certificate, any intermediates, and the CA's certificate ($BOSH_EXPORTER_WEB_TLS_CERTFILE).",
)
tlsKeyFile = flag.String(
"web.tls.key_file", "",
"Path to a file that contains the TLS private key (PEM format) ($BOSH_EXPORTER_WEB_TLS_KEYFILE).",
)
)
func init() {
prometheus.MustRegister(version.NewCollector(*metricsNamespace))
}
func overrideFlagsWithEnvVars() {
overrideWithEnvVar("BOSH_EXPORTER_BOSH_URL", boshURL)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_USERNAME", boshUsername)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_PASSWORD", boshPassword)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_ID", boshUAAClientID)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET", boshUAAClientSecret)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_LOG_LEVEL", boshLogLevel)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_CA_CERT_FILE", boshCACertFile)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_DEPLOYMENTS", filterDeployments)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_AZS", filterAZs)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_COLLECTORS", filterCollectors)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_NAMESPACE", metricsNamespace)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_ENVIRONMENT", metricsEnvironment)
overrideWithEnvVar("BOSH_EXPORTER_SD_FILENAME", sdFilename)
overrideWithEnvVar("BOSH_EXPORTER_SD_PROCESSES_REGEXP", sdProcessesRegexp)
overrideWithEnvVar("BOSH_EXPORTER_WEB_LISTEN_ADDRESS", listenAddress)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TELEMETRY_PATH", metricsPath)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_USERNAME", authUsername)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_PASSWORD", authPassword)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_CERTFILE", tlsCertFile)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_KEYFILE", tlsKeyFile)
}
func overrideWithEnvVar(name string, value *string) {
envValue := os.Getenv(name)
if envValue != "" {
*value = envValue
}
}
type basicAuthHandler struct {
handler http.HandlerFunc
username string
password string
}
func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != h.username || password != h.password {
log.Errorf("Invalid HTTP auth from `%s`", r.RemoteAddr)
w.Header().Set("WWW-Authenticate", "Basic realm=\"metrics\"")
http.Error(w, "Invalid username or password", http.StatusUnauthorized)
return
}
h.handler(w, r)
}
func prometheusHandler() http.Handler {
handler := prometheus.Handler()
if *authUsername != "" && *authPassword != "" {
handler = &basicAuthHandler{
handler: prometheus.Handler().ServeHTTP,
username: *authUsername,
password: *authPassword,
}
}
return handler
}
func readCACert(CACertFile string, logger logger.Logger) (string, error) {
if CACertFile != "" {
fs := system.NewOsFileSystem(logger)
CACertFileFullPath, err := fs.ExpandPath(CACertFile)
if err != nil {
return "", err
}
CACert, err := fs.ReadFileString(CACertFileFullPath)
if err != nil {
return "", err
}
return CACert, nil
}
return "", nil
}
func buildBOSHClient() (director.Director, error) {
logLevel, err := logger.Levelify(*boshLogLevel)
if err != nil {
return nil, err
}
logger := logger.NewLogger(logLevel)
directorConfig, err := director.NewConfigFromURL(*boshURL)
if err != nil {
return nil, err
}
boshCACert, err := readCACert(*boshCACertFile, logger)
if err != nil {
return nil, err
}
directorConfig.CACert = boshCACert
anonymousDirector, err := director.NewFactory(logger).New(directorConfig, nil, nil)
if err != nil {
return nil, err
}
boshInfo, err := anonymousDirector.Info()
if err != nil {
return nil, err
}
if boshInfo.Auth.Type != "uaa" {
directorConfig.Client = *boshUsername
directorConfig.ClientSecret = *boshPassword
} else {
uaaURL := boshInfo.Auth.Options["url"]
uaaURLStr, ok := uaaURL.(string)
if !ok {
return nil, fmt.Errorf("Expected UAA URL '%s' to be a string", uaaURL)
|
}
uaaConfig, err := uaa.NewConfigFromURL(uaaURLStr)
if err != nil {
return nil, err
}
uaaConfig.CACert = boshCACert
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
uaaConfig.Client = *boshUAAClientID
uaaConfig.ClientSecret = *boshUAAClientSecret
} else {
uaaConfig.Client = "bosh_cli"
}
uaaFactory := uaa.NewFactory(logger)
uaaClient, err := uaaFactory.New(uaaConfig)
if err != nil {
return nil, err
}
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
directorConfig.TokenFunc = uaa.NewClientTokenSession(uaaClient).TokenFunc
} else {
answers := []uaa.PromptAnswer{
uaa.PromptAnswer{
Key: "username",
Value: *boshUsername,
},
uaa.PromptAnswer{
Key: "password",
Value: *boshPassword,
},
}
accessToken, err := uaaClient.OwnerPasswordCredentialsGrant(answers)
if err != nil {
return nil, err
}
origToken := uaaClient.NewStaleAccessToken(accessToken.RefreshToken().Value())
directorConfig.TokenFunc = uaa.NewAccessTokenSession(orig
|
random_line_split
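overrideWithEnvVar above lets a non-empty environment variable take precedence over a flag's value. A Python sketch of the same precedence rule (illustrative only):

import os

def override_with_env_var(name, value):
    # keep the flag value unless the environment variable is set and non-empty
    env_value = os.environ.get(name, "")
    return env_value if env_value != "" else value

listen_address = override_with_env_var("BOSH_EXPORTER_WEB_LISTEN_ADDRESS", ":9190")
print(listen_address)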
|
|
bosh_exporter.go
|
.uaa.client-secret", "",
"BOSH UAA Client Secret ($BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET).",
)
boshLogLevel = flag.String(
"bosh.log-level", "ERROR",
"BOSH Log Level ($BOSH_EXPORTER_BOSH_LOG_LEVEL).",
)
boshCACertFile = flag.String(
"bosh.ca-cert-file", "",
"BOSH CA Certificate file ($BOSH_EXPORTER_BOSH_CA_CERT_FILE).",
)
filterDeployments = flag.String(
"filter.deployments", "",
"Comma separated deployments to filter ($BOSH_EXPORTER_FILTER_DEPLOYMENTS).",
)
filterAZs = flag.String(
"filter.azs", "",
"Comma separated AZs to filter ($BOSH_EXPORTER_FILTER_AZS).",
)
filterCollectors = flag.String(
"filter.collectors", "",
"Comma separated collectors to filter (Deployments,Jobs,ServiceDiscovery) ($BOSH_EXPORTER_FILTER_COLLECTORS).",
)
metricsNamespace = flag.String(
"metrics.namespace", "bosh",
"Metrics Namespace ($BOSH_EXPORTER_METRICS_NAMESPACE).",
)
metricsEnvironment = flag.String(
"metrics.environment", "",
"Environment label to be attached to metrics ($BOSH_EXPORTER_METRICS_ENVIRONMENT).",
)
sdFilename = flag.String(
"sd.filename", "bosh_target_groups.json",
"Full path to the Service Discovery output file ($BOSH_EXPORTER_SD_FILENAME).",
)
sdProcessesRegexp = flag.String(
"sd.processes_regexp", "",
"Regexp to filter Service Discovery processes names ($BOSH_EXPORTER_SD_PROCESSES_REGEXP).",
)
showVersion = flag.Bool(
"version", false,
"Print version information.",
)
listenAddress = flag.String(
"web.listen-address", ":9190",
"Address to listen on for web interface and telemetry ($BOSH_EXPORTER_WEB_LISTEN_ADDRESS).",
)
metricsPath = flag.String(
"web.telemetry-path", "/metrics",
"Path under which to expose Prometheus metrics ($BOSH_EXPORTER_WEB_TELEMETRY_PATH).",
)
authUsername = flag.String(
"web.auth.username", "",
"Username for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_USERNAME).",
)
authPassword = flag.String(
"web.auth.password", "",
"Password for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_PASSWORD).",
)
tlsCertFile = flag.String(
"web.tls.cert_file", "",
"Path to a file that contains the TLS certificate (PEM format). If the certificate is signed by a certificate authority, the file should be the concatenation of the server's certificate, any intermediates, and the CA's certificate ($BOSH_EXPORTER_WEB_TLS_CERTFILE).",
)
tlsKeyFile = flag.String(
"web.tls.key_file", "",
"Path to a file that contains the TLS private key (PEM format) ($BOSH_EXPORTER_WEB_TLS_KEYFILE).",
)
)
func init() {
prometheus.MustRegister(version.NewCollector(*metricsNamespace))
}
func overrideFlagsWithEnvVars() {
overrideWithEnvVar("BOSH_EXPORTER_BOSH_URL", boshURL)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_USERNAME", boshUsername)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_PASSWORD", boshPassword)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_ID", boshUAAClientID)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET", boshUAAClientSecret)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_LOG_LEVEL", boshLogLevel)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_CA_CERT_FILE", boshCACertFile)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_DEPLOYMENTS", filterDeployments)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_AZS", filterAZs)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_COLLECTORS", filterCollectors)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_NAMESPACE", metricsNamespace)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_ENVIRONMENT", metricsEnvironment)
overrideWithEnvVar("BOSH_EXPORTER_SD_FILENAME", sdFilename)
overrideWithEnvVar("BOSH_EXPORTER_SD_PROCESSES_REGEXP", sdProcessesRegexp)
overrideWithEnvVar("BOSH_EXPORTER_WEB_LISTEN_ADDRESS", listenAddress)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TELEMETRY_PATH", metricsPath)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_USERNAME", authUsername)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_PASSWORD", authPassword)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_CERTFILE", tlsCertFile)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_KEYFILE", tlsKeyFile)
}
func overrideWithEnvVar(name string, value *string) {
envValue := os.Getenv(name)
if envValue != "" {
*value = envValue
}
}
type basicAuthHandler struct {
handler http.HandlerFunc
username string
password string
}
func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != h.username || password != h.password {
log.Errorf("Invalid HTTP auth from `%s`", r.RemoteAddr)
w.Header().Set("WWW-Authenticate", "Basic realm=\"metrics\"")
http.Error(w, "Invalid username or password", http.StatusUnauthorized)
return
}
h.handler(w, r)
}
func prometheusHandler() http.Handler {
handler := prometheus.Handler()
if *authUsername != "" && *authPassword != "" {
handler = &basicAuthHandler{
handler: prometheus.Handler().ServeHTTP,
username: *authUsername,
password: *authPassword,
}
}
return handler
}
func readCACert(CACertFile string, logger logger.Logger) (string, error) {
if CACertFile != "" {
fs := system.NewOsFileSystem(logger)
CACertFileFullPath, err := fs.ExpandPath(CACertFile)
if err != nil {
return "", err
}
CACert, err := fs.ReadFileString(CACertFileFullPath)
if err != nil {
return "", err
}
return CACert, nil
}
return "", nil
}
func buildBOSHClient() (director.Director, error) {
logLevel, err := logger.Levelify(*boshLogLevel)
if err != nil
|
logger := logger.NewLogger(logLevel)
directorConfig, err := director.NewConfigFromURL(*boshURL)
if err != nil {
return nil, err
}
boshCACert, err := readCACert(*boshCACertFile, logger)
if err != nil {
return nil, err
}
directorConfig.CACert = boshCACert
anonymousDirector, err := director.NewFactory(logger).New(directorConfig, nil, nil)
if err != nil {
return nil, err
}
boshInfo, err := anonymousDirector.Info()
if err != nil {
return nil, err
}
if boshInfo.Auth.Type != "uaa" {
directorConfig.Client = *boshUsername
directorConfig.ClientSecret = *boshPassword
} else {
uaaURL := boshInfo.Auth.Options["url"]
uaaURLStr, ok := uaaURL.(string)
if !ok {
return nil, fmt.Errorf("Expected UAA URL '%s' to be a string", uaaURL)
}
uaaConfig, err := uaa.NewConfigFromURL(uaaURLStr)
if err != nil {
return nil, err
}
uaaConfig.CACert = boshCACert
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
uaaConfig.Client = *boshUAAClientID
uaaConfig.ClientSecret = *boshUAAClientSecret
} else {
uaaConfig.Client = "bosh_cli"
}
uaaFactory := uaa.NewFactory(logger)
uaaClient, err := uaaFactory.New(uaaConfig)
if err != nil {
return nil, err
}
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
directorConfig.TokenFunc = uaa.NewClientTokenSession(uaaClient).TokenFunc
} else {
answers := []uaa.PromptAnswer{
uaa.PromptAnswer{
Key: "username",
Value: *boshUsername,
},
uaa.PromptAnswer{
Key: "password",
Value: *boshPassword,
},
}
accessToken, err := uaaClient.OwnerPasswordCredentialsGrant(answers)
if err != nil {
return nil, err
}
origToken := uaaClient.NewStaleAccessToken(accessToken.RefreshToken().Value())
directorConfig.TokenFunc = uaa.NewAccessToken
|
{
return nil, err
}
|
conditional_block
|
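A note on the override pattern in the row above: the exporter parses its flags first, then lets matching environment variables win. A minimal self-contained sketch of the same idea, assuming nothing beyond the standard library (the flag and variable names here are illustrative, not from bosh_exporter):

package main

import (
	"flag"
	"fmt"
	"os"
)

// overrideWithEnvVar replaces a flag's value when the named
// environment variable is set, mirroring the helper above.
func overrideWithEnvVar(name string, value *string) {
	if envValue := os.Getenv(name); envValue != "" {
		*value = envValue
	}
}

func main() {
	listen := flag.String("web.listen-address", ":9190", "listen address")
	flag.Parse()
	// The environment takes precedence over the command line.
	overrideWithEnvVar("MY_EXPORTER_WEB_LISTEN_ADDRESS", listen)
	fmt.Println("listening on", *listen)
}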
bosh_exporter.go
|
.uaa.client-secret", "",
"BOSH UAA Client Secret ($BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET).",
)
boshLogLevel = flag.String(
"bosh.log-level", "ERROR",
"BOSH Log Level ($BOSH_EXPORTER_BOSH_LOG_LEVEL).",
)
boshCACertFile = flag.String(
"bosh.ca-cert-file", "",
"BOSH CA Certificate file ($BOSH_EXPORTER_BOSH_CA_CERT_FILE).",
)
filterDeployments = flag.String(
"filter.deployments", "",
"Comma separated deployments to filter ($BOSH_EXPORTER_FILTER_DEPLOYMENTS).",
)
filterAZs = flag.String(
"filter.azs", "",
"Comma separated AZs to filter ($BOSH_EXPORTER_FILTER_AZS).",
)
filterCollectors = flag.String(
"filter.collectors", "",
"Comma separated collectors to filter (Deployments,Jobs,ServiceDiscovery) ($BOSH_EXPORTER_FILTER_COLLECTORS).",
)
metricsNamespace = flag.String(
"metrics.namespace", "bosh",
"Metrics Namespace ($BOSH_EXPORTER_METRICS_NAMESPACE).",
)
metricsEnvironment = flag.String(
"metrics.environment", "",
"Environment label to be attached to metrics ($BOSH_EXPORTER_METRICS_ENVIRONMENT).",
)
sdFilename = flag.String(
"sd.filename", "bosh_target_groups.json",
"Full path to the Service Discovery output file ($BOSH_EXPORTER_SD_FILENAME).",
)
sdProcessesRegexp = flag.String(
"sd.processes_regexp", "",
"Regexp to filter Service Discovery processes names ($BOSH_EXPORTER_SD_PROCESSES_REGEXP).",
)
showVersion = flag.Bool(
"version", false,
"Print version information.",
)
listenAddress = flag.String(
"web.listen-address", ":9190",
"Address to listen on for web interface and telemetry ($BOSH_EXPORTER_WEB_LISTEN_ADDRESS).",
)
metricsPath = flag.String(
"web.telemetry-path", "/metrics",
"Path under which to expose Prometheus metrics ($BOSH_EXPORTER_WEB_TELEMETRY_PATH).",
)
authUsername = flag.String(
"web.auth.username", "",
"Username for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_USERNAME).",
)
authPassword = flag.String(
"web.auth.password", "",
"Password for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_PASSWORD).",
)
tlsCertFile = flag.String(
"web.tls.cert_file", "",
"Path to a file that contains the TLS certificate (PEM format). If the certificate is signed by a certificate authority, the file should be the concatenation of the server's certificate, any intermediates, and the CA's certificate ($BOSH_EXPORTER_WEB_TLS_CERTFILE).",
)
tlsKeyFile = flag.String(
"web.tls.key_file", "",
"Path to a file that contains the TLS private key (PEM format) ($BOSH_EXPORTER_WEB_TLS_KEYFILE).",
)
)
func init() {
prometheus.MustRegister(version.NewCollector(*metricsNamespace))
}
func overrideFlagsWithEnvVars() {
overrideWithEnvVar("BOSH_EXPORTER_BOSH_URL", boshURL)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_USERNAME", boshUsername)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_PASSWORD", boshPassword)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_ID", boshUAAClientID)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET", boshUAAClientSecret)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_LOG_LEVEL", boshLogLevel)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_CA_CERT_FILE", boshCACertFile)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_DEPLOYMENTS", filterDeployments)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_AZS", filterAZs)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_COLLECTORS", filterCollectors)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_NAMESPACE", metricsNamespace)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_ENVIRONMENT", metricsEnvironment)
overrideWithEnvVar("BOSH_EXPORTER_SD_FILENAME", sdFilename)
overrideWithEnvVar("BOSH_EXPORTER_SD_PROCESSES_REGEXP", sdProcessesRegexp)
overrideWithEnvVar("BOSH_EXPORTER_WEB_LISTEN_ADDRESS", listenAddress)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TELEMETRY_PATH", metricsPath)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_USERNAME", authUsername)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_PASSWORD", authPassword)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_CERTFILE", tlsCertFile)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_KEYFILE", tlsKeyFile)
}
func overrideWithEnvVar(name string, value *string) {
envValue := os.Getenv(name)
if envValue != "" {
*value = envValue
}
}
type basicAuthHandler struct {
handler http.HandlerFunc
username string
password string
}
func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != h.username || password != h.password {
log.Errorf("Invalid HTTP auth from `%s`", r.RemoteAddr)
w.Header().Set("WWW-Authenticate", "Basic realm=\"metrics\"")
http.Error(w, "Invalid username or password", http.StatusUnauthorized)
return
}
h.handler(w, r)
}
func prometheusHandler() http.Handler {
handler := prometheus.Handler()
if *authUsername != "" && *authPassword != "" {
handler = &basicAuthHandler{
handler: prometheus.Handler().ServeHTTP,
username: *authUsername,
password: *authPassword,
}
}
return handler
}
func
|
(CACertFile string, logger logger.Logger) (string, error) {
if CACertFile != "" {
fs := system.NewOsFileSystem(logger)
CACertFileFullPath, err := fs.ExpandPath(CACertFile)
if err != nil {
return "", err
}
CACert, err := fs.ReadFileString(CACertFileFullPath)
if err != nil {
return "", err
}
return CACert, nil
}
return "", nil
}
func buildBOSHClient() (director.Director, error) {
logLevel, err := logger.Levelify(*boshLogLevel)
if err != nil {
return nil, err
}
logger := logger.NewLogger(logLevel)
directorConfig, err := director.NewConfigFromURL(*boshURL)
if err != nil {
return nil, err
}
boshCACert, err := readCACert(*boshCACertFile, logger)
if err != nil {
return nil, err
}
directorConfig.CACert = boshCACert
anonymousDirector, err := director.NewFactory(logger).New(directorConfig, nil, nil)
if err != nil {
return nil, err
}
boshInfo, err := anonymousDirector.Info()
if err != nil {
return nil, err
}
if boshInfo.Auth.Type != "uaa" {
directorConfig.Client = *boshUsername
directorConfig.ClientSecret = *boshPassword
} else {
uaaURL := boshInfo.Auth.Options["url"]
uaaURLStr, ok := uaaURL.(string)
if !ok {
return nil, fmt.Errorf("Expected UAA URL '%s' to be a string", uaaURL)
}
uaaConfig, err := uaa.NewConfigFromURL(uaaURLStr)
if err != nil {
return nil, err
}
uaaConfig.CACert = boshCACert
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
uaaConfig.Client = *boshUAAClientID
uaaConfig.ClientSecret = *boshUAAClientSecret
} else {
uaaConfig.Client = "bosh_cli"
}
uaaFactory := uaa.NewFactory(logger)
uaaClient, err := uaaFactory.New(uaaConfig)
if err != nil {
return nil, err
}
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
directorConfig.TokenFunc = uaa.NewClientTokenSession(uaaClient).TokenFunc
} else {
answers := []uaa.PromptAnswer{
uaa.PromptAnswer{
Key: "username",
Value: *boshUsername,
},
uaa.PromptAnswer{
Key: "password",
Value: *boshPassword,
},
}
accessToken, err := uaaClient.OwnerPasswordCredentialsGrant(answers)
if err != nil {
return nil, err
}
origToken := uaaClient.NewStaleAccessToken(accessToken.RefreshToken().Value())
directorConfig.TokenFunc = uaa.NewAccessTokenSession
|
readCACert
|
identifier_name
|
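The basicAuthHandler in the row above rejects requests whose Basic Auth credentials do not match before delegating to the wrapped handler. A hedged sketch of wrapping any http.HandlerFunc the same way; the names are illustrative, and subtle.ConstantTimeCompare is used here (an addition, not in the original) to avoid timing side channels on the comparison:

package main

import (
	"crypto/subtle"
	"net/http"
)

// withBasicAuth guards next with username/password checks,
// echoing basicAuthHandler above but with constant-time compares.
func withBasicAuth(next http.HandlerFunc, user, pass string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		u, p, ok := r.BasicAuth()
		if !ok ||
			subtle.ConstantTimeCompare([]byte(u), []byte(user)) != 1 ||
			subtle.ConstantTimeCompare([]byte(p), []byte(pass)) != 1 {
			w.Header().Set("WWW-Authenticate", `Basic realm="metrics"`)
			http.Error(w, "Invalid username or password", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/metrics", withBasicAuth(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	}, "admin", "secret"))
	http.ListenAndServe(":9190", nil)
}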
bosh_exporter.go
|
filter.collectors", "",
"Comma separated collectors to filter (Deployments,Jobs,ServiceDiscovery) ($BOSH_EXPORTER_FILTER_COLLECTORS).",
)
metricsNamespace = flag.String(
"metrics.namespace", "bosh",
"Metrics Namespace ($BOSH_EXPORTER_METRICS_NAMESPACE).",
)
metricsEnvironment = flag.String(
"metrics.environment", "",
"Environment label to be attached to metrics ($BOSH_EXPORTER_METRICS_ENVIRONMENT).",
)
sdFilename = flag.String(
"sd.filename", "bosh_target_groups.json",
"Full path to the Service Discovery output file ($BOSH_EXPORTER_SD_FILENAME).",
)
sdProcessesRegexp = flag.String(
"sd.processes_regexp", "",
"Regexp to filter Service Discovery processes names ($BOSH_EXPORTER_SD_PROCESSES_REGEXP).",
)
showVersion = flag.Bool(
"version", false,
"Print version information.",
)
listenAddress = flag.String(
"web.listen-address", ":9190",
"Address to listen on for web interface and telemetry ($BOSH_EXPORTER_WEB_LISTEN_ADDRESS).",
)
metricsPath = flag.String(
"web.telemetry-path", "/metrics",
"Path under which to expose Prometheus metrics ($BOSH_EXPORTER_WEB_TELEMETRY_PATH).",
)
authUsername = flag.String(
"web.auth.username", "",
"Username for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_USERNAME).",
)
authPassword = flag.String(
"web.auth.password", "",
"Password for web interface basic auth ($BOSH_EXPORTER_WEB_AUTH_PASSWORD).",
)
tlsCertFile = flag.String(
"web.tls.cert_file", "",
"Path to a file that contains the TLS certificate (PEM format). If the certificate is signed by a certificate authority, the file should be the concatenation of the server's certificate, any intermediates, and the CA's certificate ($BOSH_EXPORTER_WEB_TLS_CERTFILE).",
)
tlsKeyFile = flag.String(
"web.tls.key_file", "",
"Path to a file that contains the TLS private key (PEM format) ($BOSH_EXPORTER_WEB_TLS_KEYFILE).",
)
)
func init() {
prometheus.MustRegister(version.NewCollector(*metricsNamespace))
}
func overrideFlagsWithEnvVars() {
overrideWithEnvVar("BOSH_EXPORTER_BOSH_URL", boshURL)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_USERNAME", boshUsername)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_PASSWORD", boshPassword)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_ID", boshUAAClientID)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_UAA_CLIENT_SECRET", boshUAAClientSecret)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_LOG_LEVEL", boshLogLevel)
overrideWithEnvVar("BOSH_EXPORTER_BOSH_CA_CERT_FILE", boshCACertFile)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_DEPLOYMENTS", filterDeployments)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_AZS", filterAZs)
overrideWithEnvVar("BOSH_EXPORTER_FILTER_COLLECTORS", filterCollectors)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_NAMESPACE", metricsNamespace)
overrideWithEnvVar("BOSH_EXPORTER_METRICS_ENVIRONMENT", metricsEnvironment)
overrideWithEnvVar("BOSH_EXPORTER_SD_FILENAME", sdFilename)
overrideWithEnvVar("BOSH_EXPORTER_SD_PROCESSES_REGEXP", sdProcessesRegexp)
overrideWithEnvVar("BOSH_EXPORTER_WEB_LISTEN_ADDRESS", listenAddress)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TELEMETRY_PATH", metricsPath)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_USERNAME", authUsername)
overrideWithEnvVar("BOSH_EXPORTER_WEB_AUTH_PASSWORD", authPassword)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_CERTFILE", tlsCertFile)
overrideWithEnvVar("BOSH_EXPORTER_WEB_TLS_KEYFILE", tlsKeyFile)
}
func overrideWithEnvVar(name string, value *string) {
envValue := os.Getenv(name)
if envValue != "" {
*value = envValue
}
}
type basicAuthHandler struct {
handler http.HandlerFunc
username string
password string
}
func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != h.username || password != h.password {
log.Errorf("Invalid HTTP auth from `%s`", r.RemoteAddr)
w.Header().Set("WWW-Authenticate", "Basic realm=\"metrics\"")
http.Error(w, "Invalid username or password", http.StatusUnauthorized)
return
}
h.handler(w, r)
}
func prometheusHandler() http.Handler {
handler := prometheus.Handler()
if *authUsername != "" && *authPassword != "" {
handler = &basicAuthHandler{
handler: prometheus.Handler().ServeHTTP,
username: *authUsername,
password: *authPassword,
}
}
return handler
}
func readCACert(CACertFile string, logger logger.Logger) (string, error) {
if CACertFile != "" {
fs := system.NewOsFileSystem(logger)
CACertFileFullPath, err := fs.ExpandPath(CACertFile)
if err != nil {
return "", err
}
CACert, err := fs.ReadFileString(CACertFileFullPath)
if err != nil {
return "", err
}
return CACert, nil
}
return "", nil
}
func buildBOSHClient() (director.Director, error) {
logLevel, err := logger.Levelify(*boshLogLevel)
if err != nil {
return nil, err
}
logger := logger.NewLogger(logLevel)
directorConfig, err := director.NewConfigFromURL(*boshURL)
if err != nil {
return nil, err
}
boshCACert, err := readCACert(*boshCACertFile, logger)
if err != nil {
return nil, err
}
directorConfig.CACert = boshCACert
anonymousDirector, err := director.NewFactory(logger).New(directorConfig, nil, nil)
if err != nil {
return nil, err
}
boshInfo, err := anonymousDirector.Info()
if err != nil {
return nil, err
}
if boshInfo.Auth.Type != "uaa" {
directorConfig.Client = *boshUsername
directorConfig.ClientSecret = *boshPassword
} else {
uaaURL := boshInfo.Auth.Options["url"]
uaaURLStr, ok := uaaURL.(string)
if !ok {
return nil, fmt.Errorf("Expected UAA URL '%s' to be a string", uaaURL)
}
uaaConfig, err := uaa.NewConfigFromURL(uaaURLStr)
if err != nil {
return nil, err
}
uaaConfig.CACert = boshCACert
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
uaaConfig.Client = *boshUAAClientID
uaaConfig.ClientSecret = *boshUAAClientSecret
} else {
uaaConfig.Client = "bosh_cli"
}
uaaFactory := uaa.NewFactory(logger)
uaaClient, err := uaaFactory.New(uaaConfig)
if err != nil {
return nil, err
}
if *boshUAAClientID != "" && *boshUAAClientSecret != "" {
directorConfig.TokenFunc = uaa.NewClientTokenSession(uaaClient).TokenFunc
} else {
answers := []uaa.PromptAnswer{
uaa.PromptAnswer{
Key: "username",
Value: *boshUsername,
},
uaa.PromptAnswer{
Key: "password",
Value: *boshPassword,
},
}
accessToken, err := uaaClient.OwnerPasswordCredentialsGrant(answers)
if err != nil {
return nil, err
}
origToken := uaaClient.NewStaleAccessToken(accessToken.RefreshToken().Value())
directorConfig.TokenFunc = uaa.NewAccessTokenSession(origToken).TokenFunc
}
}
boshFactory := director.NewFactory(logger)
boshClient, err := boshFactory.New(directorConfig, director.NewNoopTaskReporter(), director.NewNoopFileReporter())
if err != nil {
return nil, err
}
return boshClient, nil
}
func main()
|
{
flag.Parse()
overrideFlagsWithEnvVars()
if *showVersion {
fmt.Fprintln(os.Stdout, version.Print("bosh_exporter"))
os.Exit(0)
}
log.Infoln("Starting bosh_exporter", version.Info())
log.Infoln("Build context", version.BuildContext())
boshClient, err := buildBOSHClient()
if err != nil {
log.Errorf("Error creating BOSH Client: %s", err.Error())
os.Exit(1)
}
boshInfo, err := boshClient.Info()
if err != nil {
|
identifier_body
|
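buildBOSHClient above branches on the director's advertised auth type: plain client credentials when the director is not UAA-backed, otherwise a UAA token flow — a client-credentials grant when a UAA client is configured, and a password grant with the bosh_cli public client as the fallback. A compressed sketch of that decision; the directorConfig type below is a hypothetical stand-in, not the real director/uaa package API:

package main

import "fmt"

// Hypothetical stand-in for the BOSH director config used above.
type directorConfig struct {
	Client       string
	ClientSecret string
	TokenFunc    func() (string, error)
}

// configureAuth mirrors the branch in buildBOSHClient: basic auth uses
// username/password directly; UAA uses a client-credentials grant when
// a UAA client is configured, else a password grant.
func configureAuth(cfg *directorConfig, authType, user, pass, uaaID, uaaSecret string) {
	if authType != "uaa" {
		cfg.Client, cfg.ClientSecret = user, pass
		return
	}
	if uaaID != "" && uaaSecret != "" {
		cfg.TokenFunc = func() (string, error) { return "client-grant-token-" + uaaID, nil }
		return
	}
	cfg.TokenFunc = func() (string, error) { return "password-grant-token-" + user, nil }
}

func main() {
	var cfg directorConfig
	configureAuth(&cfg, "uaa", "admin", "secret", "", "")
	token, _ := cfg.TokenFunc()
	fmt.Println(token)
}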
|
main.py
|
(Gammes[0]))
if afficher_commentaires: print("arcrouge ",(indice_pièceETmachine,indice_fin));
A[indice_pièceETmachine][indice_fin] = Valeur[NOLINK][1]
return A
#question1
def GRAPHE(s,gamme = Gammes,afficher_commentaires=False):
#j'initialise le graphe et j'ajoute les arcs, verts, rouge et noirs
graphe = fct_Matrice_Adjacence(gamme,afficher_commentaires)
#je m'assure que la solution et la gamme sont deux matrices
if 2+ len(s)*len(s[0]) != len(graphe):
print("erreur: le nombre de ligne de la gamme doit être égal au nombre de colonne de la solution et inversément")
print("len(graphe)=",len(graphe))
print("2+ {}*{} = {}".format( len(s),len(s[0]),2+ len(s)*len(s[0]) ) )
return np.zeros((1,1))
#plusieurs pièces passent en ordre sur une même machine: les arcs bleus q
for j in range(len(s)):
indice_machine = j
for i in range (len(s[0])-1): #on parcours les pieces sauf la dernière
indice_piece1 = trouver_indice_piece_dans_la_solution_s(s,j,i)
indice_piece2 = trouver_indice_piece_dans_la_solution_s(s,j,i+1)
pos1 = trouver_indice_couple(indice_piece1,indice_machine,len(gamme),len(gamme[0]))
pos2 = trouver_indice_couple(indice_piece2,indice_machine,len(gamme),len(gamme[0]))
if afficher_commentaires: print("arc bleu ",(pos1,pos2))
graphe[pos1][pos2] = Valeur[NOLINK][1]
return graphe
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]#s est une solution. On a une machine sur chaque ligne
afficher_matrice( GRAPHE(S,afficher_commentaires=True))
print("fin question 1", end = "\n\n\n\n\n")
#questiion2
#deterliner le plus long chemin du devbut à chaque point
#fct preliminaires
def predecesseurs(G,s):
predecesseurs = [i for i in range(len(G)) if G[i][s]!=NOLINK]+int(G[0][s]==Valeur[NOLINK][0])*[0]
return list(set(predecesseurs))
def successeurs(G,s):
successeurs=[j for j in range(len(G[0])) if G[s][j]!=NOLINK]+[len(G)-1]
return successeurs
def intersection(lst1, lst2): # intersection of two lists
return list(set(lst1) & set(lst2))
def PLC(G,sommet_depart=0,afficher_commentaires=False): # G is a directed acyclic graph given as an (n,n) matrix; sommet_depart, an integer between 0 and len(A)-1, is the start vertex
# sanity checks
A = G; B = np.array(A)
if sommet_depart not in range(0,len(A)):
print("nonexistent vertex")
return
if (B.diagonal()!=NOLINK).any():
print("remove the values on the diagonal")
return
# initializations
λ = [INF for i in range(len(A))];plc = λ # distances from sommet_depart; plc aliases λ, so updates through plc are visible in λ
λ[sommet_depart] = 0
rang = 0
S = [sommet_depart]# list of vertices processed so far
r = [0 for i in range(len(G))] # list of vertex ranks
liste_sommets = [i for i in range(len(G))]
while len(S)!=len(A):
rang +=1
if afficher_commentaires: print("A: ",A)
listeDesSommes_prive_de_S = [i for i in liste_sommets if i not in S]
W = [i for i in listeDesSommes_prive_de_S if intersection(predecesseurs(G,i) , listeDesSommes_prive_de_S)==[] ]
if afficher_commentaires: print(intersection(predecesseurs(G,7) , listeDesSommes_prive_de_S))
# the set of vertices not yet in S whose predecessors are all in S
# set rang(v)=rang for every element of W
for v in W:
r[v]=rang
S.extend(W)
for k in range(1,rang+1):
for v in [ v for v in liste_sommets if r[v]==k]:
N_moins = predecesseurs(G,v)
plc[v] = max([ plc[w] + t(G, w, v) for w in N_moins])
if afficher_commentaires:print("max distance between {} and {} found = {}".format(sommet_depart,v,plc[v]))
return λ
########## test
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
print("plc = ",PLC(GRAPHE(S),sommet_depart=0,afficher_commentaires=True),end="\n\n")
def plc(i,j,G=GRAPHE(S)):
liste_plc = PLC(G,sommet_depart=0)
indice_piece = i-1; indice_machine= j-1
indice_pièceETmachine = trouver_indice_couple(indice_piece,indice_machine,len(S[0]),len(S))
return liste_plc[indice_pièceETmachine]
print("plc(2,1)= ", plc(2,1))
print("fin question 2", end = "\n\n\n\n\n")
Gammes= [ [1,2,3],[2,1,3],[1,2,3]]
S= [ [1,2,3],[2,1,3],[1,2,3]]#s est une solution. On a une machine sur chaque ligne
#question3
#trouver pour chaque tâche (pièce, machine), l'heure de démarrage au plus tard
def AuPlusTard(Gammes,solution,sommet_fin,afficher_commentaires=False):#G graphe orienté sans circuit; G est une matrice (n,n) et s, un entier entre 0 et len(A)-1, est le sommet de départ
Graphe = GRAPHE(solution,Gammes)
#des vérifications
A = Graphe; B = np.array(A)
if sommet_fin not in range(0,len(A)):
print("sommet inexistant")
return
if (B.diagonal()!=NOLINK).any():
print("retranchez les valeurs des diagonales")
return
if ( [len(Gammes),len(Gammes[0])] != [len(solution[0]),len(solution)] ):
print("la solution et la gamme ne correspondent pas")
return
#des initialisations
auplustard = [INF for i in range(len(A))] #liste des distances à sommet_depart
auplustard[sommet_fin] = PLC(Graphe,sommet_depart=0)[-1]
rang = 0
S = [sommet_fin]#liste des sommets en cours s
r = [0 for i in range(len(Graphe))] #liste des rangs des sommets
liste_sommets = [i for i in range(len(Graphe))]
while len(S)!=len(A):
rang +=1
if afficher_commentaires:print("S",S)
listeDesSommes_prive_de_S = [i for i in liste_sommets if i not in S]
W = [i for i in listeDesSommes_prive_de_S if intersection(successeurs(Graphe,i) , listeDesSommes_prive_de_S)==[] ]
#l'ensemble des sommets qui ne sont pas dans S et qui n'ont pars de prédecesseur
#on pose rang(v)=rang pour tout les éléments de W
for v in W:
r[v]=rang
S.extend(W)
#print("\n")
for k in range(1,rang+1):
for v in [ v for v in liste_sommets if r[v]==k]:
N_plus = succes
|
seurs(Graphe,v)
auplustard[v] = min([ auplustard[w] - t(Graphe, v,w) for w in N_plus])
return auplustard
Gammes= [ [1,2,3],[2
|
conditional_block
|
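PLC in the row above computes longest paths in a DAG by first ranking vertices (repeatedly peeling off those with no remaining predecessors) and then relaxing distances in rank order. A minimal standalone sketch of the same idea on a dict-based graph; the names and the tiny example graph are illustrative, not from main.py:

def longest_paths(edges, n, start=0):
    """Longest path from `start` to every vertex of a DAG.
    `edges` maps (u, v) -> arc length; vertices are 0..n-1."""
    preds = {v: [u for u in range(n) if (u, v) in edges] for v in range(n)}
    # Peel off vertices whose predecessors are all ranked: a topological order.
    order, ranked = [], set()
    while len(order) < n:
        layer = [v for v in range(n) if v not in ranked
                 and all(u in ranked for u in preds[v])]
        order.extend(layer)
        ranked.update(layer)
    dist = {v: float("-inf") for v in range(n)}
    dist[start] = 0
    for v in order:
        for u in preds[v]:
            if dist[u] + edges[(u, v)] > dist[v]:
                dist[v] = dist[u] + edges[(u, v)]
    return dist

# 0 -> 1 -> 3 and 0 -> 2 -> 3; the longer branch wins: dist[3] == 6.
print(longest_paths({(0, 1): 2, (0, 2): 5, (1, 3): 2, (2, 3): 1}, 4))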
|
main.py
|
première_machine = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, 0);
indice_pièceETmachine = trouver_indice_couple(indice_pièce,première_machine,len(Gammes),len(Gammes[0]) )
if afficher_commentaires: print("arc vert ",(indice_debut,indice_pièceETmachine));
A[indice_debut][indice_pièceETmachine] = Valeur[NOLINK][0]
#lier la fin à (piècei, premièrdernière machine associée)
indice_fin = len(A)-1
dernière_machine = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, -1+len(Gammes[i]));
indice_pièceETmachine = trouver_indice_couple(indice_pièce,dernière_machine,len(Gammes),len(Gammes[0]))
if afficher_commentaires: print("arcrouge ",(indice_pièceETmachine,indice_fin));
A[indice_pièceETmachine][indice_fin] = Valeur[NOLINK][1]
return A
# Question 1
def GRAPHE(s,gamme = Gammes,afficher_commentaires=False):
# initialize the graph and add the green, red and black arcs
graphe = fct_Matrice_Adjacence(gamme,afficher_commentaires)
# make sure the solution and the routing (gamme) are compatible matrices
if 2+ len(s)*len(s[0]) != len(graphe):
print("error: the number of rows of the routing must equal the number of columns of the solution, and vice versa")
print("len(graphe)=",len(graphe))
print("2+ {}*{} = {}".format( len(s),len(s[0]),2+ len(s)*len(s[0]) ) )
return np.zeros((1,1))
# several parts pass in order through the same machine: the blue arcs
for j in range(len(s)):
indice_machine = j
for i in range (len(s[0])-1): # iterate over the parts except the last one
indice_piece1 = trouver_indice_piece_dans_la_solution_s(s,j,i)
indice_piece2 = trouver_indice_piece_dans_la_solution_s(s,j,i+1)
pos1 = trouver_indice_couple(indice_piece1,indice_machine,len(gamme),len(gamme[0]))
pos2 = trouver_indice_couple(indice_piece2,indice_machine,len(gamme),len(gamme[0]))
if afficher_commentaires: print("arc bleu ",(pos1,pos2))
graphe[pos1][pos2] = Valeur[NOLINK][1]
return graphe
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
afficher_matrice( GRAPHE(S,afficher_commentaires=True))
print("end of question 1", end = "\n\n\n\n\n")
# Question 2
# determine the longest path from the start to every vertex
# helper functions
def predecesseurs(G,s):
predecesseurs = [i for i in range(len(G)) if G[i][s]!=NOLINK]+int(G[0][s]==Valeur[NOLINK][0])*[0]
return list(set(predecesseurs))
def successeurs(G,s):
successeurs=[j for j in range(len(G[0])) if G[s][j]!=NOLINK]+[len(G)-1]
return successeurs
def intersection(lst1, lst2): # intersection of two lists
return list(set(lst1) & set(lst2))
def PLC(G,sommet_depart=0,afficher_commentaires=False): # G is a directed acyclic graph given as an (n,n) matrix; sommet_depart, an integer between 0 and len(A)-1, is the start vertex
# sanity checks
A = G; B = np.array(A)
if sommet_depart not in range(0,len(A)):
print("nonexistent vertex")
return
if (B.diagonal()!=NOLINK).any():
print("remove the values on the diagonal")
return
# initializations
λ = [INF for i in range(len(A))];plc = λ # distances from sommet_depart; plc aliases λ
λ[sommet_depart] = 0
rang = 0
S = [sommet_depart]# list of vertices processed so far
r = [0 for i in range(len(G))] # list of vertex ranks
liste_sommets = [i for i in range(len(G))]
while len(S)!=len(A):
rang +=1
if afficher_commentaires: print("A: ",A)
listeDesSommes_prive_de_S = [i for i in liste_sommets if i not in S]
W = [i for i in listeDesSommes_prive_de_S if intersection(predecesseurs(G,i) , listeDesSommes_prive_de_S)==[] ]
if afficher_commentaires: print(intersection(predecesseurs(G,7) , listeDesSommes_prive_de_S))
# the set of vertices not yet in S whose predecessors are all in S
# set rang(v)=rang for every element of W
for v in W:
r[v]=rang
S.extend(W)
for k in range(1,rang+1):
for v in [ v for v in liste_sommets if r[v]==k]:
N_moins = predecesseurs(G,v)
plc[v] = max([ plc[w] + t(G, w, v) for w in N_moins])
if afficher_commentaires:print("distance max entre {} et {} trouvé = {}".format(sommet_depart,v,plc[v]))
return λ
########## test
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
print("plc = ",PLC(GRAPHE(S),sommet_depart=0,afficher_commentaires=True),end="\n\n")
def plc(i,j,G=GRAPHE(S)):
liste_plc = PLC(G,sommet_depart=0)
indice_piece = i-1; indice_machine= j-1
indice_pièceETmachine = trouver_indice_couple(indice_piece,indice_machine,len(S[0]),len(S))
return liste_plc[indice_pièceETmachine]
print("plc(2,1)= ", plc(2,1))
print("fin question 2", end = "\n\n\n\n\n")
Gammes= [ [1,2,3],[2,1,3],[1,2,3]]
S= [ [1,2,3],[2,1,3],[1,2,3]]#s est une solution. On a une machine sur chaque ligne
#question3
#trouver pour chaque tâche (pièce, machine), l'heure de démarrage au plus tard
def AuPlusTard(Gammes,solution,sommet_fin,afficher_commentaires=False):#G graphe orienté sans circuit; G est une matrice (n,n) et s,
|
= 2 + len(Gammes)*len(Gammes[0])
taille_matrice = nb_noeuds
A = np.full((taille_matrice,taille_matrice),NOLINK)
# the same part i goes through machines j: black arcs
for i in range(len(Gammes)):
for j in range (len(Gammes[0])-1): # iterate over the machines except the last one
pièce = i + 1;indice_pièce = i
indice_machine1 = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, j)
indice_machine2 = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, j+1)
pos1 = trouver_indice_couple(indice_pièce,indice_machine1,len(Gammes),len(Gammes[0]))
pos2 = trouver_indice_couple(indice_pièce,indice_machine2,len(Gammes),len(Gammes[0]))
if afficher_commentaires: print("black arc ",(pos1,pos2))
A[pos1][pos2] = Valeur[NOLINK][1]
# link the start and end nodes to the parts: green arcs
for indice_pièce in range (len(Gammes)):
# link the start node to (part i, first machine in its routing)
indice_debut = 0
|
identifier_body
|
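The identifier_body in the row above builds the adjacency matrix with np.full and a NOLINK sentinel, then overwrites only the entries that carry arcs. A small sketch of that representation; the sentinel value and sizes below are illustrative assumptions, not taken from main.py:

import numpy as np

NOLINK = -1  # sentinel for "no arc", standing in for the snippet's NOLINK
n_parts, n_machines = 3, 3
n_nodes = 2 + n_parts * n_machines  # start node + one node per (part, machine) + end node

A = np.full((n_nodes, n_nodes), NOLINK)
A[0][1] = 0                       # e.g. a zero-length arc out of the start node
A[n_nodes - 2][n_nodes - 1] = 1   # an arc into the end node

# An arc exists wherever the entry differs from the sentinel.
print(np.argwhere(A != NOLINK))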
|
main.py
|
black arcs
for i in range(len(Gammes)):
for j in range (len(Gammes[0])-1): # iterate over the machines except the last one
pièce = i + 1;indice_pièce = i
indice_machine1 = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, j)
indice_machine2 = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, j+1)
pos1 = trouver_indice_couple(indice_pièce,indice_machine1,len(Gammes),len(Gammes[0]))
pos2 = trouver_indice_couple(indice_pièce,indice_machine2,len(Gammes),len(Gammes[0]))
if afficher_commentaires: print("black arc ",(pos1,pos2))
A[pos1][pos2] = Valeur[NOLINK][1]
# link the start and end nodes to the parts: green arcs
for indice_pièce in range (len(Gammes)):
# link the start node to (part i, first machine in its routing)
indice_debut = 0
première_machine = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, 0);
indice_pièceETmachine = trouver_indice_couple(indice_pièce,première_machine,len(Gammes),len(Gammes[0]) )
if afficher_commentaires: print("green arc ",(indice_debut,indice_pièceETmachine));
A[indice_debut][indice_pièceETmachine] = Valeur[NOLINK][0]
# link the end node to (part i, last machine in its routing)
indice_fin = len(A)-1
dernière_machine = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, -1+len(Gammes[i]));
indice_pièceETmachine = trouver_indice_couple(indice_pièce,dernière_machine,len(Gammes),len(Gammes[0]))
if afficher_commentaires: print("red arc ",(indice_pièceETmachine,indice_fin));
A[indice_pièceETmachine][indice_fin] = Valeur[NOLINK][1]
return A
# Question 1
def GRAPHE(s,gamme = Gammes,afficher_commentaires=
|
:
# initialize the graph and add the green, red and black arcs
graphe = fct_Matrice_Adjacence(gamme,afficher_commentaires)
# make sure the solution and the routing (gamme) are compatible matrices
if 2+ len(s)*len(s[0]) != len(graphe):
print("error: the number of rows of the routing must equal the number of columns of the solution, and vice versa")
print("len(graphe)=",len(graphe))
print("2+ {}*{} = {}".format( len(s),len(s[0]),2+ len(s)*len(s[0]) ) )
return np.zeros((1,1))
# several parts pass in order through the same machine: the blue arcs
for j in range(len(s)):
indice_machine = j
for i in range (len(s[0])-1): # iterate over the parts except the last one
indice_piece1 = trouver_indice_piece_dans_la_solution_s(s,j,i)
indice_piece2 = trouver_indice_piece_dans_la_solution_s(s,j,i+1)
pos1 = trouver_indice_couple(indice_piece1,indice_machine,len(gamme),len(gamme[0]))
pos2 = trouver_indice_couple(indice_piece2,indice_machine,len(gamme),len(gamme[0]))
if afficher_commentaires: print("arc bleu ",(pos1,pos2))
graphe[pos1][pos2] = Valeur[NOLINK][1]
return graphe
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
afficher_matrice( GRAPHE(S,afficher_commentaires=True))
print("end of question 1", end = "\n\n\n\n\n")
# Question 2
# determine the longest path from the start to every vertex
# helper functions
def predecesseurs(G,s):
predecesseurs = [i for i in range(len(G)) if G[i][s]!=NOLINK]+int(G[0][s]==Valeur[NOLINK][0])*[0]
return list(set(predecesseurs))
def successeurs(G,s):
successeurs=[j for j in range(len(G[0])) if G[s][j]!=NOLINK]+[len(G)-1]
return successeurs
def intersection(lst1, lst2): # intersection of two lists
return list(set(lst1) & set(lst2))
def PLC(G,sommet_depart=0,afficher_commentaires=False): # G is a directed acyclic graph given as an (n,n) matrix; sommet_depart, an integer between 0 and len(A)-1, is the start vertex
# sanity checks
A = G; B = np.array(A)
if sommet_depart not in range(0,len(A)):
print("nonexistent vertex")
return
if (B.diagonal()!=NOLINK).any():
print("remove the values on the diagonal")
return
# initializations
λ = [INF for i in range(len(A))];plc = λ # distances from sommet_depart; plc aliases λ
λ[sommet_depart] = 0
rang = 0
S = [sommet_depart]# list of vertices processed so far
r = [0 for i in range(len(G))] # list of vertex ranks
liste_sommets = [i for i in range(len(G))]
while len(S)!=len(A):
rang +=1
if afficher_commentaires: print("A: ",A)
listeDesSommes_prive_de_S = [i for i in liste_sommets if i not in S]
W = [i for i in listeDesSommes_prive_de_S if intersection(predecesseurs(G,i) , listeDesSommes_prive_de_S)==[] ]
if afficher_commentaires: print(intersection(predecesseurs(G,7) , listeDesSommes_prive_de_S))
# the set of vertices not yet in S whose predecessors are all in S
# set rang(v)=rang for every element of W
for v in W:
r[v]=rang
S.extend(W)
for k in range(1,rang+1):
for v in [ v for v in liste_sommets if r[v]==k]:
N_moins = predecesseurs(G,v)
plc[v] = max([ plc[w] + t(G, w, v) for w in N_moins])
if afficher_commentaires:print("distance max entre {} et {} trouvé = {}".format(sommet_depart,v,plc[v]))
return λ
########## test
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
print("plc = ",PLC(GRAPHE(S),sommet_depart=0,afficher_commentaires=True),end="\n\n")
def plc(i,j,G=GRAPHE(S)):
liste_plc = PLC(G,sommet_depart=0)
indice_piece = i-1; indice_machine= j-1
indice_pièceETmachine = trouver_indice_couple(indice_piece,indice_machine,len(S[0]),len(S))
return liste_plc[indice_pièceETmachine]
print("plc(2,1)= ", plc(2,1))
print("fin question 2", end = "\n\n\n\n\n")
Gammes= [ [1,2,3],[2,1,3],[1,2,3]]
S= [ [1,2,3],[2,1,3],[1,2,3]]#s est une solution. On a une machine sur chaque ligne
#question3
#trouver pour chaque tâche (pièce, machine), l'heure de démarrage au plus tard
def AuPlusTard(Gammes,solution,sommet_fin,afficher_commentaires=False):#G graphe orienté sans circuit; G est une matrice (n,n) et s, un entier entre 0 et len(A)-1, est le sommet de départ
Graphe = GRAPHE(solution,Gammes)
#des vérifications
A = Graphe; B = np.array(A)
if sommet_fin
|
False)
|
identifier_name
|
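Several helpers in the rows above, trouver_indice_couple in particular, are used but never defined in this excerpt; they appear to flatten a (part, machine) pair into a single node index, with node 0 reserved for the start node. A plausible reconstruction, purely for illustration and not the original code:

def pair_to_node(part, machine, n_parts, n_machines):
    """Hypothetical version of trouver_indice_couple: node 0 is the start
    node, so (part, machine) pairs begin at index 1, row-major by part."""
    return 1 + part * n_machines + machine

def node_to_pair(node, n_machines):
    """Inverse mapping, skipping the start node."""
    return divmod(node - 1, n_machines)

assert pair_to_node(*node_to_pair(5, 3), 3, 3) == 5
print(pair_to_node(1, 2, 3, 3))  # part 1 on machine 2 -> node 6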
main.py
|
: black arcs
for i in range(len(Gammes)):
for j in range (len(Gammes[0])-1): # iterate over the machines except the last one
pièce = i + 1;indice_pièce = i
indice_machine1 = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, j)
indice_machine2 = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, j+1)
pos1 = trouver_indice_couple(indice_pièce,indice_machine1,len(Gammes),len(Gammes[0]))
pos2 = trouver_indice_couple(indice_pièce,indice_machine2,len(Gammes),len(Gammes[0]))
if afficher_commentaires: print("black arc ",(pos1,pos2))
A[pos1][pos2] = Valeur[NOLINK][1]
# link the start and end nodes to the parts: green arcs
for indice_pièce in range (len(Gammes)):
# link the start node to (part i, first machine in its routing)
indice_debut = 0
première_machine = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, 0);
indice_pièceETmachine = trouver_indice_couple(indice_pièce,première_machine,len(Gammes),len(Gammes[0]) )
if afficher_commentaires: print("green arc ",(indice_debut,indice_pièceETmachine));
A[indice_debut][indice_pièceETmachine] = Valeur[NOLINK][0]
# link the end node to (part i, last machine in its routing)
indice_fin = len(A)-1
dernière_machine = trouver_indice_machine_dans_la_gamme(Gammes,indice_pièce, -1+len(Gammes[i]));
indice_pièceETmachine = trouver_indice_couple(indice_pièce,dernière_machine,len(Gammes),len(Gammes[0]))
if afficher_commentaires: print("red arc ",(indice_pièceETmachine,indice_fin));
A[indice_pièceETmachine][indice_fin] = Valeur[NOLINK][1]
return A
# Question 1
def GRAPHE(s,gamme = Gammes,afficher_commentaires=False):
# initialize the graph and add the green, red and black arcs
graphe = fct_Matrice_Adjacence(gamme,afficher_commentaires)
# make sure the solution and the routing (gamme) are compatible matrices
if 2+ len(s)*len(s[0]) != len(graphe):
print("error: the number of rows of the routing must equal the number of columns of the solution, and vice versa")
print("len(graphe)=",len(graphe))
print("2+ {}*{} = {}".format( len(s),len(s[0]),2+ len(s)*len(s[0]) ) )
return np.zeros((1,1))
# several parts pass in order through the same machine: the blue arcs
for j in range(len(s)):
indice_machine = j
for i in range (len(s[0])-1): # iterate over the parts except the last one
indice_piece1 = trouver_indice_piece_dans_la_solution_s(s,j,i)
indice_piece2 = trouver_indice_piece_dans_la_solution_s(s,j,i+1)
pos1 = trouver_indice_couple(indice_piece1,indice_machine,len(gamme),len(gamme[0]))
pos2 = trouver_indice_couple(indice_piece2,indice_machine,len(gamme),len(gamme[0]))
if afficher_commentaires: print("arc bleu ",(pos1,pos2))
graphe[pos1][pos2] = Valeur[NOLINK][1]
return graphe
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
afficher_matrice( GRAPHE(S,afficher_commentaires=True))
print("end of question 1", end = "\n\n\n\n\n")
# Question 2
# determine the longest path from the start to every vertex
# helper functions
def predecesseurs(G,s):
predecesseurs = [i for i in range(len(G)) if G[i][s]!=NOLINK]+int(G[0][s]==Valeur[NOLINK][0])*[0]
return list(set(predecesseurs))
def successeurs(G,s):
successeurs=[j for j in range(len(G[0])) if G[s][j]!=NOLINK]+[len(G)-1]
return successeurs
def intersection(lst1, lst2): # intersection of two lists
return list(set(lst1) & set(lst2))
def PLC(G,sommet_depart=0,afficher_commentaires=False): # G is a directed acyclic graph given as an (n,n) matrix; sommet_depart, an integer between 0 and len(A)-1, is the start vertex
# sanity checks
A = G; B = np.array(A)
if sommet_depart not in range(0,len(A)):
print("nonexistent vertex")
return
if (B.diagonal()!=NOLINK).any():
print("remove the values on the diagonal")
return
# initializations
λ = [INF for i in range(len(A))];plc = λ # distances from sommet_depart; plc aliases λ
λ[sommet_depart] = 0
rang = 0
S = [sommet_depart]# list of vertices processed so far
r = [0 for i in range(len(G))] # list of vertex ranks
liste_sommets = [i for i in range(len(G))]
while len(S)!=len(A):
rang +=1
if afficher_commentaires: print("A: ",A)
listeDesSommes_prive_de_S = [i for i in liste_sommets if i not in S]
W = [i for i in listeDesSommes_prive_de_S if intersection(predecesseurs(G,i) , listeDesSommes_prive_de_S)==[] ]
if afficher_commentaires: print(intersection(predecesseurs(G,7) , listeDesSommes_prive_de_S))
# the set of vertices not yet in S whose predecessors are all in S
# set rang(v)=rang for every element of W
for v in W:
r[v]=rang
S.extend(W)
for k in range(1,rang+1):
for v in [ v for v in liste_sommets if r[v]==k]:
N_moins = predecesseurs(G,v)
plc[v] = max([ plc[w] + t(G, w, v) for w in N_moins])
if afficher_commentaires:print("distance max entre {} et {} trouvé = {}".format(sommet_depart,v,plc[v]))
return λ
########## test
S = [ [1,2,3],
[2,1,3],
[1,2,3]
]# S is a solution: one machine per row
print("plc = ",PLC(GRAPHE(S),sommet_depart=0,afficher_commentaires=True),end="\n\n")
def plc(i,j,G=GRAPHE(S)):
|
liste_plc = PLC(G,sommet_depart=0)
indice_piece = i-1; indice_machine= j-1
indice_pièceETmachine = trouver_indice_couple(indice_piece,indice_machine,len(S[0]),len(S))
return liste_plc[indice_pièceETmachine]
print("plc(2,1)= ", plc(2,1))
print("fin question 2", end = "\n\n\n\n\n")
Gammes= [ [1,2,3],[2,1,3],[1,2,3]]
S= [ [1,2,3],[2,1,3],[1,2,3]]#s est une solution. On a une machine sur chaque ligne
#question3
#trouver pour chaque tâche (pièce, machine), l'heure de démarrage au plus tard
def AuPlusTard(Gammes,solution,sommet_fin,afficher_commentaires=False):#G graphe orienté sans circuit; G est une matrice (n,n) et s, un entier entre 0 et len(A)-1, est le sommet de départ
Graphe = GRAPHE(solution,Gammes)
#des vérifications
A = Graphe; B = np.array(A)
if sommet_fin not
|
random_line_split
|
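AuPlusTard in the rows above runs the mirror-image pass: it ranks vertices from the end of the DAG and propagates latest start times backwards, taking a min over successors. A compact standalone sketch under the assumption that every non-end vertex reaches the end node; the graph and names are illustrative, not from main.py:

def latest_starts(edges, n, makespan, end):
    """Latest start time for each vertex so the end node still
    finishes at `makespan`; mirrors AuPlusTard's backward pass."""
    succs = {u: [v for v in range(n) if (u, v) in edges] for u in range(n)}
    late = {v: float("inf") for v in range(n)}
    late[end] = makespan
    # Process in reverse topological order: fix a vertex once all of
    # its successors have been fixed.
    fixed = {end}
    while len(fixed) < n:
        for u in range(n):
            if u not in fixed and all(v in fixed for v in succs[u]):
                late[u] = min(late[v] - edges[(u, v)] for v in succs[u])
                fixed.add(u)
    return late

edges = {(0, 1): 2, (0, 2): 5, (1, 3): 2, (2, 3): 1}
print(latest_starts(edges, 4, makespan=6, end=3))  # vertex 0 must start at 0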
|
file_system.rs
|
/// Whether DirEntries added to this filesystem should be considered permanent, instead of a
/// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing
/// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused
/// nodes from it.
pub permanent_entries: bool,
/// A file-system global mutex to serialize rename operations.
///
/// This mutex is useful because the invariants enforced during a rename
/// operation involve many DirEntry objects. In the future, we might be
/// able to remove this mutex, but we will need to think carefully about
/// how rename operations can interleave.
///
/// See DirEntry::rename.
pub rename_mutex: Mutex<()>,
/// The FsNode cache for this file system.
///
/// When two directory entries are hard links to the same underlying inode,
/// this cache lets us re-use the same FsNode object for both directory
/// entries.
///
/// Rather than calling FsNode::new directly, file systems should call
/// FileSystem::get_or_create_node to see if the FsNode already exists in
/// the cache.
nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>,
/// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the
/// permanent_entries flag, to store every node and make sure it doesn't get freed without
/// being explicitly unlinked.
entries: Mutex<HashMap<usize, DirEntryHandle>>,
/// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set
/// as the selinux label on any newly created inodes in the filesystem.
pub selinux_context: OnceCell<FsString>,
}
impl FileSystem {
/// Create a new filesystem.
pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle {
Self::new_internal(kernel, ops, false)
}
/// Create a new filesystem with the permanent_entries flag set.
pub fn new_with_permanent_entries(
kernel: &Kernel,
ops: impl FileSystemOps,
) -> FileSystemHandle {
Self::new_internal(kernel, ops, true)
}
/// Create a new filesystem and call set_root in one step.
pub fn new_with_root(
kernel: &Kernel,
ops: impl FileSystemOps,
root_node: FsNode,
) -> FileSystemHandle {
let fs = Self::new_with_permanent_entries(kernel, ops);
fs.set_root_node(root_node);
fs
}
pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) {
self.set_root_node(FsNode::new_root(root));
}
/// Set up the root of the filesystem. Must not be called more than once.
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode) {
if root.inode_num == 0 {
root.inode_num = self.next_inode_num();
}
root.set_fs(self);
let root_node = Arc::new(root);
self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node));
let root = DirEntry::new(root_node, None, FsString::new());
assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once");
}
fn new_internal(
kernel: &Kernel,
ops: impl FileSystemOps,
permanent_entries: bool,
) -> FileSystemHandle {
Arc::new(FileSystem {
root: OnceCell::new(),
next_inode: AtomicU64::new(1),
ops: Box::new(ops),
dev_id: kernel.device_registry.write().next_anonymous_dev_id(),
permanent_entries,
rename_mutex: Mutex::new(()),
nodes: Mutex::new(HashMap::new()),
entries: Mutex::new(HashMap::new()),
selinux_context: OnceCell::new(),
})
}
/// The root directory entry of this file system.
///
/// Panics if this file system does not have a root directory.
pub fn root(&self) -> &DirEntryHandle {
self.root.get().unwrap()
}
/// Get or create an FsNode for this file system.
///
/// If inode_num is Some, then this function checks the node cache to
/// determine whether this node is already open. If so, the function
/// returns the existing FsNode. If not, the function calls the given
/// create_fn function to create the FsNode.
///
/// If inode_num is None, then this function assigns a new inode number
/// and calls the given create_fn function to create the FsNode with the
/// assigned number.
///
/// Returns Err only if create_fn returns Err.
pub fn get_or_create_node<F>(
&self,
inode_num: Option<ino_t>,
create_fn: F,
) -> Result<FsNodeHandle, Errno>
where
F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>,
{
let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num());
let mut nodes = self.nodes.lock();
match nodes.entry(inode_num) {
Entry::Vacant(entry) => {
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
Entry::Occupied(mut entry) => {
if let Some(node) = entry.get().upgrade() {
return Ok(node);
}
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
}
}
/// File systems that produce their own IDs for nodes should invoke this
/// function. Those that leave it to this object to assign the IDs should
/// call |create_node|.
pub fn create_node_with_id(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
id: ino_t,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
if let Some(label) = self.selinux_context.get() {
let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create);
}
let node = FsNode::new_uncached(ops, self, id, mode, owner);
self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node));
node
}
pub fn create_node(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
let inode_num = self.next_inode_num();
self.create_node_with_id(ops, inode_num, mode, owner)
}
pub fn create_node_with_ops(
self: &Arc<Self>,
ops: impl FsNodeOps,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
self.create_node(Box::new(ops), mode, owner)
}
/// Remove the given FsNode from the node cache.
///
/// Called from the Drop trait of FsNode.
pub fn remove_node(&self, node: &mut FsNode) {
let mut nodes = self.nodes.lock();
if let Some(weak_node) = nodes.get(&node.inode_num) {
if std::ptr::eq(weak_node.as_ptr(), node) {
nodes.remove(&node.inode_num);
}
}
}
pub fn next_inode_num(&self) -> ino_t {
assert!(!self.ops.generate_node_ids());
self.next_inode.fetch_add(1, Ordering::Relaxed)
}
/// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|
/// replacing |replaced|.
/// If |replaced| exists and is a directory, this function must check that |renamed| is also a
/// directory and that |replaced| is empty.
pub fn rename(
&self,
old_parent: &FsNodeHandle,
old_name: &FsStr,
new_parent: &FsNodeHandle,
new_name: &FsStr,
renamed: &FsNodeHandle,
replaced: Option<&FsNodeHandle>,
) -> Result<(), Errno> {
self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced)
}
/// Returns the `statfs` for this filesystem.
///
/// Each `FileSystemOps` impl is expected to override this to return the specific statfs for
/// the filesystem.
///
/// Returns `ENOSYS` if the `FileSystemOps` don't implement `stat`.
pub fn statfs(&self) -> Result<statfs, Errno> {
let mut stat = self.ops.statfs(self)?;
if stat.f_frsize == 0
|
Ok(stat)
}
pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone());
}
}
pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().remove(&(
|
{
stat.f_frsize = stat.f_bsize as i64;
}
|
conditional_block
|
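get_or_create_node in the row above de-duplicates FsNodes through a HashMap of Weak references keyed by inode number, so hard links share one node. A self-contained sketch of that upgrade-or-create pattern using plain std types; the Node type and constructor below are illustrative, not the Starnix API:

use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};

struct Node { ino: u64 }

struct Cache { nodes: Mutex<HashMap<u64, Weak<Node>>> }

impl Cache {
    /// Return the cached node for `ino` if it is still alive,
    /// otherwise build one with `create` and remember it weakly.
    fn get_or_create(&self, ino: u64, create: impl FnOnce(u64) -> Arc<Node>) -> Arc<Node> {
        let mut nodes = self.nodes.lock().unwrap();
        if let Some(node) = nodes.get(&ino).and_then(Weak::upgrade) {
            return node;
        }
        let node = create(ino);
        nodes.insert(ino, Arc::downgrade(&node));
        node
    }
}

fn main() {
    let cache = Cache { nodes: Mutex::new(HashMap::new()) };
    let a = cache.get_or_create(1, |ino| Arc::new(Node { ino }));
    let b = cache.get_or_create(1, |ino| Arc::new(Node { ino }));
    assert!(Arc::ptr_eq(&a, &b)); // hard links re-use the same node
    println!("ino {}", a.ino);
}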
file_system.rs
|
,
/// Whether DirEntries added to this filesystem should be considered permanent, instead of a
/// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing
/// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused
/// nodes from it.
pub permanent_entries: bool,
/// A file-system global mutex to serialize rename operations.
///
/// This mutex is useful because the invariants enforced during a rename
/// operation involve many DirEntry objects. In the future, we might be
/// able to remove this mutex, but we will need to think carefully about
/// how rename operations can interleave.
///
/// See DirEntry::rename.
pub rename_mutex: Mutex<()>,
/// The FsNode cache for this file system.
///
/// When two directory entries are hard links to the same underlying inode,
/// this cache lets us re-use the same FsNode object for both directory
/// entries.
///
/// Rather than calling FsNode::new directly, file systems should call
/// FileSystem::get_or_create_node to see if the FsNode already exists in
/// the cache.
nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>,
/// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the
/// permanent_entries flag, to store every node and make sure it doesn't get freed without
/// being explicitly unlinked.
entries: Mutex<HashMap<usize, DirEntryHandle>>,
/// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set
/// as the selinux label on any newly created inodes in the filesystem.
pub selinux_context: OnceCell<FsString>,
}
impl FileSystem {
/// Create a new filesystem.
pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle {
Self::new_internal(kernel, ops, false)
}
/// Create a new filesystem with the permanent_entries flag set.
pub fn new_with_permanent_entries(
kernel: &Kernel,
ops: impl FileSystemOps,
) -> FileSystemHandle {
Self::new_internal(kernel, ops, true)
}
/// Create a new filesystem and call set_root in one step.
pub fn new_with_root(
kernel: &Kernel,
ops: impl FileSystemOps,
root_node: FsNode,
) -> FileSystemHandle {
let fs = Self::new_with_permanent_entries(kernel, ops);
fs.set_root_node(root_node);
fs
}
pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) {
self.set_root_node(FsNode::new_root(root));
}
/// Set up the root of the filesystem. Must not be called more than once.
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode) {
if root.inode_num == 0 {
root.inode_num = self.next_inode_num();
}
root.set_fs(self);
let root_node = Arc::new(root);
self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node));
let root = DirEntry::new(root_node, None, FsString::new());
assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once");
}
fn new_internal(
kernel: &Kernel,
ops: impl FileSystemOps,
permanent_entries: bool,
) -> FileSystemHandle {
Arc::new(FileSystem {
root: OnceCell::new(),
next_inode: AtomicU64::new(1),
ops: Box::new(ops),
dev_id: kernel.device_registry.write().next_anonymous_dev_id(),
permanent_entries,
rename_mutex: Mutex::new(()),
nodes: Mutex::new(HashMap::new()),
entries: Mutex::new(HashMap::new()),
selinux_context: OnceCell::new(),
})
}
/// The root directory entry of this file system.
///
/// Panics if this file system does not have a root directory.
pub fn root(&self) -> &DirEntryHandle {
self.root.get().unwrap()
}
/// Get or create an FsNode for this file system.
///
/// If inode_num is Some, then this function checks the node cache to
/// determine whether this node is already open. If so, the function
/// returns the existing FsNode. If not, the function calls the given
/// create_fn function to create the FsNode.
///
/// If inode_num is None, then this function assigns a new inode number
/// and calls the given create_fn function to create the FsNode with the
/// assigned number.
///
/// Returns Err only if create_fn returns Err.
pub fn get_or_create_node<F>(
&self,
inode_num: Option<ino_t>,
create_fn: F,
) -> Result<FsNodeHandle, Errno>
where
F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>,
{
let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num());
let mut nodes = self.nodes.lock();
match nodes.entry(inode_num) {
Entry::Vacant(entry) => {
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
Entry::Occupied(mut entry) => {
if let Some(node) = entry.get().upgrade() {
return Ok(node);
}
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
}
}
/// File systems that produce their own IDs for nodes should invoke this
/// function. Those that leave it to this object to assign the IDs should
/// call |create_node|.
pub fn create_node_with_id(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
id: ino_t,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
if let Some(label) = self.selinux_context.get() {
let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create);
}
let node = FsNode::new_uncached(ops, self, id, mode, owner);
self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node));
node
}
pub fn create_node(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
let inode_num = self.next_inode_num();
self.create_node_with_id(ops, inode_num, mode, owner)
}
pub fn create_node_with_ops(
self: &Arc<Self>,
ops: impl FsNodeOps,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
self.create_node(Box::new(ops), mode, owner)
}
/// Remove the given FsNode from the node cache.
///
/// Called from the Drop trait of FsNode.
pub fn remove_node(&self, node: &mut FsNode) {
let mut nodes = self.nodes.lock();
if let Some(weak_node) = nodes.get(&node.inode_num) {
if std::ptr::eq(weak_node.as_ptr(), node) {
nodes.remove(&node.inode_num);
}
}
}
pub fn next_inode_num(&self) -> ino_t {
assert!(!self.ops.generate_node_ids());
self.next_inode.fetch_add(1, Ordering::Relaxed)
}
/// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|
/// replacing |replaced|.
/// If |replaced| exists and is a directory, this function must check that |renamed| is also a
/// directory and that |replaced| is empty.
pub fn rename(
&self,
old_parent: &FsNodeHandle,
old_name: &FsStr,
new_parent: &FsNodeHandle,
new_name: &FsStr,
renamed: &FsNodeHandle,
replaced: Option<&FsNodeHandle>,
) -> Result<(), Errno> {
self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced)
}
/// Returns the `statfs` for this filesystem.
|
/// Returns `ENOSYS` if the `FileSystemOps` don't implement `stat`.
pub fn statfs(&self) -> Result<statfs, Errno> {
let mut stat = self.ops.statfs(self)?;
if stat.f_frsize == 0 {
stat.f_frsize = stat.f_bsize as i64;
}
Ok(stat)
}
pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone());
}
}
pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().remove(&(Arc
|
///
/// Each `FileSystemOps` impl is expected to override this to return the specific statfs for
/// the filesystem.
///
|
random_line_split
|
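The statfs wrapper in the row above patches up filesystems that report f_frsize == 0 by falling back to the block size. The same defaulting rule in isolation; the struct below is a simplified stand-in for the libc-defined statfs, with field types reduced for the sketch:

// Simplified stand-in for the libc statfs struct used above.
struct Statfs { f_bsize: i64, f_frsize: i64 }

/// Apply the fallback from the snippet: a zero fragment size
/// defaults to the block size.
fn normalize(mut stat: Statfs) -> Statfs {
    if stat.f_frsize == 0 {
        stat.f_frsize = stat.f_bsize;
    }
    stat
}

fn main() {
    let stat = normalize(Statfs { f_bsize: 4096, f_frsize: 0 });
    assert_eq!(stat.f_frsize, 4096);
    println!("frsize = {}", stat.f_frsize);
}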
file_system.rs
|
,
/// Whether DirEntries added to this filesystem should be considered permanent, instead of a
/// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing
/// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused
/// nodes from it.
pub permanent_entries: bool,
/// A file-system global mutex to serialize rename operations.
///
/// This mutex is useful because the invariants enforced during a rename
/// operation involve many DirEntry objects. In the future, we might be
/// able to remove this mutex, but we will need to think carefully about
/// how rename operations can interleave.
///
/// See DirEntry::rename.
pub rename_mutex: Mutex<()>,
/// The FsNode cache for this file system.
///
/// When two directory entries are hard links to the same underlying inode,
/// this cache lets us re-use the same FsNode object for both directory
/// entries.
///
/// Rather than calling FsNode::new directly, file systems should call
/// FileSystem::get_or_create_node to see if the FsNode already exists in
/// the cache.
nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>,
/// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the
/// permanent_entries flag, to store every node and make sure it doesn't get freed without
/// being explicitly unlinked.
entries: Mutex<HashMap<usize, DirEntryHandle>>,
/// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set
/// as the selinux label on any newly created inodes in the filesystem.
pub selinux_context: OnceCell<FsString>,
}
impl FileSystem {
/// Create a new filesystem.
pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle {
Self::new_internal(kernel, ops, false)
}
/// Create a new filesystem with the permanent_entries flag set.
pub fn new_with_permanent_entries(
kernel: &Kernel,
ops: impl FileSystemOps,
) -> FileSystemHandle {
Self::new_internal(kernel, ops, true)
}
/// Create a new filesystem and call set_root in one step.
pub fn new_with_root(
kernel: &Kernel,
ops: impl FileSystemOps,
root_node: FsNode,
) -> FileSystemHandle {
let fs = Self::new_with_permanent_entries(kernel, ops);
fs.set_root_node(root_node);
fs
}
pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) {
self.set_root_node(FsNode::new_root(root));
}
/// Set up the root of the filesystem. Must not be called more than once.
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode) {
if root.inode_num == 0 {
root.inode_num = self.next_inode_num();
}
root.set_fs(self);
let root_node = Arc::new(root);
self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node));
let root = DirEntry::new(root_node, None, FsString::new());
assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once");
}
fn new_internal(
kernel: &Kernel,
ops: impl FileSystemOps,
permanent_entries: bool,
) -> FileSystemHandle {
Arc::new(FileSystem {
root: OnceCell::new(),
next_inode: AtomicU64::new(1),
ops: Box::new(ops),
dev_id: kernel.device_registry.write().next_anonymous_dev_id(),
permanent_entries,
rename_mutex: Mutex::new(()),
nodes: Mutex::new(HashMap::new()),
entries: Mutex::new(HashMap::new()),
selinux_context: OnceCell::new(),
})
}
/// The root directory entry of this file system.
///
/// Panics if this file system does not have a root directory.
pub fn
|
(&self) -> &DirEntryHandle {
self.root.get().unwrap()
}
/// Get or create an FsNode for this file system.
///
/// If inode_num is Some, then this function checks the node cache to
/// determine whether this node is already open. If so, the function
/// returns the existing FsNode. If not, the function calls the given
/// create_fn function to create the FsNode.
///
/// If inode_num is None, then this function assigns a new inode number
/// and calls the given create_fn function to create the FsNode with the
/// assigned number.
///
/// Returns Err only if create_fn returns Err.
pub fn get_or_create_node<F>(
&self,
inode_num: Option<ino_t>,
create_fn: F,
) -> Result<FsNodeHandle, Errno>
where
F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>,
{
let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num());
let mut nodes = self.nodes.lock();
match nodes.entry(inode_num) {
Entry::Vacant(entry) => {
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
Entry::Occupied(mut entry) => {
if let Some(node) = entry.get().upgrade() {
return Ok(node);
}
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
}
}
/// File systems that produce their own IDs for nodes should invoke this
/// function. Those that leave it to this object to assign the IDs should
/// call |create_node|.
pub fn create_node_with_id(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
id: ino_t,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
if let Some(label) = self.selinux_context.get() {
let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create);
}
let node = FsNode::new_uncached(ops, self, id, mode, owner);
self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node));
node
}
pub fn create_node(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
let inode_num = self.next_inode_num();
self.create_node_with_id(ops, inode_num, mode, owner)
}
pub fn create_node_with_ops(
self: &Arc<Self>,
ops: impl FsNodeOps,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
self.create_node(Box::new(ops), mode, owner)
}
/// Remove the given FsNode from the node cache.
///
/// Called from the Drop trait of FsNode.
pub fn remove_node(&self, node: &mut FsNode) {
let mut nodes = self.nodes.lock();
if let Some(weak_node) = nodes.get(&node.inode_num) {
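            // Only evict if the cached weak pointer still refers to this exact
            // node; a racing get_or_create_node may have re-inserted the inode.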
if std::ptr::eq(weak_node.as_ptr(), node) {
nodes.remove(&node.inode_num);
}
}
}
pub fn next_inode_num(&self) -> ino_t {
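        // Only filesystems that delegate inode numbering to this object may
        // call this; `Relaxed` suffices because the counter only needs to
        // produce unique values, not order other memory operations.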
assert!(!self.ops.generate_node_ids());
self.next_inode.fetch_add(1, Ordering::Relaxed)
}
/// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|
/// replacing |replaced|.
    /// If |replaced| exists and is a directory, this function must check that |renamed| is
    /// also a directory and that |replaced| is empty.
pub fn rename(
&self,
old_parent: &FsNodeHandle,
old_name: &FsStr,
new_parent: &FsNodeHandle,
new_name: &FsStr,
renamed: &FsNodeHandle,
replaced: Option<&FsNodeHandle>,
) -> Result<(), Errno> {
self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced)
}
/// Returns the `statfs` for this filesystem.
///
/// Each `FileSystemOps` impl is expected to override this to return the specific statfs for
/// the filesystem.
///
    /// Returns `ENOSYS` if the `FileSystemOps` don't implement `statfs`.
pub fn statfs(&self) -> Result<statfs, Errno> {
let mut stat = self.ops.statfs(self)?;
if stat.f_frsize == 0 {
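            // A zero fragment size falls back to the block size, a common
            // statfs convention when filesystems don't report f_frsize.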
stat.f_frsize = stat.f_bsize as i64;
}
Ok(stat)
}
pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone());
}
}
pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().remove(&(
|
root
|
identifier_name
|
file_system.rs
|
/// Whether DirEntries added to this filesystem should be considered permanent, instead of a
/// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing
/// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused
/// nodes from it.
pub permanent_entries: bool,
/// A file-system global mutex to serialize rename operations.
///
/// This mutex is useful because the invariants enforced during a rename
/// operation involve many DirEntry objects. In the future, we might be
/// able to remove this mutex, but we will need to think carefully about
/// how rename operations can interleave.
///
/// See DirEntry::rename.
pub rename_mutex: Mutex<()>,
/// The FsNode cache for this file system.
///
/// When two directory entries are hard links to the same underlying inode,
/// this cache lets us re-use the same FsNode object for both directory
/// entries.
///
/// Rather than calling FsNode::new directly, file systems should call
/// FileSystem::get_or_create_node to see if the FsNode already exists in
/// the cache.
nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>,
/// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the
/// permanent_entries flag, to store every node and make sure it doesn't get freed without
/// being explicitly unlinked.
entries: Mutex<HashMap<usize, DirEntryHandle>>,
/// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set
/// as the selinux label on any newly created inodes in the filesystem.
pub selinux_context: OnceCell<FsString>,
}
impl FileSystem {
/// Create a new filesystem.
pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle {
Self::new_internal(kernel, ops, false)
}
/// Create a new filesystem with the permanent_entries flag set.
pub fn new_with_permanent_entries(
kernel: &Kernel,
ops: impl FileSystemOps,
) -> FileSystemHandle {
Self::new_internal(kernel, ops, true)
}
/// Create a new filesystem and call set_root in one step.
pub fn new_with_root(
kernel: &Kernel,
ops: impl FileSystemOps,
root_node: FsNode,
) -> FileSystemHandle {
let fs = Self::new_with_permanent_entries(kernel, ops);
fs.set_root_node(root_node);
fs
}
pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) {
self.set_root_node(FsNode::new_root(root));
}
/// Set up the root of the filesystem. Must not be called more than once.
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode)
|
fn new_internal(
kernel: &Kernel,
ops: impl FileSystemOps,
permanent_entries: bool,
) -> FileSystemHandle {
Arc::new(FileSystem {
root: OnceCell::new(),
next_inode: AtomicU64::new(1),
ops: Box::new(ops),
dev_id: kernel.device_registry.write().next_anonymous_dev_id(),
permanent_entries,
rename_mutex: Mutex::new(()),
nodes: Mutex::new(HashMap::new()),
entries: Mutex::new(HashMap::new()),
selinux_context: OnceCell::new(),
})
}
/// The root directory entry of this file system.
///
/// Panics if this file system does not have a root directory.
pub fn root(&self) -> &DirEntryHandle {
self.root.get().unwrap()
}
/// Get or create an FsNode for this file system.
///
/// If inode_num is Some, then this function checks the node cache to
/// determine whether this node is already open. If so, the function
/// returns the existing FsNode. If not, the function calls the given
/// create_fn function to create the FsNode.
///
/// If inode_num is None, then this function assigns a new inode number
/// and calls the given create_fn function to create the FsNode with the
/// assigned number.
///
/// Returns Err only if create_fn returns Err.
pub fn get_or_create_node<F>(
&self,
inode_num: Option<ino_t>,
create_fn: F,
) -> Result<FsNodeHandle, Errno>
where
F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>,
{
let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num());
let mut nodes = self.nodes.lock();
match nodes.entry(inode_num) {
Entry::Vacant(entry) => {
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
Entry::Occupied(mut entry) => {
if let Some(node) = entry.get().upgrade() {
return Ok(node);
}
let node = create_fn(inode_num)?;
entry.insert(Arc::downgrade(&node));
Ok(node)
}
}
}
/// File systems that produce their own IDs for nodes should invoke this
    /// function. Those that leave ID assignment to this object should
/// call |create_node|.
pub fn create_node_with_id(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
id: ino_t,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
if let Some(label) = self.selinux_context.get() {
let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create);
}
let node = FsNode::new_uncached(ops, self, id, mode, owner);
self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node));
node
}
pub fn create_node(
self: &Arc<Self>,
ops: Box<dyn FsNodeOps>,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
let inode_num = self.next_inode_num();
self.create_node_with_id(ops, inode_num, mode, owner)
}
pub fn create_node_with_ops(
self: &Arc<Self>,
ops: impl FsNodeOps,
mode: FileMode,
owner: FsCred,
) -> FsNodeHandle {
self.create_node(Box::new(ops), mode, owner)
}
/// Remove the given FsNode from the node cache.
///
/// Called from the Drop trait of FsNode.
pub fn remove_node(&self, node: &mut FsNode) {
let mut nodes = self.nodes.lock();
if let Some(weak_node) = nodes.get(&node.inode_num) {
if std::ptr::eq(weak_node.as_ptr(), node) {
nodes.remove(&node.inode_num);
}
}
}
pub fn next_inode_num(&self) -> ino_t {
assert!(!self.ops.generate_node_ids());
self.next_inode.fetch_add(1, Ordering::Relaxed)
}
/// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|
/// replacing |replaced|.
    /// If |replaced| exists and is a directory, this function must check that |renamed| is
    /// also a directory and that |replaced| is empty.
pub fn rename(
&self,
old_parent: &FsNodeHandle,
old_name: &FsStr,
new_parent: &FsNodeHandle,
new_name: &FsStr,
renamed: &FsNodeHandle,
replaced: Option<&FsNodeHandle>,
) -> Result<(), Errno> {
self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced)
}
/// Returns the `statfs` for this filesystem.
///
/// Each `FileSystemOps` impl is expected to override this to return the specific statfs for
/// the filesystem.
///
    /// Returns `ENOSYS` if the `FileSystemOps` don't implement `statfs`.
pub fn statfs(&self) -> Result<statfs, Errno> {
let mut stat = self.ops.statfs(self)?;
if stat.f_frsize == 0 {
stat.f_frsize = stat.f_bsize as i64;
}
Ok(stat)
}
pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone());
}
}
pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) {
if self.permanent_entries {
self.entries.lock().remove(&(
|
{
if root.inode_num == 0 {
root.inode_num = self.next_inode_num();
}
root.set_fs(self);
let root_node = Arc::new(root);
self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node));
let root = DirEntry::new(root_node, None, FsString::new());
assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once");
}
|
identifier_body
|
agent.go
|
agent release and version information.
type RInfo struct {
	// InstanceID is the unique name identifier for this agent
InstanceID string
// Version is the app X.Y.Z version
Version string
// Commit is the git commit sha1
Commit string
// Branch is the git branch
Branch string
// BuildStamp is the build timestamp
BuildStamp string
}
// GetRInfo returns the agent release information.
func GetRInfo() *RInfo {
info := &RInfo{
InstanceID: MainConfig.General.InstanceID,
Version: Version,
Commit: Commit,
Branch: Branch,
BuildStamp: BuildStamp,
}
return info
}
var (
// Bus is the messaging system used to send messages to the devices
Bus = bus.NewBus()
// MainConfig contains the global configuration
MainConfig config.Config
// DBConfig contains the database config
DBConfig config.DBConfig
log utils.Logger
// reloadMutex guards the reloadProcess flag
reloadMutex sync.Mutex
reloadProcess bool
// mutex guards the runtime devices map access
mutex sync.RWMutex
// devices is the runtime snmp devices map
devices map[string]*device.SnmpDevice
// influxdb is the runtime devices output db map
influxdb map[string]*output.InfluxDB
selfmonProc *selfmon.SelfMon
// gatherWg synchronizes device specific goroutines
gatherWg sync.WaitGroup
senderWg sync.WaitGroup
)
// SetLogger sets the current log output.
func SetLogger(l utils.Logger) {
log = l
}
// Reload Mutex Related Methods.
// CheckReloadProcess checks if the agent is currently reloading config.
func CheckReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
return reloadProcess
}
// CheckAndSetReloadProcess sets the reloadProcess flag.
// Returns its previous value.
func CheckAndSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = true
return retval
}
// CheckAndUnSetReloadProcess unsets the reloadProcess flag.
// Returns its previous value.
func CheckAndUnSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = false
return retval
}
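// Illustrative (hypothetical) reload sequence showing how these guards are
// meant to be combined; not part of this file:
//
//	if CheckAndSetReloadProcess() {
//		return fmt.Errorf("reload already running")
//	}
//	defer CheckAndUnSetReloadProcess()
//	DeviceProcessStop()
//	LoadConf()
//	DeviceProcessStart()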
// PrepareInfluxDBs initializes all output DBs configured in the SQL database.
// If there is no "default" key, it creates a dummy output db which does nothing.
func PrepareInfluxDBs() map[string]*output.InfluxDB {
idb := make(map[string]*output.InfluxDB)
var defFound bool
for k, c := range DBConfig.Influxdb {
if k == "default" {
defFound = true
}
idb[k] = output.NewNotInitInfluxDB(c)
}
	if !defFound {
		log.Warn("No default influxdb output found!!")
idb["default"] = output.DummyDB
}
return idb
}
// GetDevice returns the snmp device with the given id.
// Returns an error if there is an ongoing reload.
func GetDevice(id string) (*device.SnmpDevice, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("There is not any device with id %s running", id)
}
return dev, nil
}
// GetDeviceJSONInfo returns the device data in JSON format.
// Returns an error if there is an ongoing reload.
func GetDeviceJSONInfo(id string) ([]byte, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("there is not any device with id %s running", id)
}
return dev.ToJSON()
}
// GetDevStats returns a map with the basic info of each device.
func GetDevStats() map[string]*stats.GatherStats {
devstats := make(map[string]*stats.GatherStats)
mutex.RLock()
for k, v := range devices {
devstats[k] = v.GetBasicStats()
}
mutex.RUnlock()
return devstats
}
// StopInfluxOut stops sending data to output influxDB servers.
func StopInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Stopping Influxdb out %s", k)
v.StopSender()
}
}
// ReleaseInfluxOut closes the influxDB connections and releases the associated resources.
func ReleaseInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Release Influxdb resources %s", k)
v.End()
}
}
// DeviceProcessStop stops all device polling goroutines
func DeviceProcessStop() {
Bus.Broadcast(&bus.Message{Type: bus.Exit})
}
// DeviceProcessStart starts all device polling goroutines
func DeviceProcessStart() {
mutex.Lock()
devices = make(map[string]*device.SnmpDevice)
mutex.Unlock()
for k, c := range DBConfig.SnmpDevice {
AddDeviceInRuntime(k, c)
}
}
func init() {
go Bus.Start()
}
func initSelfMonitoring(idb map[string]*output.InfluxDB) {
log.Debugf("INFLUXDB2: %+v", idb)
selfmonProc = selfmon.NewNotInit(&MainConfig.Selfmon)
if MainConfig.Selfmon.Enabled {
if val, ok := idb["default"]; ok {
			// only executed if a "default" influxdb exists
val.Init()
val.StartSender(&senderWg)
selfmonProc.Init()
selfmonProc.SetOutDB(idb)
selfmonProc.SetOutput(val)
log.Printf("SELFMON enabled %+v", MainConfig.Selfmon)
// Begin the statistic reporting
selfmonProc.StartGather(&gatherWg)
} else {
MainConfig.Selfmon.Enabled = false
log.Errorf("SELFMON disabled becaouse of no default db found !!! SELFMON[ %+v ] INFLUXLIST[ %+v]\n", MainConfig.Selfmon, idb)
}
} else {
log.Printf("SELFMON disabled %+v\n", MainConfig.Selfmon)
}
}
// IsDeviceInRuntime checks if device `id` exists in the runtime device map.
func IsDeviceInRuntime(id string) bool {
mutex.Lock()
defer mutex.Unlock()
if _, ok := devices[id]; ok {
return true
}
return false
}
// DeleteDeviceInRuntime removes the device `id` from the runtime device map.
func DeleteDeviceInRuntime(id string) error {
// Avoid modifications to devices while deleting device
mutex.Lock()
defer mutex.Unlock()
if dev, ok := devices[id]; ok {
		// Stop all device processes and their measurements. Once finished, they will be
		// removed from the bus and the node closed (snmp connections for measurements will be closed)
dev.StopGather()
log.Debugf("Bus retuned from the exit message to the ID device %s", id)
delete(devices, id)
return nil
}
log.Errorf("There is no %s device in the runtime device list", id)
return nil
}
// AddDeviceInRuntime initializes an SNMP device and stores its pointer in the global device map.
func AddDeviceInRuntime(k string, cfg *config.SnmpDeviceCfg) {
// Initialize each SNMP device and put pointer to the global map devices
dev := device.New(cfg)
dev.AttachToBus(Bus)
dev.InitCatalogVar(DBConfig.VarCatalog)
dev.SetSelfMonitoring(selfmonProc)
	// pass the db map so the device can initialize its own output db if needed
outdb, _ := dev.GetOutSenderFromMap(influxdb)
outdb.Init()
outdb.StartSender(&senderWg)
mutex.Lock()
devices[k] = dev
// Start gather goroutine for device and add it to the wait group for gather goroutines
gatherWg.Add(1)
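	// Lifecycle note: the gather loop below runs in its own goroutine, tracked
	// by gatherWg; when it exits, the device leaves the bus so broadcasts do
	// not block on a dead receiver.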
go func() {
|
log.Infof("Device %s finished", cfg.ID)
		// Once the device goroutine has finished, leave the bus so it won't get
		// blocked trying to send messages to a device that is no longer running.
dev.LeaveBus(Bus)
}()
mutex.Unlock()
}
// LoadConf loads the DB conf and initializes the device metric config.
func LoadConf() {
MainConfig.Database.LoadDbConfig(&DBConfig)
influxdb = PrepareInfluxDBs()
// begin self monitoring process if needed, before all goroutines
initSelfMonitoring(influxdb)
config.InitMetricsCfg(&DBConfig)
}
// Start loads the agent configuration and starts it.
func Start() {
LoadConf()
DeviceProcessStart()
}
// End stops all devices polling.
func End() (time.Duration, error) {
start := time.Now()
log.Infof("END: begin device Gather processes stop... at %s", start.String())
|
defer gatherWg.Done()
dev.StartGather()
|
random_line_split
|
agent.go
|
agent release and version information.
type RInfo struct {
	// InstanceID is the unique name identifier for this agent
InstanceID string
// Version is the app X.Y.Z version
Version string
// Commit is the git commit sha1
Commit string
// Branch is the git branch
Branch string
// BuildStamp is the build timestamp
BuildStamp string
}
// GetRInfo returns the agent release information.
func GetRInfo() *RInfo {
info := &RInfo{
InstanceID: MainConfig.General.InstanceID,
Version: Version,
Commit: Commit,
Branch: Branch,
BuildStamp: BuildStamp,
}
return info
}
var (
// Bus is the messaging system used to send messages to the devices
Bus = bus.NewBus()
// MainConfig contains the global configuration
MainConfig config.Config
// DBConfig contains the database config
DBConfig config.DBConfig
log utils.Logger
// reloadMutex guards the reloadProcess flag
reloadMutex sync.Mutex
reloadProcess bool
// mutex guards the runtime devices map access
mutex sync.RWMutex
// devices is the runtime snmp devices map
devices map[string]*device.SnmpDevice
// influxdb is the runtime devices output db map
influxdb map[string]*output.InfluxDB
selfmonProc *selfmon.SelfMon
// gatherWg synchronizes device specific goroutines
gatherWg sync.WaitGroup
senderWg sync.WaitGroup
)
// SetLogger sets the current log output.
func SetLogger(l utils.Logger) {
log = l
}
// Reload Mutex Related Methods.
// CheckReloadProcess checks if the agent is currently reloading config.
func CheckReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
return reloadProcess
}
// CheckAndSetReloadProcess sets the reloadProcess flag.
// Returns its previous value.
func CheckAndSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = true
return retval
}
// CheckAndUnSetReloadProcess unsets the reloadProcess flag.
// Returns its previous value.
func CheckAndUnSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = false
return retval
}
// PrepareInfluxDBs initializes all output DBs configured in the SQL database.
// If there is no "default" key, it creates a dummy output db which does nothing.
func PrepareInfluxDBs() map[string]*output.InfluxDB {
idb := make(map[string]*output.InfluxDB)
var defFound bool
for k, c := range DBConfig.Influxdb {
if k == "default" {
defFound = true
}
idb[k] = output.NewNotInitInfluxDB(c)
}
	if !defFound {
		log.Warn("No default influxdb output found!!")
idb["default"] = output.DummyDB
}
return idb
}
// GetDevice returns the snmp device with the given id.
// Returns an error if there is an ongoing reload.
func GetDevice(id string) (*device.SnmpDevice, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("There is not any device with id %s running", id)
}
return dev, nil
}
// GetDeviceJSONInfo returns the device data in JSON format.
// Returns an error if there is an ongoing reload.
func GetDeviceJSONInfo(id string) ([]byte, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("there is not any device with id %s running", id)
}
return dev.ToJSON()
}
// GetDevStats returns a map with the basic info of each device.
func GetDevStats() map[string]*stats.GatherStats {
devstats := make(map[string]*stats.GatherStats)
mutex.RLock()
for k, v := range devices {
devstats[k] = v.GetBasicStats()
}
mutex.RUnlock()
return devstats
}
// StopInfluxOut stops sending data to output influxDB servers.
func StopInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Stopping Influxdb out %s", k)
v.StopSender()
}
}
// ReleaseInfluxOut closes the influxDB connections and releases the associated resources.
func ReleaseInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Release Influxdb resources %s", k)
v.End()
}
}
// DeviceProcessStop stops all device polling goroutines
func DeviceProcessStop() {
Bus.Broadcast(&bus.Message{Type: bus.Exit})
}
// DeviceProcessStart starts all device polling goroutines
func DeviceProcessStart() {
mutex.Lock()
devices = make(map[string]*device.SnmpDevice)
mutex.Unlock()
for k, c := range DBConfig.SnmpDevice {
AddDeviceInRuntime(k, c)
}
}
func init() {
go Bus.Start()
}
func initSelfMonitoring(idb map[string]*output.InfluxDB) {
log.Debugf("INFLUXDB2: %+v", idb)
selfmonProc = selfmon.NewNotInit(&MainConfig.Selfmon)
if MainConfig.Selfmon.Enabled {
if val, ok := idb["default"]; ok {
			// only executed if a "default" influxdb exists
val.Init()
val.StartSender(&senderWg)
selfmonProc.Init()
selfmonProc.SetOutDB(idb)
selfmonProc.SetOutput(val)
log.Printf("SELFMON enabled %+v", MainConfig.Selfmon)
// Begin the statistic reporting
selfmonProc.StartGather(&gatherWg)
} else {
MainConfig.Selfmon.Enabled = false
log.Errorf("SELFMON disabled becaouse of no default db found !!! SELFMON[ %+v ] INFLUXLIST[ %+v]\n", MainConfig.Selfmon, idb)
}
} else {
log.Printf("SELFMON disabled %+v\n", MainConfig.Selfmon)
}
}
// IsDeviceInRuntime checks if device `id` exists in the runtime device map.
func IsDeviceInRuntime(id string) bool {
mutex.Lock()
defer mutex.Unlock()
if _, ok := devices[id]; ok {
return true
}
return false
}
// DeleteDeviceInRuntime removes the device `id` from the runtime device map.
func DeleteDeviceInRuntime(id string) error {
// Avoid modifications to devices while deleting device
mutex.Lock()
defer mutex.Unlock()
if dev, ok := devices[id]; ok {
		// Stop all device processes and their measurements. Once finished, they will be
		// removed from the bus and the node closed (snmp connections for measurements will be closed)
dev.StopGather()
log.Debugf("Bus retuned from the exit message to the ID device %s", id)
delete(devices, id)
return nil
}
log.Errorf("There is no %s device in the runtime device list", id)
return nil
}
// AddDeviceInRuntime initializes an SNMP device and stores its pointer in the global device map.
func AddDeviceInRuntime(k string, cfg *config.SnmpDeviceCfg) {
// Initialize each SNMP device and put pointer to the global map devices
dev := device.New(cfg)
dev.AttachToBus(Bus)
dev.InitCatalogVar(DBConfig.VarCatalog)
dev.SetSelfMonitoring(selfmonProc)
	// pass the db map so the device can initialize its own output db if needed
outdb, _ := dev.GetOutSenderFromMap(influxdb)
outdb.Init()
outdb.StartSender(&senderWg)
mutex.Lock()
devices[k] = dev
// Start gather goroutine for device and add it to the wait group for gather goroutines
gatherWg.Add(1)
go func() {
defer gatherWg.Done()
dev.StartGather()
log.Infof("Device %s finished", cfg.ID)
		// Once the device goroutine has finished, leave the bus so it won't get
		// blocked trying to send messages to a device that is no longer running.
dev.LeaveBus(Bus)
}()
mutex.Unlock()
}
// LoadConf loads the DB conf and initializes the device metric config.
func LoadConf() {
MainConfig.Database.LoadDbConfig(&DBConfig)
influxdb = PrepareInfluxDBs()
// begin self monitoring process if needed, before all goroutines
initSelfMonitoring(influxdb)
config.InitMetricsCfg(&DBConfig)
}
// Start loads the agent configuration and starts it.
func Start()
|
// End stops all devices polling.
func End() (time.Duration, error) {
start := time.Now()
log.Infof("END: begin device Gather processes stop... at %s", start.String
|
{
LoadConf()
DeviceProcessStart()
}
|
identifier_body
|
agent.go
|
agent release and version information.
type RInfo struct {
	// InstanceID is the unique name identifier for this agent
InstanceID string
// Version is the app X.Y.Z version
Version string
// Commit is the git commit sha1
Commit string
// Branch is the git branch
Branch string
// BuildStamp is the build timestamp
BuildStamp string
}
// GetRInfo returns the agent release information.
func GetRInfo() *RInfo {
info := &RInfo{
InstanceID: MainConfig.General.InstanceID,
Version: Version,
Commit: Commit,
Branch: Branch,
BuildStamp: BuildStamp,
}
return info
}
var (
// Bus is the messaging system used to send messages to the devices
Bus = bus.NewBus()
// MainConfig contains the global configuration
MainConfig config.Config
// DBConfig contains the database config
DBConfig config.DBConfig
log utils.Logger
// reloadMutex guards the reloadProcess flag
reloadMutex sync.Mutex
reloadProcess bool
// mutex guards the runtime devices map access
mutex sync.RWMutex
// devices is the runtime snmp devices map
devices map[string]*device.SnmpDevice
// influxdb is the runtime devices output db map
influxdb map[string]*output.InfluxDB
selfmonProc *selfmon.SelfMon
// gatherWg synchronizes device specific goroutines
gatherWg sync.WaitGroup
senderWg sync.WaitGroup
)
// SetLogger sets the current log output.
func SetLogger(l utils.Logger) {
log = l
}
// Reload Mutex Related Methods.
// CheckReloadProcess checks if the agent is currently reloading config.
func CheckReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
return reloadProcess
}
// CheckAndSetReloadProcess sets the reloadProcess flag.
// Returns its previous value.
func CheckAndSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = true
return retval
}
// CheckAndUnSetReloadProcess unsets the reloadProcess flag.
// Returns its previous value.
func CheckAndUnSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = false
return retval
}
// PrepareInfluxDBs initializes all output DBs configured in the SQL database.
// If there is no "default" key, it creates a dummy output db which does nothing.
func PrepareInfluxDBs() map[string]*output.InfluxDB {
idb := make(map[string]*output.InfluxDB)
var defFound bool
for k, c := range DBConfig.Influxdb {
if k == "default" {
defFound = true
}
idb[k] = output.NewNotInitInfluxDB(c)
}
	if !defFound {
		log.Warn("No default influxdb output found!!")
idb["default"] = output.DummyDB
}
return idb
}
// GetDevice returns the snmp device with the given id.
// Returns an error if there is an ongoing reload.
func GetDevice(id string) (*device.SnmpDevice, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("There is not any device with id %s running", id)
}
return dev, nil
}
// GetDeviceJSONInfo returns the device data in JSON format.
// Returns an error if there is an ongoing reload.
func GetDeviceJSONInfo(id string) ([]byte, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("there is not any device with id %s running", id)
}
return dev.ToJSON()
}
// GetDevStats returns a map with the basic info of each device.
func GetDevStats() map[string]*stats.GatherStats {
devstats := make(map[string]*stats.GatherStats)
mutex.RLock()
for k, v := range devices {
devstats[k] = v.GetBasicStats()
}
mutex.RUnlock()
return devstats
}
// StopInfluxOut stops sending data to output influxDB servers.
func StopInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Stopping Influxdb out %s", k)
v.StopSender()
}
}
// ReleaseInfluxOut closes the influxDB connections and releases the associated resources.
func ReleaseInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Release Influxdb resources %s", k)
v.End()
}
}
// DeviceProcessStop stops all device polling goroutines
func DeviceProcessStop() {
Bus.Broadcast(&bus.Message{Type: bus.Exit})
}
// DeviceProcessStart starts all device polling goroutines
func DeviceProcessStart() {
mutex.Lock()
devices = make(map[string]*device.SnmpDevice)
mutex.Unlock()
for k, c := range DBConfig.SnmpDevice {
AddDeviceInRuntime(k, c)
}
}
func init() {
go Bus.Start()
}
func initSelfMonitoring(idb map[string]*output.InfluxDB) {
log.Debugf("INFLUXDB2: %+v", idb)
selfmonProc = selfmon.NewNotInit(&MainConfig.Selfmon)
if MainConfig.Selfmon.Enabled
|
else {
log.Printf("SELFMON disabled %+v\n", MainConfig.Selfmon)
}
}
// IsDeviceInRuntime checks if device `id` exists in the runtime device map.
func IsDeviceInRuntime(id string) bool {
mutex.Lock()
defer mutex.Unlock()
if _, ok := devices[id]; ok {
return true
}
return false
}
// DeleteDeviceInRuntime removes the device `id` from the runtime device map.
func DeleteDeviceInRuntime(id string) error {
// Avoid modifications to devices while deleting device
mutex.Lock()
defer mutex.Unlock()
if dev, ok := devices[id]; ok {
		// Stop all device processes and their measurements. Once finished, they will be
		// removed from the bus and the node closed (snmp connections for measurements will be closed)
dev.StopGather()
log.Debugf("Bus retuned from the exit message to the ID device %s", id)
delete(devices, id)
return nil
}
log.Errorf("There is no %s device in the runtime device list", id)
return nil
}
// AddDeviceInRuntime initializes an SNMP device and stores its pointer in the global device map.
func AddDeviceInRuntime(k string, cfg *config.SnmpDeviceCfg) {
// Initialize each SNMP device and put pointer to the global map devices
dev := device.New(cfg)
dev.AttachToBus(Bus)
dev.InitCatalogVar(DBConfig.VarCatalog)
dev.SetSelfMonitoring(selfmonProc)
	// pass the db map so the device can initialize its own output db if needed
outdb, _ := dev.GetOutSenderFromMap(influxdb)
outdb.Init()
outdb.StartSender(&senderWg)
mutex.Lock()
devices[k] = dev
// Start gather goroutine for device and add it to the wait group for gather goroutines
gatherWg.Add(1)
go func() {
defer gatherWg.Done()
dev.StartGather()
log.Infof("Device %s finished", cfg.ID)
		// Once the device goroutine has finished, leave the bus so it won't get
		// blocked trying to send messages to a device that is no longer running.
dev.LeaveBus(Bus)
}()
mutex.Unlock()
}
// LoadConf loads the DB conf and initializes the device metric config.
func LoadConf() {
MainConfig.Database.LoadDbConfig(&DBConfig)
influxdb = PrepareInfluxDBs()
// begin self monitoring process if needed, before all goroutines
initSelfMonitoring(influxdb)
config.InitMetricsCfg(&DBConfig)
}
// Start loads the agent configuration and starts it.
func Start() {
LoadConf()
DeviceProcessStart()
}
// End stops all devices polling.
func End() (time.Duration, error) {
start := time.Now()
log.Infof("END: begin device Gather processes stop... at %s", start.String())
|
{
if val, ok := idb["default"]; ok {
		// only executed if a "default" influxdb exists
val.Init()
val.StartSender(&senderWg)
selfmonProc.Init()
selfmonProc.SetOutDB(idb)
selfmonProc.SetOutput(val)
log.Printf("SELFMON enabled %+v", MainConfig.Selfmon)
// Begin the statistic reporting
selfmonProc.StartGather(&gatherWg)
} else {
MainConfig.Selfmon.Enabled = false
log.Errorf("SELFMON disabled becaouse of no default db found !!! SELFMON[ %+v ] INFLUXLIST[ %+v]\n", MainConfig.Selfmon, idb)
}
}
|
conditional_block
|
agent.go
|
agent release and version information.
type RInfo struct {
	// InstanceID is the unique name identifier for this agent
InstanceID string
// Version is the app X.Y.Z version
Version string
// Commit is the git commit sha1
Commit string
// Branch is the git branch
Branch string
// BuildStamp is the build timestamp
BuildStamp string
}
// GetRInfo returns the agent release information.
func GetRInfo() *RInfo {
info := &RInfo{
InstanceID: MainConfig.General.InstanceID,
Version: Version,
Commit: Commit,
Branch: Branch,
BuildStamp: BuildStamp,
}
return info
}
var (
// Bus is the messaging system used to send messages to the devices
Bus = bus.NewBus()
// MainConfig contains the global configuration
MainConfig config.Config
// DBConfig contains the database config
DBConfig config.DBConfig
log utils.Logger
// reloadMutex guards the reloadProcess flag
reloadMutex sync.Mutex
reloadProcess bool
// mutex guards the runtime devices map access
mutex sync.RWMutex
// devices is the runtime snmp devices map
devices map[string]*device.SnmpDevice
// influxdb is the runtime devices output db map
influxdb map[string]*output.InfluxDB
selfmonProc *selfmon.SelfMon
// gatherWg synchronizes device specific goroutines
gatherWg sync.WaitGroup
senderWg sync.WaitGroup
)
// SetLogger sets the current log output.
func SetLogger(l utils.Logger) {
log = l
}
// Reload Mutex Related Methods.
// CheckReloadProcess checks if the agent is currently reloading config.
func CheckReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
return reloadProcess
}
// CheckAndSetReloadProcess sets the reloadProcess flag.
// Returns its previous value.
func
|
() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = true
return retval
}
// CheckAndUnSetReloadProcess unsets the reloadProcess flag.
// Returns its previous value.
func CheckAndUnSetReloadProcess() bool {
reloadMutex.Lock()
defer reloadMutex.Unlock()
retval := reloadProcess
reloadProcess = false
return retval
}
// PrepareInfluxDBs initializes all output DBs configured in the SQL database.
// If there is no "default" key, it creates a dummy output db which does nothing.
func PrepareInfluxDBs() map[string]*output.InfluxDB {
idb := make(map[string]*output.InfluxDB)
var defFound bool
for k, c := range DBConfig.Influxdb {
if k == "default" {
defFound = true
}
idb[k] = output.NewNotInitInfluxDB(c)
}
	if !defFound {
		log.Warn("No default influxdb output found!!")
idb["default"] = output.DummyDB
}
return idb
}
// GetDevice returns the snmp device with the given id.
// Returns an error if there is an ongoing reload.
func GetDevice(id string) (*device.SnmpDevice, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("There is not any device with id %s running", id)
}
return dev, nil
}
// GetDeviceJSONInfo returns the device data in JSON format.
// Returns an error if there is an ongoing reload.
func GetDeviceJSONInfo(id string) ([]byte, error) {
var dev *device.SnmpDevice
var ok bool
	if CheckReloadProcess() {
		log.Warning("There is a reload process running while trying to get device info")
		return nil, fmt.Errorf("there is a reload process running, please wait until it finishes")
}
mutex.RLock()
defer mutex.RUnlock()
if dev, ok = devices[id]; !ok {
return nil, fmt.Errorf("there is not any device with id %s running", id)
}
return dev.ToJSON()
}
// GetDevStats returns a map with the basic info of each device.
func GetDevStats() map[string]*stats.GatherStats {
devstats := make(map[string]*stats.GatherStats)
mutex.RLock()
for k, v := range devices {
devstats[k] = v.GetBasicStats()
}
mutex.RUnlock()
return devstats
}
// StopInfluxOut stops sending data to output influxDB servers.
func StopInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Stopping Influxdb out %s", k)
v.StopSender()
}
}
// ReleaseInfluxOut closes the influxDB connections and releases the associated resources.
func ReleaseInfluxOut(idb map[string]*output.InfluxDB) {
for k, v := range idb {
log.Infof("Release Influxdb resources %s", k)
v.End()
}
}
// DeviceProcessStop stops all device polling goroutines
func DeviceProcessStop() {
Bus.Broadcast(&bus.Message{Type: bus.Exit})
}
// DeviceProcessStart starts all device polling goroutines
func DeviceProcessStart() {
mutex.Lock()
devices = make(map[string]*device.SnmpDevice)
mutex.Unlock()
for k, c := range DBConfig.SnmpDevice {
AddDeviceInRuntime(k, c)
}
}
func init() {
go Bus.Start()
}
func initSelfMonitoring(idb map[string]*output.InfluxDB) {
log.Debugf("INFLUXDB2: %+v", idb)
selfmonProc = selfmon.NewNotInit(&MainConfig.Selfmon)
if MainConfig.Selfmon.Enabled {
if val, ok := idb["default"]; ok {
			// only executed if a "default" influxdb exists
val.Init()
val.StartSender(&senderWg)
selfmonProc.Init()
selfmonProc.SetOutDB(idb)
selfmonProc.SetOutput(val)
log.Printf("SELFMON enabled %+v", MainConfig.Selfmon)
// Begin the statistic reporting
selfmonProc.StartGather(&gatherWg)
} else {
MainConfig.Selfmon.Enabled = false
log.Errorf("SELFMON disabled becaouse of no default db found !!! SELFMON[ %+v ] INFLUXLIST[ %+v]\n", MainConfig.Selfmon, idb)
}
} else {
log.Printf("SELFMON disabled %+v\n", MainConfig.Selfmon)
}
}
// IsDeviceInRuntime checks if device `id` exists in the runtime device map.
func IsDeviceInRuntime(id string) bool {
mutex.Lock()
defer mutex.Unlock()
if _, ok := devices[id]; ok {
return true
}
return false
}
// DeleteDeviceInRuntime removes the device `id` from the runtime device map.
func DeleteDeviceInRuntime(id string) error {
// Avoid modifications to devices while deleting device
mutex.Lock()
defer mutex.Unlock()
if dev, ok := devices[id]; ok {
		// Stop all device processes and their measurements. Once finished, they will be
		// removed from the bus and the node closed (snmp connections for measurements will be closed)
dev.StopGather()
log.Debugf("Bus retuned from the exit message to the ID device %s", id)
delete(devices, id)
return nil
}
log.Errorf("There is no %s device in the runtime device list", id)
return nil
}
// AddDeviceInRuntime initializes an SNMP device and stores its pointer in the global device map.
func AddDeviceInRuntime(k string, cfg *config.SnmpDeviceCfg) {
// Initialize each SNMP device and put pointer to the global map devices
dev := device.New(cfg)
dev.AttachToBus(Bus)
dev.InitCatalogVar(DBConfig.VarCatalog)
dev.SetSelfMonitoring(selfmonProc)
	// pass the db map so the device can initialize its own output db if needed
outdb, _ := dev.GetOutSenderFromMap(influxdb)
outdb.Init()
outdb.StartSender(&senderWg)
mutex.Lock()
devices[k] = dev
// Start gather goroutine for device and add it to the wait group for gather goroutines
gatherWg.Add(1)
go func() {
defer gatherWg.Done()
dev.StartGather()
log.Infof("Device %s finished", cfg.ID)
		// Once the device goroutine has finished, leave the bus so it won't get
		// blocked trying to send messages to a device that is no longer running.
dev.LeaveBus(Bus)
}()
mutex.Unlock()
}
// LoadConf loads the DB conf and initializes the device metric config.
func LoadConf() {
MainConfig.Database.LoadDbConfig(&DBConfig)
influxdb = PrepareInfluxDBs()
// begin self monitoring process if needed, before all goroutines
initSelfMonitoring(influxdb)
config.InitMetricsCfg(&DBConfig)
}
// Start loads the agent configuration and starts it.
func Start() {
LoadConf()
DeviceProcessStart()
}
// End stops all devices polling.
func End() (time.Duration, error) {
start := time.Now()
log.Infof("END: begin device Gather processes stop... at %s", start.String())
|
CheckAndSetReloadProcess
|
identifier_name
|
arena.rs
|
the reference count.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead.
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>;
/// Deallocate a given handle, and finalize the referred object if there are
/// no more handles.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead.
unsafe fn dealloc(&self, handle: Ref<Self::Data>);
/// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned.
///
/// # Safety
///
/// The caller must be careful when calling this inside `ArenaObject::finalize`.
/// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released,
/// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject`
/// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc`
/// for an `ArenaObject` that may be under finalization.
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R;
}
pub trait ArenaObject {
/// Finalizes the `ArenaObject`.
    /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped.
fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>);
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct ArrayArena<T, const CAPACITY: usize> {
#[pin]
entries: [RcCell<T>; CAPACITY],
}
#[pin_project]
#[repr(C)]
pub struct MruEntry<T> {
#[pin]
list_entry: ListEntry,
#[pin]
data: RcCell<T>,
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct MruArena<T, const CAPACITY: usize> {
#[pin]
entries: [MruEntry<T>; CAPACITY],
#[pin]
list: List<MruEntry<T>>,
}
/// A thread-safe reference counted pointer, allocated from `A: Arena`.
/// The data type is same as `A::Data`.
///
/// # Safety
///
/// `inner` is allocated from `arena`.
/// We can safely dereference `arena` until `inner` gets dropped,
/// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> {
arena: *const A,
inner: ManuallyDrop<Ref<A::Data>>,
}
// `Rc` is `Send` because it does not impl `DerefMut`,
// and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock.
// Also, `Rc` does not point to thread-local data.
unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {}
impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self {
Self { entries }
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<ArrayArena<T, CAPACITY>>
{
type Data = T;
type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>;
fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
n: N,
) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
let mut empty: Option<*mut RcCell<T>> = None;
for entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
if empty.is_none() {
empty = Some(entry.as_ref().get_ref() as *const _ as *mut _)
}
// Note: Do not use `break` here.
// We must first search through all entries, and then alloc at empty
                // only if the entry we're looking for doesn't exist.
} else if let Some(r) = entry.try_borrow() {
// The entry is not under finalization. Check its data.
if c(&r) {
return Some(r);
}
}
}
empty.map(|cell_raw| {
// SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned.
let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) };
n(cell.as_mut().get_pin_mut().unwrap().get_mut());
cell.borrow()
})
}
fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
for mut entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
f(entry.as_mut().get_pin_mut().unwrap().get_mut());
return Some(entry.borrow());
}
}
None
}
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> {
let mut _this = self.lock();
handle.clone()
}
unsafe fn dealloc(&self, handle: Ref<Self::Data>) {
let mut this = self.lock();
if let Ok(mut rm) = RefMut::<T>::try_from(handle) {
rm.finalize::<Self>(&mut this);
}
}
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R,
{
guard.reacquire_after(f)
}
}
impl<T> MruEntry<T> {
// TODO(https://github.com/kaist-cp/rv6/issues/369)
    // A workaround for https://github.com/Gilnaa/memoffset/issues/49.
// Assumes `list_entry` is located at the beginning of `MruEntry`
// and `data` is located at `mem::size_of::<ListEntry>()`.
const DATA_OFFSET: usize = mem::size_of::<ListEntry>();
const LIST_ENTRY_OFFSET: usize = 0;
// const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data);
// const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry);
pub const fn new(data: T) -> Self {
Self {
list_entry: unsafe { ListEntry::new() },
data: RcCell::new(data),
}
}
/// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the front of the list.
///
/// # Safety
///
/// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`,
/// which is contained inside the `list`.
unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) {
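        // `container_of`-style recovery: `r` points at the embedded `data`
        // cell, so subtracting DATA_OFFSET yields the enclosing `MruEntry`.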
let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>;
let entry = unsafe { &*ptr };
list.push_back(entry);
}
}
// SAFETY: `MruEntry` owns a `ListEntry`.
unsafe impl<T> ListNode for MruEntry<T> {
fn get_list_entry(&self) -> &ListEntry {
&self.list_entry
}
fn from_list_entry(list_entry: *const ListEntry) -> *const Self
|
}
impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self {
Self {
entries,
list: unsafe { List::new() },
}
}
pub fn init(self: Pin<&mut Self>) {
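        // Link every entry into the list up front; freed entries are later
        // moved to the back (see MruEntry::finalize_entry), giving an MRU order.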
let mut this = self.project();
this.list.as_mut().init();
for mut entry in IterPinMut::from(this.entries) {
entry.as_mut().project().list_entry.init();
this.list.push_front(&entry);
}
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<MruArena<T, CAPACITY>>
{
type Data
|
{
(list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self
}
|
identifier_body
|
arena.rs
|
the reference count.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead.
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>;
/// Deallocate a given handle, and finalize the referred object if there are
/// no more handles.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead.
unsafe fn dealloc(&self, handle: Ref<Self::Data>);
/// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned.
///
/// # Safety
///
/// The caller must be careful when calling this inside `ArenaObject::finalize`.
/// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released,
/// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject`
/// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc`
/// for an `ArenaObject` that may be under finalization.
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R;
}
pub trait ArenaObject {
/// Finalizes the `ArenaObject`.
    /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped.
fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>);
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct ArrayArena<T, const CAPACITY: usize> {
#[pin]
entries: [RcCell<T>; CAPACITY],
}
#[pin_project]
#[repr(C)]
pub struct MruEntry<T> {
#[pin]
list_entry: ListEntry,
#[pin]
data: RcCell<T>,
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct MruArena<T, const CAPACITY: usize> {
#[pin]
entries: [MruEntry<T>; CAPACITY],
#[pin]
list: List<MruEntry<T>>,
}
/// A thread-safe reference counted pointer, allocated from `A: Arena`.
/// The data type is same as `A::Data`.
///
/// # Safety
///
/// `inner` is allocated from `arena`.
/// We can safely dereference `arena` until `inner` gets dropped,
/// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> {
arena: *const A,
inner: ManuallyDrop<Ref<A::Data>>,
}
// `Rc` is `Send` because it does not impl `DerefMut`,
// and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock.
// Also, `Rc` does not point to thread-local data.
unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {}
impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self {
Self { entries }
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<ArrayArena<T, CAPACITY>>
{
type Data = T;
type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>;
fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
n: N,
) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
let mut empty: Option<*mut RcCell<T>> = None;
for entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
if empty.is_none() {
empty = Some(entry.as_ref().get_ref() as *const _ as *mut _)
}
// Note: Do not use `break` here.
// We must first search through all entries, and then alloc at empty
                // only if the entry we're looking for doesn't exist.
} else if let Some(r) = entry.try_borrow() {
// The entry is not under finalization. Check its data.
if c(&r)
|
}
}
empty.map(|cell_raw| {
// SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned.
let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) };
n(cell.as_mut().get_pin_mut().unwrap().get_mut());
cell.borrow()
})
}
fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
for mut entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
f(entry.as_mut().get_pin_mut().unwrap().get_mut());
return Some(entry.borrow());
}
}
None
}
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> {
let mut _this = self.lock();
handle.clone()
}
unsafe fn dealloc(&self, handle: Ref<Self::Data>) {
let mut this = self.lock();
if let Ok(mut rm) = RefMut::<T>::try_from(handle) {
rm.finalize::<Self>(&mut this);
}
}
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R,
{
guard.reacquire_after(f)
}
}
impl<T> MruEntry<T> {
// TODO(https://github.com/kaist-cp/rv6/issues/369)
    // A workaround for https://github.com/Gilnaa/memoffset/issues/49.
// Assumes `list_entry` is located at the beginning of `MruEntry`
// and `data` is located at `mem::size_of::<ListEntry>()`.
const DATA_OFFSET: usize = mem::size_of::<ListEntry>();
const LIST_ENTRY_OFFSET: usize = 0;
// const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data);
// const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry);
pub const fn new(data: T) -> Self {
Self {
list_entry: unsafe { ListEntry::new() },
data: RcCell::new(data),
}
}
/// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the front of the list.
///
/// # Safety
///
/// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`,
/// which is contained inside the `list`.
unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) {
let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>;
let entry = unsafe { &*ptr };
list.push_back(entry);
}
}
// SAFETY: `MruEntry` owns a `ListEntry`.
unsafe impl<T> ListNode for MruEntry<T> {
fn get_list_entry(&self) -> &ListEntry {
&self.list_entry
}
fn from_list_entry(list_entry: *const ListEntry) -> *const Self {
(list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self
}
}
impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self {
Self {
entries,
list: unsafe { List::new() },
}
}
pub fn init(self: Pin<&mut Self>) {
let mut this = self.project();
this.list.as_mut().init();
for mut entry in IterPinMut::from(this.entries) {
entry.as_mut().project().list_entry.init();
this.list.push_front(&entry);
}
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<MruArena<T, CAPACITY>>
{
type Data
|
{
return Some(r);
}
|
conditional_block
|
arena.rs
|
<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Rc<Self>> {
let inner = self.alloc_handle(f)?;
// SAFETY: `inner` was allocated from `self`.
Some(unsafe { Rc::from_unchecked(self, inner) })
}
/// Duplicate a given handle, and increase the reference count.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead.
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>;
/// Deallocate a given handle, and finalize the referred object if there are
/// no more handles.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead.
unsafe fn dealloc(&self, handle: Ref<Self::Data>);
/// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned.
///
/// # Safety
///
/// The caller must be careful when calling this inside `ArenaObject::finalize`.
/// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released,
/// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject`
/// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc`
/// for an `ArenaObject` that may be under finalization.
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R;
}
pub trait ArenaObject {
/// Finalizes the `ArenaObject`.
    /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped.
fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>);
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct ArrayArena<T, const CAPACITY: usize> {
#[pin]
entries: [RcCell<T>; CAPACITY],
}
#[pin_project]
#[repr(C)]
pub struct MruEntry<T> {
#[pin]
list_entry: ListEntry,
#[pin]
data: RcCell<T>,
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct MruArena<T, const CAPACITY: usize> {
#[pin]
entries: [MruEntry<T>; CAPACITY],
#[pin]
list: List<MruEntry<T>>,
}
/// A thread-safe reference counted pointer, allocated from `A: Arena`.
/// The data type is same as `A::Data`.
///
/// # Safety
///
/// `inner` is allocated from `arena`.
/// We can safely dereference `arena` until `inner` gets dropped,
/// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> {
arena: *const A,
inner: ManuallyDrop<Ref<A::Data>>,
}
// `Rc` is `Send` because it does not impl `DerefMut`,
// and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock.
// Also, `Rc` does not point to thread-local data.
unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {}
impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self {
Self { entries }
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<ArrayArena<T, CAPACITY>>
{
type Data = T;
type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>;
fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
n: N,
) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
let mut empty: Option<*mut RcCell<T>> = None;
for entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
if empty.is_none() {
empty = Some(entry.as_ref().get_ref() as *const _ as *mut _)
}
// Note: Do not use `break` here.
// We must first search through all entries, and then allocate at `empty`
// only if the entry we're looking for doesn't exist.
} else if let Some(r) = entry.try_borrow() {
// The entry is not under finalization. Check its data.
if c(&r) {
return Some(r);
}
}
}
empty.map(|cell_raw| {
// SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned.
let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) };
n(cell.as_mut().get_pin_mut().unwrap().get_mut());
cell.borrow()
})
}
fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
for mut entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
f(entry.as_mut().get_pin_mut().unwrap().get_mut());
return Some(entry.borrow());
}
}
None
}
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> {
let mut _this = self.lock();
handle.clone()
}
unsafe fn dealloc(&self, handle: Ref<Self::Data>) {
let mut this = self.lock();
if let Ok(mut rm) = RefMut::<T>::try_from(handle) {
rm.finalize::<Self>(&mut this);
}
}
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R,
{
guard.reacquire_after(f)
}
}
impl<T> MruEntry<T> {
// TODO(https://github.com/kaist-cp/rv6/issues/369)
// A workaround for https://github.com/Gilnaa/memoffset/issues/49.
// Assumes `list_entry` is located at the beginning of `MruEntry`
// and `data` is located at `mem::size_of::<ListEntry>()`.
const DATA_OFFSET: usize = mem::size_of::<ListEntry>();
const LIST_ENTRY_OFFSET: usize = 0;
// const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data);
// const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry);
pub const fn new(data: T) -> Self {
Self {
list_entry: unsafe { ListEntry::new() },
data: RcCell::new(data),
}
}
/// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the front of the list.
///
/// # Safety
///
/// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`,
/// which is contained inside the `list`.
unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) {
let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>;
let entry = unsafe { &*ptr };
list.push_back(entry);
}
}
// SAFETY: `MruEntry` owns a `ListEntry`.
unsafe impl<T> ListNode for MruEntry<T> {
fn get_list_entry(&self) -> &ListEntry {
&self.list_entry
}
fn from_list_entry(list_entry: *const ListEntry) -> *const Self {
(list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self
}
}
impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self {
Self {
entries,
list: unsafe { List::new() },
}
}
pub fn init(self: Pin<&mut Self>) {
let mut this = self.project();
this.list.as_mut().init();
for
|
alloc
|
identifier_name
|
|
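The arena.rs rows above center on find_or_alloc_handle's two-pass policy: remember the first free slot, but keep scanning so an existing match always wins over a fresh allocation. A minimal sketch of that policy over plain refcounted slots (ToyArena and Slot are hypothetical stand-ins, not the rv6 Arena/RcCell API):

// Minimal sketch of the find-or-alloc policy, under the assumption of
// plain refcounted slots instead of rv6's RcCell/Ref machinery.
struct Slot<T> {
    refcnt: usize,
    data: T,
}

struct ToyArena<T> {
    slots: Vec<Option<Slot<T>>>,
}

impl<T> ToyArena<T> {
    fn new(cap: usize) -> Self {
        Self { slots: (0..cap).map(|_| None).collect() }
    }

    /// Two-pass policy from the sample: remember the first free slot, but
    /// keep scanning so an existing match is preferred over a fresh allocation.
    fn find_or_alloc(
        &mut self,
        matches: impl Fn(&T) -> bool,
        init: impl FnOnce() -> T,
    ) -> Option<usize> {
        let mut empty = None;
        let mut found = None;
        for (i, slot) in self.slots.iter().enumerate() {
            match slot {
                None if empty.is_none() => empty = Some(i),
                Some(s) if matches(&s.data) => {
                    found = Some(i);
                    break;
                }
                _ => {}
            }
        }
        if let Some(i) = found {
            self.slots[i].as_mut().unwrap().refcnt += 1; // found: bump the count
            return Some(i);
        }
        let i = empty?; // arena full -> None, like alloc_handle in the sample
        self.slots[i] = Some(Slot { refcnt: 1, data: init() });
        Some(i)
    }
}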
arena.rs
|
the reference count.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead.
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>;
/// Deallocate a given handle, and finalize the referred object if there are
/// no more handles.
///
/// # Safety
///
/// `handle` must be allocated from `self`.
// TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead.
unsafe fn dealloc(&self, handle: Ref<Self::Data>);
/// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned.
///
/// # Safety
///
/// The caller must be careful when calling this inside `ArenaObject::finalize`.
/// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released,
/// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject`
/// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc`
/// for an `ArenaObject` that may be under finalization.
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R;
}
pub trait ArenaObject {
/// Finalizes the `ArenaObject`.
/// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped.
fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>);
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct ArrayArena<T, const CAPACITY: usize> {
#[pin]
entries: [RcCell<T>; CAPACITY],
}
#[pin_project]
#[repr(C)]
pub struct MruEntry<T> {
#[pin]
list_entry: ListEntry,
#[pin]
data: RcCell<T>,
}
/// A homogeneous memory allocator equipped with reference counts.
#[pin_project]
pub struct MruArena<T, const CAPACITY: usize> {
#[pin]
entries: [MruEntry<T>; CAPACITY],
#[pin]
list: List<MruEntry<T>>,
}
/// A thread-safe reference counted pointer, allocated from `A: Arena`.
/// The data type is the same as `A::Data`.
///
/// # Safety
///
/// `inner` is allocated from `arena`.
/// We can safely dereference `arena` until `inner` gets dropped,
/// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> {
arena: *const A,
inner: ManuallyDrop<Ref<A::Data>>,
}
// `Rc` is `Send` because it does not impl `DerefMut`,
// and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock.
// Also, `Rc` does not point to thread-local data.
unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {}
impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self {
Self { entries }
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<ArrayArena<T, CAPACITY>>
{
type Data = T;
type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>;
fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
n: N,
) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
let mut empty: Option<*mut RcCell<T>> = None;
for entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
if empty.is_none() {
empty = Some(entry.as_ref().get_ref() as *const _ as *mut _)
}
// Note: Do not use `break` here.
// We must first search through all entries, and then allocate at `empty`
// only if the entry we're looking for doesn't exist.
} else if let Some(r) = entry.try_borrow() {
// The entry is not under finalization. Check its data.
if c(&r) {
return Some(r);
}
}
}
empty.map(|cell_raw| {
// SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned.
let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) };
n(cell.as_mut().get_pin_mut().unwrap().get_mut());
cell.borrow()
})
}
fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> {
let mut guard = self.lock();
let this = guard.get_pin_mut().project();
for mut entry in IterPinMut::from(this.entries) {
if !entry.is_borrowed() {
f(entry.as_mut().get_pin_mut().unwrap().get_mut());
return Some(entry.borrow());
}
}
None
}
unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> {
let mut _this = self.lock();
handle.clone()
}
unsafe fn dealloc(&self, handle: Ref<Self::Data>) {
let mut this = self.lock();
if let Ok(mut rm) = RefMut::<T>::try_from(handle) {
rm.finalize::<Self>(&mut this);
}
}
unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R
where
F: FnOnce() -> R,
|
impl<T> MruEntry<T> {
// TODO(https://github.com/kaist-cp/rv6/issues/369)
// A workaround for https://github.com/Gilnaa/memoffset/issues/49.
// Assumes `list_entry` is located at the beginning of `MruEntry`
// and `data` is located at `mem::size_of::<ListEntry>()`.
const DATA_OFFSET: usize = mem::size_of::<ListEntry>();
const LIST_ENTRY_OFFSET: usize = 0;
// const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data);
// const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry);
pub const fn new(data: T) -> Self {
Self {
list_entry: unsafe { ListEntry::new() },
data: RcCell::new(data),
}
}
/// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the front of the list.
///
/// # Safety
///
/// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`,
/// which is contained inside the `list`.
unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) {
let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>;
let entry = unsafe { &*ptr };
list.push_back(entry);
}
}
// SAFETY: `MruEntry` owns a `ListEntry`.
unsafe impl<T> ListNode for MruEntry<T> {
fn get_list_entry(&self) -> &ListEntry {
&self.list_entry
}
fn from_list_entry(list_entry: *const ListEntry) -> *const Self {
(list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self
}
}
impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> {
// TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self {
Self {
entries,
list: unsafe { List::new() },
}
}
pub fn init(self: Pin<&mut Self>) {
let mut this = self.project();
this.list.as_mut().init();
for mut entry in IterPinMut::from(this.entries) {
entry.as_mut().project().list_entry.init();
this.list.push_front(&entry);
}
}
}
impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena
for Spinlock<MruArena<T, CAPACITY>>
{
type Data =
|
{
guard.reacquire_after(f)
}
}
|
random_line_split
|
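MruEntry::finalize_entry and from_list_entry in the row above recover the containing MruEntry from a pointer to one of its fields by subtracting a compile-time offset, the classic container_of idiom; the hand-written DATA_OFFSET/LIST_ENTRY_OFFSET constants are a workaround noted in the TODO. A standalone sketch of the same idiom using the offset_of! macro that is stable since Rust 1.77 (Outer and its fields are hypothetical):

use std::mem::offset_of;

#[repr(C)]
struct Outer {
    header: u64,
    inner: u32, // the field we are handed a pointer to
}

/// Recover the containing `Outer` from a pointer to its `inner` field by
/// subtracting the field's byte offset (the container_of idiom).
///
/// SAFETY: `inner` must really point at the `inner` field of a live `Outer`,
/// just like the list-entry pointers in the sample above.
unsafe fn outer_from_inner(inner: *const u32) -> *const Outer {
    (inner as usize - offset_of!(Outer, inner)) as *const Outer
}

fn main() {
    let o = Outer { header: 7, inner: 42 };
    let recovered = unsafe { &*outer_from_inner(&o.inner) };
    assert_eq!(recovered.header, 7);
}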
driver.rs
|
serial_ports::{ListPortInfo, ListPorts};
use serial_ports::ListPortType::UsbPort;
use tokio_core::reactor::Handle;
use tokio_core::channel::channel;
use tokio_core::channel::Sender;
use tokio_core::channel::Receiver;
use config::Config;
use errors::*;
use item::Item;
use device::DB;
#[cfg(windows)]
fn get_default_devices() -> Vec<String> {
vec!["\\\\.\\COM6".to_owned()]
}
#[cfg(unix)]
fn is_usb_zwave_device(port: &ListPortInfo) -> bool {
let default_usb_devices = [// VID PID
// ----- -----
(0x0658, 0x0200), // Aeotech Z-Stick Gen-5
(0x0658, 0x0280), // UZB1
(0x10c4, 0xea60) /* Aeotech Z-Stick S2 */];
// Is it one of the vid/pids in the table?
if let UsbPort(ref info) = port.port_type {
default_usb_devices.contains(&(info.vid, info.pid))
} else {
false
}
}
#[cfg(unix)]
fn get_default_devices() -> Vec<String> {
// Enumerate all of the serial devices and see if any of them match our
// known VID:PID.
let mut ports: Vec<String> = Vec::new();
let usb_ports: Vec<String> = ListPorts::new()
.iter()
.filter(|port| is_usb_zwave_device(port))
.map(|port| port.device.to_string_lossy().into_owned())
.collect();
ports.extend(usb_ports);
if ports.is_empty() {
// The following is only included temporarily until we can get a more
// comprehensive list of VIDs and PIDs.
error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \
were found:");
for port in ListPorts::new().iter() {
if let UsbPort(ref info) = port.port_type
|
}
// The following should be removed, once we have all of the devices captured using the above
let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably)
"/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2)
"/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5)
"/dev/cu.usbmodem1421", // Isabel (UZB Static Controller)
"/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2)
"/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */];
if let Some(default_device) = default_devices.iter()
.find(|device_name| fs::metadata(device_name).is_ok())
.map(|&str| str.to_owned()) {
ports.push(default_device);
}
}
ports
}
#[derive(Clone)]
pub struct ZWave {
#[allow(dead_code)]
ozw_manager: Arc<Mutex<ozw::manager::Manager>>,
// TODO improve this system - ideally, we should hide these behind another struct
// so that only one call is needed to update both.
items: Arc<Mutex<DB>>,
}
impl ZWave {
pub fn new(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> {
let cfg = cfg.clone();
let mut manager = {
let config_path = match cfg.sys_config {
Some(ref path) => path.as_ref(),
None => "/etc/openzwave",
};
let user_path = match cfg.user_config {
Some(ref path) => path.as_ref(),
None => "./config",
};
let opts = Options::create(config_path,
user_path,
"--SaveConfiguration true --DumpTriggerLevel 0 \
--ConsoleOutput false")?;
ozw::manager::Manager::create(opts)?
};
let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices());
for device in devices {
fs::File::open(&device)?;
manager.add_driver(&device)?;
}
let manager = Arc::new(Mutex::new(manager));
let items = Arc::new(Mutex::new(Default::default()));
let (tx, rx) = channel(handle)?;
let driver = ZWave {
ozw_manager: manager.clone(),
items: items,
};
let watcher = Watcher {
cfg: cfg,
driver: driver.clone(),
output: Mutex::new(tx),
};
always_lock(manager.lock()).add_watcher(watcher)?;
Ok((driver, rx))
}
pub fn get_manager(&self) -> MutexGuard<Manager> {
always_lock(self.ozw_manager.lock())
}
}
impl Binding for ZWave {
type Config = Config;
type Error = Error;
type Item = Item;
fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> {
ZWave::new(handle, cfg)
}
fn get_value(&self, name: &str) -> Option<Item> {
always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone())
}
}
struct Watcher {
driver: ZWave,
cfg: Config,
output: Mutex<Sender<Notification<Item>>>,
}
impl Watcher {
fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> {
::catt_core::util::always_lock(self.output.lock())
}
}
impl ozw::manager::NotificationWatcher for Watcher {
fn on_notification(&self, zwave_notification: &ZWaveNotification) {
let notification: Notification<Item> = match zwave_notification.get_type() {
NotificationType::Type_DriverReady => {
let home_id = zwave_notification.get_home_id();
let controller = Item::controller(&format!("zwave_{}_Controller", home_id),
self.driver.clone(),
home_id);
always_lock(self.driver.items.lock())
.add_item(controller.get_name(), controller.clone());
let _ = self.get_out().send(Notification::Added(controller.clone()));
Notification::Changed(controller)
}
NotificationType::Type_AllNodesQueried |
NotificationType::Type_AwakeNodesQueried |
NotificationType::Type_AllNodesQueriedSomeDead => {
debug!("Controller ready");
// self.driver.ozw_manager.write_configs();
return;
}
NotificationType::Type_ValueAdded => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let (name, exists) = match self.cfg.lookup_device(v) {
Some(name) => {
let exists = if let Some(_) = db.get_name(&v) {
warn!("duplicate match found for {}", name);
true
} else {
false
};
(name, exists)
}
None => {
if self.cfg.expose_unbound.unwrap_or(true) {
if let Some(name) = db.get_name(&v) {
warn!("duplicate match found for unconfigured {}", name);
(name.clone(), true)
} else {
(format!("zwave_{}_{}_{}",
v.get_home_id(),
v.get_node_id(),
v.get_label()),
false)
}
} else {
debug!("no configured devices matched {}", v);
return;
}
}
};
let item = if !exists {
debug!("adding value {} to db", name);
db.add_value(name.clone(), v)
} else {
Item::item(&name, v)
};
Notification::Added(item)
}
NotificationType::Type_ValueChanged => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n,
None => return,
};
let item = Item::item(&name, v);
debug!("value {} changed: {:?}", item.get_name(), item.get_value());
Notification::Changed(item)
}
NotificationType::Type_ValueRemoved => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n.clone(),
None => return,
};
debug!("removing value {} from db", name);
Notification::Removed(match db.remove_value(v) {
Some(it) => it,
None => Item::item(&name, v),
})
}
// TODO new implementation for this
// ZWaveNotification::Generic(s) => {
// if s.contains("Type_DriverRemoved") {
// warn!("controller removed! shutting down.");
// ::std::process::exit(1);
|
{
error!("[OpenzwaveStateful] {:04x}:{:04x} {}",
info.vid,
info.pid,
port.device.display());
}
|
conditional_block
|
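The driver.rs row above detects Z-Wave dongles with a plain allow-list over (vendor id, product id) pairs, then falls back to probing well-known device nodes. The core of that logic, isolated (the fallback paths here are illustrative; the sample carries a longer platform-specific list):

use std::fs;

/// Known Z-Wave dongles by USB (vendor id, product id), as in the sample.
const KNOWN_ZWAVE_IDS: &[(u16, u16)] = &[
    (0x0658, 0x0200), // Aeotech Z-Stick Gen-5
    (0x0658, 0x0280), // UZB1
    (0x10c4, 0xea60), // Aeotech Z-Stick S2
];

/// Fallback device nodes to probe when enumeration finds no known VID:PID.
const FALLBACK_DEVICES: &[&str] = &["/dev/ttyUSB0", "/dev/ttyACM0"];

fn is_known_zwave(vid: u16, pid: u16) -> bool {
    KNOWN_ZWAVE_IDS.contains(&(vid, pid))
}

/// First fallback path that exists on this machine, if any.
fn first_existing_device() -> Option<String> {
    FALLBACK_DEVICES
        .iter()
        .find(|path| fs::metadata(path).is_ok())
        .map(|s| s.to_string())
}

fn main() {
    assert!(is_known_zwave(0x0658, 0x0200));
    assert!(!is_known_zwave(0x1234, 0x5678));
    println!("fallback device: {:?}", first_existing_device());
}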
driver.rs
|
serial_ports::{ListPortInfo, ListPorts};
use serial_ports::ListPortType::UsbPort;
use tokio_core::reactor::Handle;
use tokio_core::channel::channel;
use tokio_core::channel::Sender;
use tokio_core::channel::Receiver;
use config::Config;
use errors::*;
use item::Item;
use device::DB;
#[cfg(windows)]
fn get_default_devices() -> Vec<String> {
vec!["\\\\.\\COM6".to_owned()]
}
#[cfg(unix)]
fn is_usb_zwave_device(port: &ListPortInfo) -> bool {
let default_usb_devices = [// VID PID
// ----- -----
(0x0658, 0x0200), // Aeotech Z-Stick Gen-5
(0x0658, 0x0280), // UZB1
(0x10c4, 0xea60) /* Aeotech Z-Stick S2 */];
// Is it one of the vid/pids in the table?
if let UsbPort(ref info) = port.port_type {
default_usb_devices.contains(&(info.vid, info.pid))
} else {
false
}
}
#[cfg(unix)]
fn get_default_devices() -> Vec<String> {
// Enumerate all of the serial devices and see if any of them match our
// known VID:PID.
let mut ports: Vec<String> = Vec::new();
let usb_ports: Vec<String> = ListPorts::new()
.iter()
.filter(|port| is_usb_zwave_device(port))
.map(|port| port.device.to_string_lossy().into_owned())
.collect();
ports.extend(usb_ports);
if ports.is_empty() {
// The following is only included temporarily until we can get a more
// comprehensive list of VIDs and PIDs.
error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \
were found:");
for port in ListPorts::new().iter() {
if let UsbPort(ref info) = port.port_type {
error!("[OpenzwaveStateful] {:04x}:{:04x} {}",
info.vid,
info.pid,
port.device.display());
}
}
// The following should be removed, once we have all of the devices captured using the above
let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably)
"/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2)
"/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5)
"/dev/cu.usbmodem1421", // Isabel (UZB Static Controller)
"/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2)
"/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */];
if let Some(default_device) = default_devices.iter()
.find(|device_name| fs::metadata(device_name).is_ok())
.map(|&str| str.to_owned()) {
ports.push(default_device);
}
}
ports
}
#[derive(Clone)]
pub struct ZWave {
#[allow(dead_code)]
ozw_manager: Arc<Mutex<ozw::manager::Manager>>,
// TODO improve this system - ideally, we should hide these behind another struct
// so that only one call is needed to update both.
items: Arc<Mutex<DB>>,
}
impl ZWave {
pub fn
|
(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> {
let cfg = cfg.clone();
let mut manager = {
let config_path = match cfg.sys_config {
Some(ref path) => path.as_ref(),
None => "/etc/openzwave",
};
let user_path = match cfg.user_config {
Some(ref path) => path.as_ref(),
None => "./config",
};
let opts = Options::create(config_path,
user_path,
"--SaveConfiguration true --DumpTriggerLevel 0 \
--ConsoleOutput false")?;
ozw::manager::Manager::create(opts)?
};
let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices());
for device in devices {
fs::File::open(&device)?;
manager.add_driver(&device)?;
}
let manager = Arc::new(Mutex::new(manager));
let items = Arc::new(Mutex::new(Default::default()));
let (tx, rx) = channel(handle)?;
let driver = ZWave {
ozw_manager: manager.clone(),
items: items,
};
let watcher = Watcher {
cfg: cfg,
driver: driver.clone(),
output: Mutex::new(tx),
};
always_lock(manager.lock()).add_watcher(watcher)?;
Ok((driver, rx))
}
pub fn get_manager(&self) -> MutexGuard<Manager> {
always_lock(self.ozw_manager.lock())
}
}
impl Binding for ZWave {
type Config = Config;
type Error = Error;
type Item = Item;
fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> {
ZWave::new(handle, cfg)
}
fn get_value(&self, name: &str) -> Option<Item> {
always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone())
}
}
struct Watcher {
driver: ZWave,
cfg: Config,
output: Mutex<Sender<Notification<Item>>>,
}
impl Watcher {
fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> {
::catt_core::util::always_lock(self.output.lock())
}
}
impl ozw::manager::NotificationWatcher for Watcher {
fn on_notification(&self, zwave_notification: &ZWaveNotification) {
let notification: Notification<Item> = match zwave_notification.get_type() {
NotificationType::Type_DriverReady => {
let home_id = zwave_notification.get_home_id();
let controller = Item::controller(&format!("zwave_{}_Controller", home_id),
self.driver.clone(),
home_id);
always_lock(self.driver.items.lock())
.add_item(controller.get_name(), controller.clone());
let _ = self.get_out().send(Notification::Added(controller.clone()));
Notification::Changed(controller)
}
NotificationType::Type_AllNodesQueried |
NotificationType::Type_AwakeNodesQueried |
NotificationType::Type_AllNodesQueriedSomeDead => {
debug!("Controller ready");
// self.driver.ozw_manager.write_configs();
return;
}
NotificationType::Type_ValueAdded => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let (name, exists) = match self.cfg.lookup_device(v) {
Some(name) => {
let exists = if let Some(_) = db.get_name(&v) {
warn!("duplicate match found for {}", name);
true
} else {
false
};
(name, exists)
}
None => {
if self.cfg.expose_unbound.unwrap_or(true) {
if let Some(name) = db.get_name(&v) {
warn!("duplicate match found for unconfigured {}", name);
(name.clone(), true)
} else {
(format!("zwave_{}_{}_{}",
v.get_home_id(),
v.get_node_id(),
v.get_label()),
false)
}
} else {
debug!("no configured devices matched {}", v);
return;
}
}
};
let item = if !exists {
debug!("adding value {} to db", name);
db.add_value(name.clone(), v)
} else {
Item::item(&name, v)
};
Notification::Added(item)
}
NotificationType::Type_ValueChanged => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n,
None => return,
};
let item = Item::item(&name, v);
debug!("value {} changed: {:?}", item.get_name(), item.get_value());
Notification::Changed(item)
}
NotificationType::Type_ValueRemoved => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n.clone(),
None => return,
};
debug!("removing value {} from db", name);
Notification::Removed(match db.remove_value(v) {
Some(it) => it,
None => Item::item(&name, v),
})
}
// TODO new implementation for this
// ZWaveNotification::Generic(s) => {
// if s.contains("Type_DriverRemoved") {
// warn!("controller removed! shutting down.");
// ::std::process::exit(1);
|
new
|
identifier_name
|
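ZWave::new in the rows above wires two things together: shared ownership of the manager via Arc<Mutex<..>>, and a channel whose sending half goes to the watcher while the receiving half is returned to the caller. A dependency-free sketch of that wiring with std::sync::mpsc (Manager, Event, Driver, and Watcher are hypothetical stand-ins; the sample uses tokio_core channels instead):

use std::sync::{mpsc, Arc, Mutex};

struct Manager { drivers: Vec<String> }
struct Event(String);

struct Driver { manager: Arc<Mutex<Manager>> }
struct Watcher { out: mpsc::Sender<Event> }

fn new_driver() -> (Driver, Watcher, mpsc::Receiver<Event>) {
    let manager = Arc::new(Mutex::new(Manager { drivers: Vec::new() }));
    let (tx, rx) = mpsc::channel();
    // The driver keeps one Arc clone; the watcher keeps the sender, so
    // notifications flow to whoever holds `rx`, as in ZWave::new above.
    (Driver { manager: manager.clone() }, Watcher { out: tx }, rx)
}

fn main() {
    let (driver, watcher, rx) = new_driver();
    driver.manager.lock().unwrap().drivers.push("/dev/ttyACM0".into());
    watcher.out.send(Event("driver ready".into())).unwrap();
    assert_eq!(rx.recv().unwrap().0, "driver ready");
}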
driver.rs
|
serial_ports::{ListPortInfo, ListPorts};
use serial_ports::ListPortType::UsbPort;
use tokio_core::reactor::Handle;
use tokio_core::channel::channel;
use tokio_core::channel::Sender;
use tokio_core::channel::Receiver;
use config::Config;
use errors::*;
use item::Item;
use device::DB;
#[cfg(windows)]
fn get_default_devices() -> Vec<String> {
vec!["\\\\.\\COM6".to_owned()]
}
#[cfg(unix)]
fn is_usb_zwave_device(port: &ListPortInfo) -> bool {
let default_usb_devices = [// VID PID
// ----- -----
(0x0658, 0x0200), // Aeotech Z-Stick Gen-5
(0x0658, 0x0280), // UZB1
(0x10c4, 0xea60) /* Aeotech Z-Stick S2 */];
// Is it one of the vid/pids in the table?
if let UsbPort(ref info) = port.port_type {
default_usb_devices.contains(&(info.vid, info.pid))
} else {
false
}
}
#[cfg(unix)]
fn get_default_devices() -> Vec<String> {
// Enumerate all of the serial devices and see if any of them match our
// known VID:PID.
let mut ports: Vec<String> = Vec::new();
let usb_ports: Vec<String> = ListPorts::new()
.iter()
.filter(|port| is_usb_zwave_device(port))
.map(|port| port.device.to_string_lossy().into_owned())
.collect();
ports.extend(usb_ports);
if ports.is_empty() {
// The following is only included temporarily until we can get a more
// comprehensive list of VIDs and PIDs.
error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \
were found:");
for port in ListPorts::new().iter() {
if let UsbPort(ref info) = port.port_type {
error!("[OpenzwaveStateful] {:04x}:{:04x} {}",
info.vid,
info.pid,
port.device.display());
}
}
// The following should be removed, once we have all of the devices captured using the above
let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably)
"/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2)
"/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5)
"/dev/cu.usbmodem1421", // Isabel (UZB Static Controller)
"/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2)
"/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */];
if let Some(default_device) = default_devices.iter()
.find(|device_name| fs::metadata(device_name).is_ok())
.map(|&str| str.to_owned()) {
ports.push(default_device);
}
}
ports
}
#[derive(Clone)]
pub struct ZWave {
#[allow(dead_code)]
ozw_manager: Arc<Mutex<ozw::manager::Manager>>,
// TODO improve this system - ideally, we should hide these behind another struct
// so that only one call is needed to update both.
items: Arc<Mutex<DB>>,
}
impl ZWave {
pub fn new(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> {
let cfg = cfg.clone();
let mut manager = {
let config_path = match cfg.sys_config {
Some(ref path) => path.as_ref(),
None => "/etc/openzwave",
};
let user_path = match cfg.user_config {
Some(ref path) => path.as_ref(),
None => "./config",
};
let opts = Options::create(config_path,
user_path,
"--SaveConfiguration true --DumpTriggerLevel 0 \
--ConsoleOutput false")?;
ozw::manager::Manager::create(opts)?
};
let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices());
for device in devices {
fs::File::open(&device)?;
manager.add_driver(&device)?;
}
let manager = Arc::new(Mutex::new(manager));
let items = Arc::new(Mutex::new(Default::default()));
let (tx, rx) = channel(handle)?;
let driver = ZWave {
ozw_manager: manager.clone(),
items: items,
};
let watcher = Watcher {
cfg: cfg,
driver: driver.clone(),
output: Mutex::new(tx),
};
always_lock(manager.lock()).add_watcher(watcher)?;
Ok((driver, rx))
}
pub fn get_manager(&self) -> MutexGuard<Manager>
|
}
impl Binding for ZWave {
type Config = Config;
type Error = Error;
type Item = Item;
fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> {
ZWave::new(handle, cfg)
}
fn get_value(&self, name: &str) -> Option<Item> {
always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone())
}
}
struct Watcher {
driver: ZWave,
cfg: Config,
output: Mutex<Sender<Notification<Item>>>,
}
impl Watcher {
fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> {
::catt_core::util::always_lock(self.output.lock())
}
}
impl ozw::manager::NotificationWatcher for Watcher {
fn on_notification(&self, zwave_notification: &ZWaveNotification) {
let notification: Notification<Item> = match zwave_notification.get_type() {
NotificationType::Type_DriverReady => {
let home_id = zwave_notification.get_home_id();
let controller = Item::controller(&format!("zwave_{}_Controller", home_id),
self.driver.clone(),
home_id);
always_lock(self.driver.items.lock())
.add_item(controller.get_name(), controller.clone());
let _ = self.get_out().send(Notification::Added(controller.clone()));
Notification::Changed(controller)
}
NotificationType::Type_AllNodesQueried |
NotificationType::Type_AwakeNodesQueried |
NotificationType::Type_AllNodesQueriedSomeDead => {
debug!("Controller ready");
// self.driver.ozw_manager.write_configs();
return;
}
NotificationType::Type_ValueAdded => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let (name, exists) = match self.cfg.lookup_device(v) {
Some(name) => {
let exists = if let Some(_) = db.get_name(&v) {
warn!("duplicate match found for {}", name);
true
} else {
false
};
(name, exists)
}
None => {
if self.cfg.expose_unbound.unwrap_or(true) {
if let Some(name) = db.get_name(&v) {
warn!("duplicate match found for unconfigured {}", name);
(name.clone(), true)
} else {
(format!("zwave_{}_{}_{}",
v.get_home_id(),
v.get_node_id(),
v.get_label()),
false)
}
} else {
debug!("no configured devices matched {}", v);
return;
}
}
};
let item = if !exists {
debug!("adding value {} to db", name);
db.add_value(name.clone(), v)
} else {
Item::item(&name, v)
};
Notification::Added(item)
}
NotificationType::Type_ValueChanged => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n,
None => return,
};
let item = Item::item(&name, v);
debug!("value {} changed: {:?}", item.get_name(), item.get_value());
Notification::Changed(item)
}
NotificationType::Type_ValueRemoved => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n.clone(),
None => return,
};
debug!("removing value {} from db", name);
Notification::Removed(match db.remove_value(v) {
Some(it) => it,
None => Item::item(&name, v),
})
}
// TODO new implementation for this
// ZWaveNotification::Generic(s) => {
// if s.contains("Type_DriverRemoved") {
// warn!("controller removed! shutting down.");
// ::std::process::exit(1);
|
{
always_lock(self.ozw_manager.lock())
}
|
identifier_body
|
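The identifier_body row above fills in get_manager, which relies on always_lock to turn Mutex::lock's Result into a guard unconditionally. always_lock itself is imported from catt_core and never shown; one plausible definition, assuming the common poison-recovery idiom, is:

use std::sync::{LockResult, Mutex, MutexGuard};

/// Turn Mutex::lock's Result into a guard unconditionally, recovering the
/// guard even from a poisoned mutex. An assumption: the real always_lock
/// lives in catt_core and may differ.
fn always_lock<T>(r: LockResult<MutexGuard<'_, T>>) -> MutexGuard<'_, T> {
    r.unwrap_or_else(|poisoned| poisoned.into_inner())
}

fn main() {
    let m = Mutex::new(5);
    *always_lock(m.lock()) += 1;
    assert_eq!(*always_lock(m.lock()), 6);
}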
driver.rs
|
Info) -> bool {
let default_usb_devices = [// VID PID
// ----- -----
(0x0658, 0x0200), // Aeotech Z-Stick Gen-5
(0x0658, 0x0280), // UZB1
(0x10c4, 0xea60) /* Aeotech Z-Stick S2 */];
// Is it one of the vid/pids in the table?
if let UsbPort(ref info) = port.port_type {
default_usb_devices.contains(&(info.vid, info.pid))
} else {
false
}
}
#[cfg(unix)]
fn get_default_devices() -> Vec<String> {
// Enumerate all of the serial devices and see if any of them match our
// known VID:PID.
let mut ports: Vec<String> = Vec::new();
let usb_ports: Vec<String> = ListPorts::new()
.iter()
.filter(|port| is_usb_zwave_device(port))
.map(|port| port.device.to_string_lossy().into_owned())
.collect();
ports.extend(usb_ports);
if ports.is_empty() {
// The following is only included temporarily until we can get a more
// comprehensive list of VIDs and PIDs.
error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \
were found:");
for port in ListPorts::new().iter() {
if let UsbPort(ref info) = port.port_type {
error!("[OpenzwaveStateful] {:04x}:{:04x} {}",
info.vid,
info.pid,
port.device.display());
}
}
// The following should be removed, once we have all of the devices captured using the above
let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably)
"/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2)
"/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5)
"/dev/cu.usbmodem1421", // Isabel (UZB Static Controller)
"/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2)
"/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */];
if let Some(default_device) = default_devices.iter()
.find(|device_name| fs::metadata(device_name).is_ok())
.map(|&str| str.to_owned()) {
ports.push(default_device);
}
}
ports
}
#[derive(Clone)]
pub struct ZWave {
#[allow(dead_code)]
ozw_manager: Arc<Mutex<ozw::manager::Manager>>,
// TODO improve this system - ideally, we should hide these behind another struct
// so that only one call is needed to update both.
items: Arc<Mutex<DB>>,
}
impl ZWave {
pub fn new(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> {
let cfg = cfg.clone();
let mut manager = {
let config_path = match cfg.sys_config {
Some(ref path) => path.as_ref(),
None => "/etc/openzwave",
};
let user_path = match cfg.user_config {
Some(ref path) => path.as_ref(),
None => "./config",
};
let opts = Options::create(config_path,
user_path,
"--SaveConfiguration true --DumpTriggerLevel 0 \
--ConsoleOutput false")?;
ozw::manager::Manager::create(opts)?
};
let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices());
for device in devices {
fs::File::open(&device)?;
manager.add_driver(&device)?;
}
let manager = Arc::new(Mutex::new(manager));
let items = Arc::new(Mutex::new(Default::default()));
let (tx, rx) = channel(handle)?;
let driver = ZWave {
ozw_manager: manager.clone(),
items: items,
};
let watcher = Watcher {
cfg: cfg,
driver: driver.clone(),
output: Mutex::new(tx),
};
always_lock(manager.lock()).add_watcher(watcher)?;
Ok((driver, rx))
}
pub fn get_manager(&self) -> MutexGuard<Manager> {
always_lock(self.ozw_manager.lock())
}
}
impl Binding for ZWave {
type Config = Config;
type Error = Error;
type Item = Item;
fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> {
ZWave::new(handle, cfg)
}
fn get_value(&self, name: &str) -> Option<Item> {
always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone())
}
}
struct Watcher {
driver: ZWave,
cfg: Config,
output: Mutex<Sender<Notification<Item>>>,
}
impl Watcher {
fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> {
::catt_core::util::always_lock(self.output.lock())
}
}
impl ozw::manager::NotificationWatcher for Watcher {
fn on_notification(&self, zwave_notification: &ZWaveNotification) {
let notification: Notification<Item> = match zwave_notification.get_type() {
NotificationType::Type_DriverReady => {
let home_id = zwave_notification.get_home_id();
let controller = Item::controller(&format!("zwave_{}_Controller", home_id),
self.driver.clone(),
home_id);
always_lock(self.driver.items.lock())
.add_item(controller.get_name(), controller.clone());
let _ = self.get_out().send(Notification::Added(controller.clone()));
Notification::Changed(controller)
}
NotificationType::Type_AllNodesQueried |
NotificationType::Type_AwakeNodesQueried |
NotificationType::Type_AllNodesQueriedSomeDead => {
debug!("Controller ready");
// self.driver.ozw_manager.write_configs();
return;
}
NotificationType::Type_ValueAdded => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let (name, exists) = match self.cfg.lookup_device(v) {
Some(name) => {
let exists = if let Some(_) = db.get_name(&v) {
warn!("duplicate match found for {}", name);
true
} else {
false
};
(name, exists)
}
None => {
if self.cfg.expose_unbound.unwrap_or(true) {
if let Some(name) = db.get_name(&v) {
warn!("duplicate match found for unconfigured {}", name);
(name.clone(), true)
} else {
(format!("zwave_{}_{}_{}",
v.get_home_id(),
v.get_node_id(),
v.get_label()),
false)
}
} else {
debug!("no configured devices matched {}", v);
return;
}
}
};
let item = if !exists {
debug!("adding value {} to db", name);
db.add_value(name.clone(), v)
} else {
Item::item(&name, v)
};
Notification::Added(item)
}
NotificationType::Type_ValueChanged => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n,
None => return,
};
let item = Item::item(&name, v);
debug!("value {} changed: {:?}", item.get_name(), item.get_value());
Notification::Changed(item)
}
NotificationType::Type_ValueRemoved => {
let v = zwave_notification.get_value_id();
if !should_expose(v) {
return;
}
let mut db = always_lock(self.driver.items.lock());
let name = match db.get_name(&v) {
Some(n) => n.clone(),
None => return,
};
debug!("removing value {} from db", name);
Notification::Removed(match db.remove_value(v) {
Some(it) => it,
None => Item::item(&name, v),
})
}
// TODO new implementation for this
// ZWaveNotification::Generic(s) => {
// if s.contains("Type_DriverRemoved") {
// warn!("controller removed! shutting down.");
// ::std::process::exit(1);
// }
// return;
// }
NotificationType::Type_ControllerCommand => {
let home_id = zwave_notification.get_home_id();
let db_name = format!("zwave_{}_Controller", home_id);
let controller = match self.driver.get_value(&db_name) {
Some(c) => c,
None => {
debug!("controller not found in item db");
return;
}
};
let state = match ControllerState::from_u8(zwave_notification.get_event()
.unwrap()) {
|
Some(s) => s,
None => {
|
random_line_split
|
|
array-virtual-repeat-strategy.ts
|
lices replace existing entries?
// this means every removal of collection item is followed by an added with the same count
let allSplicesAreInplace = true;
for (i = 0; spliceCount > i; i++) {
splice = splices[i];
const removedCount = splice.removed.length;
const addedCount = splice.addedCount;
totalRemovedCount += removedCount;
totalAddedCount += addedCount;
if (removedCount !== addedCount) {
allSplicesAreInplace = false;
}
}
// Optimizable case 1:
// if all splices removal are followed by same amount of add,
// optimise by just replacing affected visible views
if (allSplicesAreInplace) {
const lastIndex = repeat.lastViewIndex();
// const repeatViewSlot = repeat.viewSlot;
for (i = 0; spliceCount > i; i++) {
splice = splices[i];
for (let collectionIndex = splice.index; collectionIndex < splice.index + splice.addedCount; collectionIndex++) {
if (collectionIndex >= firstIndex && collectionIndex <= lastIndex) {
const viewIndex = collectionIndex - firstIndex;
const overrideContext = createFullOverrideContext(repeat, newArray[collectionIndex], collectionIndex, newArraySize);
repeat.removeView(viewIndex, /*return to cache?*/true, /*skip animation?*/true);
repeat.insertView(viewIndex, overrideContext.bindingContext, overrideContext);
}
}
}
return;
}
let firstIndexAfterMutation = firstIndex;
const itemHeight = repeat.itemHeight;
const originalSize = newArraySize + totalRemovedCount - totalAddedCount;
const currViewCount = repeat.viewCount();
let newViewCount = currViewCount;
// bailable case 1:
// if previous collection size is 0 and item height has not been calculated
// there is no base to calculate mutation
// treat it like an instance changed and bail
if (originalSize === 0 && itemHeight === 0) {
repeat.resetCalculation();
repeat.itemsChanged();
return;
}
// Optimizable case 2:
// all splices happen before the viewport, and all are positive splices (splices with only adds)
// in this case, we only need to adjust the top buffer and let the scroll handler take care of the next step
const all_splices_are_positive_and_before_view_port = totalRemovedCount === 0
&& totalAddedCount > 0
&& splices.every(splice => splice.index <= firstIndex);
if (all_splices_are_positive_and_before_view_port) {
repeat.$first = firstIndex + totalAddedCount - 1;
repeat.topBufferHeight += totalAddedCount * itemHeight;
// 1. ensure that change in scroll position will not be ignored
repeat.enableScroll();
// 2. check if it's currently at the original first index
// assume it's safe to manually adjust scrollbar top position if so
const scrollerInfo = repeat.getScrollerInfo();
const scroller_scroll_top = scrollerInfo.scrollTop;
const top_buffer_distance = getDistanceToParent(repeat.topBufferEl, scrollerInfo.scroller);
const real_scroll_top = Math$max(0, scroller_scroll_top === 0
? 0
: (scroller_scroll_top - top_buffer_distance));
let first_index_after_scroll_adjustment = real_scroll_top === 0
? 0
: Math$floor(real_scroll_top / itemHeight);
if (
// if scroller is not at top most
scroller_scroll_top > top_buffer_distance
// and the current first index is the same as the first index calculated from the scroll position
&& first_index_after_scroll_adjustment === firstIndex
) {
repeat.updateBufferElements(/*skip update?*/false);
repeat.scrollerEl.scrollTop = real_scroll_top + totalAddedCount * itemHeight;
this._remeasure(repeat, itemHeight, newViewCount, newArraySize, firstIndex);
return;
}
// if it's not the same, it's an interesting case
// where multiple repeats are in the same container
// -- and their collections get mutated at the same time
// -- and this state reflects a repeat that does not have any visible view
}
// Optimizable case 3:
// all splices happen after the last index of the repeat, and the repeat has already filled up the viewport
// in this case, no visible view needs to be updated/moved/removed;
// we only need to update the bottom buffer
const lastViewIndex = repeat.lastViewIndex();
const all_splices_are_after_view_port =
currViewCount > repeat.minViewsRequired
&& splices.every(s => s.index > lastViewIndex);
if (all_splices_are_after_view_port) {
repeat.bottomBufferHeight = Math$max(0, newArraySize - firstIndex - currViewCount) * itemHeight;
repeat.updateBufferElements(true);
}
// mutation happens somewhere in the middle of the visible viewport
// or before the viewport. In any case, it will shift the first index around,
// which requires recalculation of everything
else {
let viewsRequiredCount = repeat.minViewsRequired * 2;
// when the max views count required is 0, it's a sign that the previous state of this mutation
// was either reset, or unstable. Should recalculate the min & max numbers of views required
// before processing further
if (viewsRequiredCount === 0) {
const scrollerInfo = repeat.getScrollerInfo();
const minViewsRequired = calcMinViewsRequired(scrollerInfo.height, itemHeight);
// reassign to min views required
repeat.minViewsRequired = minViewsRequired;
// reassign to max views required
viewsRequiredCount = minViewsRequired * 2;
}
for (i = 0; spliceCount > i; ++i) {
const { addedCount, removed: { length: removedCount }, index: spliceIndex } = splices[i];
const removeDelta = removedCount - addedCount;
if (firstIndexAfterMutation > spliceIndex) {
firstIndexAfterMutation = Math$max(0, firstIndexAfterMutation - removeDelta);
}
}
newViewCount = 0;
// if the array size is less than or equal to the number of elements in view
// then adjust the first index to 0
// and set the view count to the new array size, as there are not enough items to fill more than required
if (newArraySize <= repeat.minViewsRequired) {
firstIndexAfterMutation = 0;
newViewCount = newArraySize;
}
// if number of views required to fill viewport is less than the size of array
else {
// else if array size is
// - greater than min number of views required to fill viewport
// - and less than or equal to no of views required to have smooth scrolling
// Set viewcount to new array size
// but do not change first index, since it could be at bottom half of the repeat "actual" views
if (newArraySize <= viewsRequiredCount) {
newViewCount = newArraySize;
firstIndexAfterMutation = 0;
}
// else, the array size is big enough to cover the min views required, and the buffer for smooth scrolling
// then set view count to mins views + buffer number
// don't change first index
else {
newViewCount = viewsRequiredCount;
}
}
const newTopBufferItemCount = newArraySize >= firstIndexAfterMutation
? firstIndexAfterMutation
: 0;
const viewCountDelta = newViewCount - currViewCount;
// needs to adjust bound view count based on newViewCount
// if newViewCount > currViewCount: add until meet new number
if (viewCountDelta > 0) {
for (i = 0; viewCountDelta > i; ++i) {
const collectionIndex = firstIndexAfterMutation + currViewCount + i;
const overrideContext = createFullOverrideContext(repeat, newArray[collectionIndex], collectionIndex, newArray.length);
repeat.addView(overrideContext.bindingContext, overrideContext);
}
} else {
const ii = Math$abs(viewCountDelta);
for (i = 0; ii > i; ++i) {
repeat.removeView(newViewCount, /*return to cache?*/true, /*skip animation?*/false);
}
}
const newBotBufferItemCount = Math$max(0, newArraySize - newTopBufferItemCount - newViewCount);
repeat.$first = firstIndexAfterMutation;
// repeat._previousFirst = firstIndex;
// repeat._lastRebind = firstIndexAfterMutation + newViewCount;
repeat.topBufferHeight = newTopBufferItemCount * itemHeight;
repeat.bottomBufferHeight = newBotBufferItemCount * itemHeight;
repeat.updateBufferElements(/*skip update?*/true);
}
// step 1 of mutation handling could shift the scroller scroll position
// around and stabilize somewhere that is not the original scroll position, based on the splices
// need to recalculate the first index based on scroll position, as this is the simplest form
// of syncing with the browser implementation
this._remeasure(repeat, itemHeight, newViewCount, newArraySize, firstIndexAfterMutation);
}
|
updateAllViews
|
identifier_name
|
|
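The array-virtual-repeat-strategy.ts row above derives its geometry from one measurement: the first visible index is floor(adjusted scroll top / item height), and the top and bottom buffers are whole-item heights on either side of the rendered window. That arithmetic restated in Rust for consistency with the earlier samples (the function name and signature are mine, not the library's; assumes a fixed, nonzero item height, since the sample bails out when itemHeight is 0):

/// Buffer geometry for a virtualized list, as in the sample's _remeasure:
/// everything follows from the scroll offset and a fixed item height.
fn buffer_geometry(
    scroll_top: f64,
    top_buffer_distance: f64,
    item_height: f64,
    view_count: usize,
    array_len: usize,
) -> (usize, f64, f64) {
    let real_scroll_top = (scroll_top - top_buffer_distance).max(0.0);
    // First index implied by the scroll position...
    let mut first = (real_scroll_top / item_height).floor() as usize;
    // ...rolled back so `view_count` views still fit in the collection,
    // mirroring the sample's "scrolled too far down" adjustment.
    first = first.min(array_len.saturating_sub(view_count));
    let top = first as f64 * item_height;
    let bottom = array_len.saturating_sub(first + view_count) as f64 * item_height;
    (first, top, bottom)
}

fn main() {
    // 1000 items of 30px each, scrolled 900px past the buffer: first index 30.
    let (first, top, bottom) = buffer_geometry(900.0, 0.0, 30.0, 20, 1000);
    assert_eq!((first, top, bottom), (30, 900.0, 28500.0));
}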
array-virtual-repeat-strategy.ts
|
(splice => splice.index <= firstIndex);
if (all_splices_are_positive_and_before_view_port) {
repeat.$first = firstIndex + totalAddedCount - 1;
repeat.topBufferHeight += totalAddedCount * itemHeight;
// 1. ensure that change in scroll position will not be ignored
repeat.enableScroll();
// 2. check if it's currently at the original first index
// assume it's safe to manually adjust scrollbar top position if so
const scrollerInfo = repeat.getScrollerInfo();
const scroller_scroll_top = scrollerInfo.scrollTop;
const top_buffer_distance = getDistanceToParent(repeat.topBufferEl, scrollerInfo.scroller);
const real_scroll_top = Math$max(0, scroller_scroll_top === 0
? 0
: (scroller_scroll_top - top_buffer_distance));
let first_index_after_scroll_adjustment = real_scroll_top === 0
? 0
: Math$floor(real_scroll_top / itemHeight);
if (
// if scroller is not at top most
scroller_scroll_top > top_buffer_distance
// and the current first index is the same as the first index calculated from the scroll position
&& first_index_after_scroll_adjustment === firstIndex
) {
repeat.updateBufferElements(/*skip update?*/false);
repeat.scrollerEl.scrollTop = real_scroll_top + totalAddedCount * itemHeight;
this._remeasure(repeat, itemHeight, newViewCount, newArraySize, firstIndex);
return;
}
// if it's not the same, it's an interesting case
// where multiple repeats are in the same container
// -- and their collections get mutated at the same time
// -- and this state reflects a repeat that does not have any visible view
}
// Optimizable case 3:
// all splices happen after the last index of the repeat, and the repeat has already filled up the viewport
// in this case, no visible view needs to be updated/moved/removed;
// we only need to update the bottom buffer
const lastViewIndex = repeat.lastViewIndex();
const all_splices_are_after_view_port =
currViewCount > repeat.minViewsRequired
&& splices.every(s => s.index > lastViewIndex);
if (all_splices_are_after_view_port) {
repeat.bottomBufferHeight = Math$max(0, newArraySize - firstIndex - currViewCount) * itemHeight;
repeat.updateBufferElements(true);
}
// mutation happens somewhere in the middle of the visible viewport
// or before the viewport. In any case, it will shift the first index around,
// which requires recalculation of everything
else {
let viewsRequiredCount = repeat.minViewsRequired * 2;
// when the max views count required is 0, it's a sign that the previous state of this mutation
// was either reset, or unstable. Should recalculate the min & max numbers of views required
// before processing further
if (viewsRequiredCount === 0) {
const scrollerInfo = repeat.getScrollerInfo();
const minViewsRequired = calcMinViewsRequired(scrollerInfo.height, itemHeight);
// reassign to min views required
repeat.minViewsRequired = minViewsRequired;
// reassign to max views required
viewsRequiredCount = minViewsRequired * 2;
}
for (i = 0; spliceCount > i; ++i) {
const { addedCount, removed: { length: removedCount }, index: spliceIndex } = splices[i];
const removeDelta = removedCount - addedCount;
if (firstIndexAfterMutation > spliceIndex) {
firstIndexAfterMutation = Math$max(0, firstIndexAfterMutation - removeDelta);
}
}
newViewCount = 0;
// if the array size is less than or equal to the number of elements in view
// then adjust the first index to 0
// and set the view count to the new array size, as there are not enough items to fill more than required
if (newArraySize <= repeat.minViewsRequired) {
firstIndexAfterMutation = 0;
newViewCount = newArraySize;
}
// if number of views required to fill viewport is less than the size of array
else {
// else if array size is
// - greater than min number of views required to fill viewport
// - and less than or equal to no of views required to have smooth scrolling
// Set viewcount to new array size
// but do not change first index, since it could be at bottom half of the repeat "actual" views
if (newArraySize <= viewsRequiredCount) {
newViewCount = newArraySize;
firstIndexAfterMutation = 0;
}
// else, the array size is big enough to cover the min views required, and the buffer for smooth scrolling
// then set view count to mins views + buffer number
// don't change first index
else {
newViewCount = viewsRequiredCount;
}
}
const newTopBufferItemCount = newArraySize >= firstIndexAfterMutation
? firstIndexAfterMutation
: 0;
const viewCountDelta = newViewCount - currViewCount;
// needs to adjust bound view count based on newViewCount
// if newViewCount > currViewCount: add until meet new number
if (viewCountDelta > 0) {
for (i = 0; viewCountDelta > i; ++i) {
const collectionIndex = firstIndexAfterMutation + currViewCount + i;
const overrideContext = createFullOverrideContext(repeat, newArray[collectionIndex], collectionIndex, newArray.length);
repeat.addView(overrideContext.bindingContext, overrideContext);
}
} else {
const ii = Math$abs(viewCountDelta);
for (i = 0; ii > i; ++i) {
repeat.removeView(newViewCount, /*return to cache?*/true, /*skip animation?*/false);
}
}
const newBotBufferItemCount = Math$max(0, newArraySize - newTopBufferItemCount - newViewCount);
repeat.$first = firstIndexAfterMutation;
// repeat._previousFirst = firstIndex;
// repeat._lastRebind = firstIndexAfterMutation + newViewCount;
repeat.topBufferHeight = newTopBufferItemCount * itemHeight;
repeat.bottomBufferHeight = newBotBufferItemCount * itemHeight;
repeat.updateBufferElements(/*skip update?*/true);
}
// step 1 of mutation handling could shift the scroller scroll position
// around and stabilize somewhere that is not the original scroll position, based on the splices
// need to recalculate the first index based on scroll position, as this is the simplest form
// of syncing with the browser implementation
this._remeasure(repeat, itemHeight, newViewCount, newArraySize, firstIndexAfterMutation);
}
updateAllViews(repeat: IVirtualRepeater, startIndex: number): void {
const views = (repeat.viewSlot as IViewSlot).children;
const viewLength = views.length;
const collection = repeat.items as any[];
const delta = Math$floor(repeat.topBufferHeight / repeat.itemHeight);
let collectionIndex = 0;
let view: IView;
for (; viewLength > startIndex; ++startIndex) {
collectionIndex = startIndex + delta;
view = repeat.view(startIndex);
rebindView(repeat, view, collectionIndex, collection);
repeat.updateBindings(view);
}
}
remeasure(repeat: IVirtualRepeater): void {
this._remeasure(repeat, repeat.itemHeight, repeat.viewCount(), repeat.items.length, repeat.firstViewIndex());
}
/**
* Unlike a normal repeat, a virtualization repeat employs "padding" elements. Those elements
* are often just blank blocks with the proper height/width to adjust the height/width/scroll feel
* of the virtualized repeat.
*
* Because of this, either a mutation of or a change to the repeat's collection will potentially require
* readjustment (or measurement) of those blank blocks, based on the scroll position
*
* This is a 2-phase scroll handler
*
* @internal
*/
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_remeasure(repeat: IVirtualRepeater, itemHeight: number, newViewCount: number, newArraySize: number, firstIndex: number): void {
const scrollerInfo = repeat.getScrollerInfo();
const scroller_scroll_top = scrollerInfo.scrollTop;
const top_buffer_distance = getDistanceToParent(repeat.topBufferEl, scrollerInfo.scroller);
const real_scroll_top = Math$max(0, scroller_scroll_top === 0
? 0
: (scroller_scroll_top - top_buffer_distance));
let first_index_after_scroll_adjustment = real_scroll_top === 0
? 0
: Math$floor(real_scroll_top / itemHeight);
// if the first index after scroll adjustment doesn't fit the number of possible views,
// it means the scroller has gone too far toward the bottom and is no longer suitable to start from this index
// roll back until all views fit into the new collection, or until there are enough collection items to render
if (first_index_after_scroll_adjustment + newViewCount >= newArraySize) {
first_index_after_scroll_adjustment = Math$max(0, newArraySize - newViewCount);
|
random_line_split
|
||
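The prefix of the first TypeScript row classifies a batch of splices before doing any work: if every removal is matched by an equal add at the same index, only the affected visible views need rebinding. That classification pass isolated in Rust (Splice is a stand-in for the library's splice records):

/// Stand-in for the splice records the repeat strategy receives.
struct Splice {
    #[allow(dead_code)]
    index: usize,
    removed: usize,
    added: usize,
}

/// Mirror of the sample's "allSplicesAreInplace" pass: total add/remove
/// counts, plus whether every splice replaces exactly what it removed.
fn classify(splices: &[Splice]) -> (usize, usize, bool) {
    let mut removed = 0;
    let mut added = 0;
    let mut all_inplace = true;
    for s in splices {
        removed += s.removed;
        added += s.added;
        if s.removed != s.added {
            all_inplace = false;
        }
    }
    (removed, added, all_inplace)
}

fn main() {
    let splices = [Splice { index: 3, removed: 2, added: 2 }];
    // In-place mutation: the fast path only rebinds the affected views.
    assert_eq!(classify(&splices), (2, 2, true));
}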
array-virtual-repeat-strategy.ts
|
splice.index <= firstIndex);
if (all_splices_are_positive_and_before_view_port) {
repeat.$first = firstIndex + totalAddedCount - 1;
repeat.topBufferHeight += totalAddedCount * itemHeight;
// 1. ensure that change in scroll position will not be ignored
repeat.enableScroll();
// 2. check if it's currently at the original first index
// assume it's safe to manually adjust scrollbar top position if so
const scrollerInfo = repeat.getScrollerInfo();
const scroller_scroll_top = scrollerInfo.scrollTop;
const top_buffer_distance = getDistanceToParent(repeat.topBufferEl, scrollerInfo.scroller);
const real_scroll_top = Math$max(0, scroller_scroll_top === 0
? 0
: (scroller_scroll_top - top_buffer_distance));
let first_index_after_scroll_adjustment = real_scroll_top === 0
? 0
: Math$floor(real_scroll_top / itemHeight);
if (
// if scroller is not at top most
scroller_scroll_top > top_buffer_distance
// and the current first index is the same as the first index calculated from the scroll position
&& first_index_after_scroll_adjustment === firstIndex
) {
repeat.updateBufferElements(/*skip update?*/false);
repeat.scrollerEl.scrollTop = real_scroll_top + totalAddedCount * itemHeight;
this._remeasure(repeat, itemHeight, newViewCount, newArraySize, firstIndex);
return;
}
// if it's not the same, it's an interesting case
// where multiple repeats are in the same container
// -- and their collections get mutated at the same time
// -- and this state reflects a repeat that does not have any visible view
}
// Optimizable case 3:
// all splices happen after the last index of the repeat, and the repeat has already filled up the viewport
// in this case, no visible view needs to be updated/moved/removed;
// we only need to update the bottom buffer
const lastViewIndex = repeat.lastViewIndex();
const all_splices_are_after_view_port =
currViewCount > repeat.minViewsRequired
&& splices.every(s => s.index > lastViewIndex);
if (all_splices_are_after_view_port) {
repeat.bottomBufferHeight = Math$max(0, newArraySize - firstIndex - currViewCount) * itemHeight;
repeat.updateBufferElements(true);
}
// mutation happens somewhere in the middle of the visible viewport
// or before the viewport. In any case, it will shift the first index around,
// which requires recalculation of everything
else {
let viewsRequiredCount = repeat.minViewsRequired * 2;
// when the max views count required is 0, it's a sign that the previous state of this mutation
// was either reset, or unstable. Should recalculate the min & max numbers of views required
// before processing further
if (viewsRequiredCount === 0) {
const scrollerInfo = repeat.getScrollerInfo();
const minViewsRequired = calcMinViewsRequired(scrollerInfo.height, itemHeight);
// reassign to min views required
repeat.minViewsRequired = minViewsRequired;
// reassign to max views required
viewsRequiredCount = minViewsRequired * 2;
}
for (i = 0; spliceCount > i; ++i) {
const { addedCount, removed: { length: removedCount }, index: spliceIndex } = splices[i];
const removeDelta = removedCount - addedCount;
if (firstIndexAfterMutation > spliceIndex) {
firstIndexAfterMutation = Math$max(0, firstIndexAfterMutation - removeDelta);
}
}
newViewCount = 0;
// if the array size is less than or equal to the number of elements in view
// then adjust the first index to 0
// and set the view count to the new array size, as there are not enough items to fill more than required
if (newArraySize <= repeat.minViewsRequired) {
firstIndexAfterMutation = 0;
newViewCount = newArraySize;
}
// if number of views required to fill viewport is less than the size of array
else {
// else if array size is
// - greater than min number of views required to fill viewport
// - and less than or equal to no of views required to have smooth scrolling
// Set viewcount to new array size
// but do not change first index, since it could be at bottom half of the repeat "actual" views
if (newArraySize <= viewsRequiredCount) {
newViewCount = newArraySize;
firstIndexAfterMutation = 0;
}
// else, the array size is big enough to cover the min views required, and the buffer for smooth scrolling
// then set view count to mins views + buffer number
// don't change first index
else {
newViewCount = viewsRequiredCount;
}
}
const newTopBufferItemCount = newArraySize >= firstIndexAfterMutation
? firstIndexAfterMutation
: 0;
const viewCountDelta = newViewCount - currViewCount;
// needs to adjust bound view count based on newViewCount
// if newViewCount > currViewCount: add until meet new number
if (viewCountDelta > 0) {
for (i = 0; viewCountDelta > i; ++i) {
const collectionIndex = firstIndexAfterMutation + currViewCount + i;
const overrideContext = createFullOverrideContext(repeat, newArray[collectionIndex], collectionIndex, newArray.length);
repeat.addView(overrideContext.bindingContext, overrideContext);
}
} else {
const ii = Math$abs(viewCountDelta);
for (i = 0; ii > i; ++i) {
repeat.removeView(newViewCount, /*return to cache?*/true, /*skip animation?*/false);
}
}
const newBotBufferItemCount = Math$max(0, newArraySize - newTopBufferItemCount - newViewCount);
repeat.$first = firstIndexAfterMutation;
// repeat._previousFirst = firstIndex;
// repeat._lastRebind = firstIndexAfterMutation + newViewCount;
repeat.topBufferHeight = newTopBufferItemCount * itemHeight;
repeat.bottomBufferHeight = newBotBufferItemCount * itemHeight;
repeat.updateBufferElements(/*skip update?*/true);
}
// step 1 of mutation handling could shift the scroller scroll position
// around and stabilize somewhere that is not original scroll position based on splices
    // need to recalculate first index based on scroll position, as this is the simplest form
// of syncing with browser implementation
this._remeasure(repeat, itemHeight, newViewCount, newArraySize, firstIndexAfterMutation);
}
updateAllViews(repeat: IVirtualRepeater, startIndex: number): void {
const views = (repeat.viewSlot as IViewSlot).children;
const viewLength = views.length;
const collection = repeat.items as any[];
const delta = Math$floor(repeat.topBufferHeight / repeat.itemHeight);
let collectionIndex = 0;
let view: IView;
for (; viewLength > startIndex; ++startIndex) {
collectionIndex = startIndex + delta;
view = repeat.view(startIndex);
rebindView(repeat, view, collectionIndex, collection);
repeat.updateBindings(view);
}
}
remeasure(repeat: IVirtualRepeater): void {
this._remeasure(repeat, repeat.itemHeight, repeat.viewCount(), repeat.items.length, repeat.firstViewIndex());
}
/**
* Unlike normal repeat, virtualization repeat employs "padding" elements. Those elements
* often are just blank block with proper height/width to adjust the height/width/scroll feeling
* of virtualized repeat.
*
* Because of this, either mutation or change of the collection of repeat will potentially require
* readjustment (or measurement) of those blank block, based on scroll position
*
   * This is a two-phase scroll handling process.
*
* @internal
*/
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_remeasure(repeat: IVirtualRepeater, itemHeight: number, newViewCount: number, newArraySize: number, firstIndex: number): void {
const scrollerInfo = repeat.getScrollerInfo();
const scroller_scroll_top = scrollerInfo.scrollTop;
const top_buffer_distance = getDistanceToParent(repeat.topBufferEl, scrollerInfo.scroller);
const real_scroll_top = Math$max(0, scroller_scroll_top === 0
? 0
: (scroller_scroll_top - top_buffer_distance));
let first_index_after_scroll_adjustment = real_scroll_top === 0
? 0
: Math$floor(real_scroll_top / itemHeight);
    // if the first index after scroll adjustment doesn't fit the number of possible views,
    // it means the scroller has gone too far toward the bottom and this is no longer a suitable index to start from.
    // Roll back until all views fit into the new collection, or until there are enough collection items to render
if (first_index_after_scroll_adjustment + newViewCount >= newArraySize)
|
{
first_index_after_scroll_adjustment = Math$max(0, newArraySize - newViewCount);
}
|
conditional_block
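The rollback clamp shown in this record's completion derives the first visible index from the scroller position and then pulls it back so the rendered window never overruns the collection. A minimal Python sketch of that arithmetic (illustrative only; the function name and signature are assumed, not part of the source):

import math

def first_index_after_scroll(scroll_top, top_buffer_distance, item_height,
                             view_count, array_size):
    # mirrors real_scroll_top / first_index_after_scroll_adjustment above
    real_scroll_top = max(0, scroll_top - top_buffer_distance)
    first_index = 0 if real_scroll_top == 0 else math.floor(real_scroll_top / item_height)
    # rollback: never start so low that first_index + view_count overruns the array
    if first_index + view_count >= array_size:
        first_index = max(0, array_size - view_count)
    return first_index

assert first_index_after_scroll(1000, 50, 25, 20, 30) == 10  # clamped to 30 - 20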
|
|
array-virtual-repeat-strategy.ts
|
initCalculation(repeat: IVirtualRepeater, items: any[]): VirtualizationCalculation {
const itemCount = items.length;
    // when there is no item, bail immediately
    // and return a reset signal to notify that the calculation finished unsuccessfully
if (!(itemCount > 0)) {
return VirtualizationCalculation.reset;
}
    // before invoking instanceChanged, there needs to be a basic calculation of
    // the required variables, such as the item height and the number of elements required
const scrollerInfo = repeat.getScrollerInfo();
// const containerEl = repeat.getScroller();
const existingViewCount = repeat.viewCount();
if (itemCount > 0 && existingViewCount === 0) {
this.createFirstRow(repeat);
}
// const isFixedHeightContainer = repeat.fixedHeightContainer = hasOverflowScroll(containerEl);
const firstView = repeat.firstView();
const itemHeight = calcOuterHeight(firstView.firstChild as Element);
    // when the item height is 0, bail immediately
    // and return a value notifying that the calculation finished unsuccessfully,
    // as processing cannot continue when the item height is 0
if (itemHeight === 0) {
return VirtualizationCalculation.none;
}
repeat.itemHeight = itemHeight;
const scroll_el_height = scrollerInfo.height;
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const elementsInView = repeat.minViewsRequired = calcMinViewsRequired(scroll_el_height, itemHeight);
// const viewsCount = repeat._viewsLength = elementsInView * 2;
return VirtualizationCalculation.has_sizing | VirtualizationCalculation.observe_scroller;
}
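calcMinViewsRequired is referenced above but not defined in this excerpt. A hedged Python sketch of what it plausibly computes (an assumption, not the library's actual implementation): enough views to cover the scroller height, plus one for partially visible rows at the edges.

import math

def calc_min_views_required(scroller_height, item_height):
    # assumed definition: full rows that fit, plus one partially visible row
    return math.ceil(scroller_height / item_height) + 1

print(calc_min_views_required(600, 50))  # 13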
onAttached(repeat: IVirtualRepeater): void {
if (repeat.items.length < repeat.minViewsRequired) {
repeat.getMore(0, /*is near top?*/true, this.isNearBottom(repeat, repeat.lastViewIndex()), /*force?*/true);
}
}
getViewRange(repeat: IVirtualRepeater, scrollerInfo: IScrollerInfo): [number, number] {
const topBufferEl = repeat.topBufferEl;
const scrollerEl = repeat.scrollerEl;
const itemHeight = repeat.itemHeight;
let realScrollTop = 0;
const isFixedHeightContainer = scrollerInfo.scroller !== htmlElement;
if (isFixedHeightContainer) {
// If offset parent of top buffer is the scroll container
// its actual offsetTop is just the offset top itself
// If not, then the offset top is calculated based on the parent offsetTop as well
const topBufferDistance = getDistanceToParent(topBufferEl, scrollerEl);
const scrollerScrollTop = scrollerInfo.scrollTop;
realScrollTop = Math$max(0, scrollerScrollTop - Math$abs(topBufferDistance));
} else {
realScrollTop = pageYOffset - repeat.distanceToTop;
}
const realViewCount = repeat.minViewsRequired * 2;
// Calculate the index of first view
// Using Math floor to ensure it has correct space for both small and large calculation
let firstVisibleIndex = Math$max(0, itemHeight > 0 ? Math$floor(realScrollTop / itemHeight) : 0);
const lastVisibleIndex = Math$min(
repeat.items.length - 1,
firstVisibleIndex + (realViewCount - /*number of view count includes the first view, so minus 1*/1));
firstVisibleIndex = Math$max(
0,
Math$min(
firstVisibleIndex,
lastVisibleIndex - (realViewCount - /*number of view count includes the first view, so minus 1*/1)
)
);
return [firstVisibleIndex, lastVisibleIndex];
}
updateBuffers(repeat: IVirtualRepeater, firstIndex: number): void {
const itemHeight = repeat.itemHeight;
const itemCount = repeat.items.length;
repeat.topBufferHeight = firstIndex * itemHeight;
repeat.bottomBufferHeight = (itemCount - firstIndex - repeat.viewCount()) * itemHeight;
repeat.updateBufferElements(/*skip update?*/true);
}
isNearTop(repeat: IVirtualRepeater, firstIndex: number): boolean {
const itemCount = repeat.items.length;
return itemCount > 0
? firstIndex < repeat.edgeDistance
: false;
}
isNearBottom(repeat: IVirtualRepeater, lastIndex: number): boolean {
const itemCount = repeat.items.length;
return lastIndex === -1
? true
: itemCount > 0
? lastIndex > (itemCount - 1 - repeat.edgeDistance)
: false;
}
/**
* @override
* Handle the repeat's collection instance changing.
* @param repeat The repeater instance.
* @param items The new array instance.
   * @param first The index of the first active view. It is required; it is marked optional (?) only to keep the signature polymorphic.
*/
instanceChanged(repeat: IVirtualRepeater, items: any[], first?: number): void {
if (this._inPlaceProcessItems(repeat, items, first)) {
      // using repeat.$first instead of first from the argument to use the latest first index
this._remeasure(repeat, repeat.itemHeight, repeat.minViewsRequired * 2, items.length, repeat.$first);
}
}
/**
* @override
* Handle the repeat's collection instance mutating.
* @param repeat The repeat instance.
* @param array The modified array.
* @param splices Records of array changes.
*/
instanceMutated(repeat: IVirtualRepeater, array: any[], splices: ICollectionObserverSplice[]): void {
this._standardProcessInstanceMutated(repeat, array, splices);
}
/**
   * Process items that are currently mapped to a view in the bound DOM tree
*
* @returns `false` to signal there should be no remeasurement
* @internal
*/
_inPlaceProcessItems($repeat: IVirtualRepeater, items: any[], firstIndex: number): boolean {
const repeat = $repeat as IArrayVirtualRepeater;
const currItemCount = items.length;
if (currItemCount === 0) {
repeat.removeAllViews(/*return to cache?*/true, /*skip animation?*/false);
repeat.resetCalculation();
repeat.__queuedSplices = repeat.__array = undefined;
return false;
}
const max_views_count = repeat.minViewsRequired * 2;
// if the number of items shrinks to less than number of active views
// remove all unneeded views
let realViewsCount = repeat.viewCount();
while (realViewsCount > currItemCount) {
realViewsCount--;
repeat.removeView(realViewsCount, /*return to cache?*/true, /*skip animation?*/false);
}
    // there is a situation where the container height shrinks:
    // the real views count will be greater than the new maximum required view count,
    // so remove all unnecessary views
while (realViewsCount > max_views_count) {
realViewsCount--;
repeat.removeView(realViewsCount, /*return to cache?*/true, /*skip animation?*/false);
}
realViewsCount = Math$min(realViewsCount, max_views_count);
const local = repeat.local;
const lastIndex = currItemCount - 1;
if (firstIndex + realViewsCount > lastIndex) {
// first = currItemCount - realViewsCount instead of: first = currItemCount - 1 - realViewsCount;
// this is because during view update
// view(i) starts at 0 and ends at less than last
firstIndex = Math$max(0, currItemCount - realViewsCount);
}
repeat.$first = firstIndex;
// re-evaluate bindings on existing views.
for (let i = 0; i < realViewsCount; i++) {
const currIndex = i + firstIndex;
const view = repeat.view(i);
const last = currIndex === currItemCount - 1;
const middle = currIndex !== 0 && !last;
const bindingContext = view.bindingContext;
const overrideContext = view.overrideContext;
// any changes to the binding context?
if (bindingContext[local] === items[currIndex]
&& overrideContext.$index === currIndex
&& overrideContext.$middle === middle
&& overrideContext.$last === last
) {
// no changes. continue...
continue;
}
// update the binding context and refresh the bindings.
bindingContext[local] = items[currIndex];
overrideContext.$first = currIndex === 0;
overrideContext.$middle = middle;
overrideContext.$last = last;
overrideContext.$index = currIndex;
const odd = currIndex % 2 === 1;
overrideContext.$odd = odd;
overrideContext.$even = !odd;
repeat.updateBindings(view);
}
// add new views
const minLength = Math$min(max_views_count, currItemCount);
for (let i = realViewsCount; i < minLength; i++) {
const overrideContext = createFullOverrideContext(repeat, items[i], i, currItemCount);
repeat.addView(overrideContext.bindingContext, overrideContext);
}
return true;
}
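The rebind loop above recomputes the contextual flags for each view. The same flag arithmetic, sketched in Python for a collection index i in a list of length n (illustration only):

def override_context_flags(i, n):
    first = i == 0
    last = i == n - 1
    odd = i % 2 == 1
    # $middle is true only when the item is neither first nor last
    return {'$first': first, '$middle': not first and not last,
            '$last': last, '$odd': odd, '$even': not odd}

assert override_context_flags(0, 3) == {
    '$first': True, '$middle': False, '$last': False, '$odd': False, '$even': True}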
/**@internal */
_standardProcessInstanceMutated($repeat: IVirtualRepeater, array: Array<any>, splices: ICollection
|
{
return items.length;
}
|
identifier_body
|
|
fit_sed.py
|
):
"""Switch method for different types of flux measurement input.
Returns 2d array of floats. Note that first column can be an exact
observed central wavelength, or a number in [70, 100, 160, ...],
which serves as a proxy for pacs-blue, or pacs-green, etc.
"""
if isinstance(measurements, basestring):
try:
measurements_array = np.genfromtxt(measurements, dtype=None)
except IOError:
sys.exit('String is not valid file of measurements.')
elif isinstance(measurements, (list, tuple, np.ndarray)):
measurements_array = np.array(measurements)
else:
sys.exit('There is a problem with the measurements.')
# ensure that the array has 2 dimensions
    if len(measurements_array) == 1 or type(measurements_array) is np.void:
measurements_array = np.array(measurements_array)
# parse each row at a time and store in clean_measurements array
clean_measurements = np.zeros((len(measurements_array), 3),
dtype='float')
for i, row in enumerate(measurements_array):
try:
observed_wavelength, flux, flux_err = row
        except (ValueError, TypeError):
sys.exit('Each row must have three elements.')
try:
observed_wavelength = float(observed_wavelength)
except ValueError:
telescope_filter = observed_wavelength.lower()
if 'pacs' in telescope_filter:
if '70' in telescope_filter or 'blue' in telescope_filter:
observed_wavelength = 70
elif '100' in telescope_filter or 'green' in telescope_filter:
observed_wavelength = 100
elif '160' in telescope_filter or 'red' in telescope_filter:
observed_wavelength = 160
else:
sys.exit('Incorrect PACS filter entered.')
elif 'spire' in telescope_filter:
pass
else:
sys.exit('"{}"" is not supported.'.format(telescope_filter))
clean_measurements[i, 0] = float(observed_wavelength)
try:
clean_measurements[i, 1] = float(flux)
clean_measurements[i, 2] = float(flux_err)
except ValueError:
sys.exit('Flux and uncertainty must be floats.')
return clean_measurements
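For illustration, the parser above accepts rows that mix numeric central wavelengths with PACS filter names (values below are hypothetical):

import numpy as np

measurements = [(70, 12.3, 1.1),            # exact central wavelength [um]
                ('pacs-green', 8.9, 0.9),   # mapped to 100 um
                (250.0, np.nan, 2.0)]       # NaN flux marks a nondetection
# after cleaning, each row becomes [wavelength_um, flux_mJy, flux_err_mJy]:
# [[ 70. , 12.3, 1.1], [100. , 8.9, 0.9], [250. , nan, 2. ]]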
def read_K15_template(template):
"""Reads in a K15 template SED, returning an array of
wavelength and corresponding array of specific luminosity."""
template_fname = os.path.join(root_dir, 'data', 'kirkpatrick+15',
'Comprehensive_library', '{}.txt'.format(template))
if not os.path.isfile(template_fname):
sys.exit('Invalid template model entered.')
try:
template_sed = np.genfromtxt(template_fname, skip_header=4)
except IOError:
sys.exit('Something is wrong with the SED template.')
    # rest wavelengths [um] and specific luminosity
waves = template_sed[:, 0]
L_nu = template_sed[:, 1]
return waves, L_nu
def model_sed(template, z):
"""Given a K15 model and redshift, returns the SED in flux density
units via two arrays:
|
observed wavelengths in microns
f_nu : 1d array
observed flux density in mJy
"""
waves, L_nu = read_K15_template(template)
# observed wavelengths
waves *= (1 + z)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
D_L = cosmo.luminosity_distance(z)
# flux density [mJy]
f_nu = (L_nu * (u.W / u.Hz) / (4 * np.pi * D_L**2)).to(u.mJy).value
return waves, f_nu
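A standalone check of the conversion used above, L_nu / (4 pi D_L^2) expressed in mJy with astropy units (the luminosity value is hypothetical):

import numpy as np
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
D_L = cosmo.luminosity_distance(1.0)        # z = 1
L_nu = 1e24 * (u.W / u.Hz)                  # hypothetical specific luminosity
f_nu = (L_nu / (4 * np.pi * D_L**2)).to(u.mJy)
print(f_nu)                                 # roughly 0.19 mJy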
def model_photometry(waves, f_nu, wavelength):
"""The wavelength is [70, 100, 160] for PACS observations. Otherwise
return nearest flux density for some other central wavelength.
"""
if wavelength not in [70, 100, 160]:
return f_nu[np.argmin(np.abs(np.log(waves / wavelength)))]
pacs_filter_fname = pacs_filter_fnames[wavelength]
filter_waves, transmission = np.genfromtxt(pacs_filter_fname,
unpack=True)
filter_waves *= 1e-4 # [um]
# interpolate to same wavelengths used in SED
within_filter = (waves > min(filter_waves)) & (waves < max(filter_waves))
wave_range = waves[within_filter]
func_interp = interp1d(filter_waves, transmission, kind='cubic')
interp_transmission = func_interp(wave_range)
flux_density = np.sum([T * f for T, f in \
zip(interp_transmission, f_nu[within_filter])]) / np.sum(interp_transmission)
return flux_density
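The transmission-weighted sum above is a plain weighted mean, which numpy can express directly (the filter curve below is hypothetical):

import numpy as np

transmission = np.array([0.2, 0.9, 1.0, 0.8])  # interpolated filter response
fluxes = np.array([10.0, 12.0, 11.0, 9.0])     # model f_nu inside the filter
flux_density = np.sum(transmission * fluxes) / np.sum(transmission)
print(round(float(flux_density), 2))           # 10.69

The list comprehension in the source computes the same quantity; the vectorized form is equivalent and avoids the Python-level loop.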
def chi_squared(normalization, model, data, data_err):
"""Returns the reduced chi^2 for all measurements in one template SED.
"""
model = np.array(model) * normalization
# degrees of freedom = # observations - # params
dof = len(data) - 1.
# for detections, get usual negative log-likelihood
detections = 0.5 * (data - model)**2 / data_err**2
# for nondetections, use survival analysis likelihood, e.g.,
# Feigelson & Nelson (1985)
#nondetections = 0.5 * (1 + erf(model / (np.sqrt(2) * data_err)))
# UPDATE: we'll instead use a normal distribution centered on
# zero since it gives the intuitively correct answer when fitting.
nondetections = 0.5 * (0 - model)**2 / data_err**2
# For a better treatment, we may want to use the survival function
# for a non-normal distribution... i.e., if we know the
# completeness as a function of flux, we may be able to create a
# log-likelihood of some non-detection for any model prediction.
return np.sum(np.where(np.isfinite(data), detections, nondetections)) / dof
def lnlike(normalization, model, data, data_err):
"""Negative chi^2"""
model = np.array(model) * normalization
# for detections do the usual
detections = -0.5 * (data - model)**2 / data_err**2
# nondetections are consistent with noise
nondetections = -0.5 * (0 - model)**2 / data_err**2
return np.sum(np.where(np.isfinite(data), detections, nondetections))
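A worked example of the detection / nondetection split used by both chi_squared and lnlike: nondetections carry NaN fluxes, and np.where routes them to the noise-centered term (values are hypothetical):

import numpy as np

model = np.array([5.0, 3.0])
data = np.array([6.0, np.nan])     # second band is a nondetection
err = np.array([1.0, 2.0])
detections = -0.5 * (data - model)**2 / err**2       # [-0.5, nan]
nondetections = -0.5 * (0 - model)**2 / err**2       # [-12.5, -1.125]
lnl = np.sum(np.where(np.isfinite(data), detections, nondetections))
print(lnl)  # -0.5 + (-1.125) = -1.625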
def fit_sed(template, measurements, z, verbose=True):
assert template in K15_SED_templates
# get and unpack redshifted wavelengths and SED
waves, f_nu = model_sed(template, z)
# unpack measurements and then model what they should be
measured_waves, measured_fluxes, measured_uncertainties = measurements.T
modeled_fluxes = np.array([model_photometry(waves, f_nu, wave) for wave in measured_waves])
# minimize chi-squared, with normalization (first arg) as free parameter
opt_result = minimize(chi_squared, x0=[1.],
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
if opt_result['success']:
chi2 = opt_result['fun']
norm = opt_result['x'][0]
if verbose:
print('Template {} successful, with chi^2 = {:.2f}'.format(template, chi2))
return chi2, norm
else:
if verbose:
print('Template {} unsuccessful.'.format(template))
return np.nan, np.nan
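A hypothetical driver for fit_sed (template names and inputs assumed; measurements is the cleaned 2d array produced by the parser earlier in this file): try several templates and keep the lowest chi^2.

import numpy as np

results = {t: fit_sed(t, measurements, z=1.5, verbose=False)
           for t in ['SFG1', 'AGN1']}     # assumed K15 template names
finite = {t: v for t, v in results.items() if np.isfinite(v[0])}
best_template = min(finite, key=lambda t: finite[t][0])
best_chi2, best_norm = finite[best_template]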
def calculate_uncertainties(norm, modeled_fluxes, measured_fluxes,
measured_uncertainties, nsteps=500, nwalkers=100, nburnin=50,
nthreads=4, save_samples=False, plot_distribution=False):
"""Uses MCMC sampling to estimate uncertainties on the one parameter,
the normalization, which scales the uncertainties on the IR luminosity.
In all of my runs, this produces a normal pdf, so taking the standard
deviation is a good way to estimate 68% credible intervals.
"""
init_params = [norm]
ndim = len(init_params)
init_pos = [init_params + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
# run emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, threads=nthreads,
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
sampler.run_mcmc(pos0=init_pos, N=nsteps, rstate0=256)
# extract sampled normalization
samples = sampler.chain[:, nburnin:, :].reshape((-1, ndim))
# save samples
if save_samples:
np.save(os.path.join(root_dir, 'results
|
Returns
-------
waves : 1d array
|
random_line_split
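Per the calculate_uncertainties docstring, the sampled normalization is close to Gaussian, so a 68% credible interval is just one standard deviation of the flattened chain. A sketch with a synthetic chain standing in for sampler.chain (shapes and values assumed):

import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(loc=2.0, scale=0.1, size=(45000, 1))  # flattened post-burn-in chain
norm_mean = float(np.mean(samples[:, 0]))
norm_err = float(np.std(samples[:, 0]))                    # ~68% credible interval
print(round(norm_mean, 2), round(norm_err, 2))             # 2.0 0.1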
|
fit_sed.py
|
):
"""Switch method for different types of flux measurement input.
Returns 2d array of floats. Note that first column can be an exact
observed central wavelength, or a number in [70, 100, 160, ...],
which serves as a proxy for pacs-blue, or pacs-green, etc.
"""
if isinstance(measurements, basestring):
try:
measurements_array = np.genfromtxt(measurements, dtype=None)
except IOError:
sys.exit('String is not valid file of measurements.')
elif isinstance(measurements, (list, tuple, np.ndarray)):
measurements_array = np.array(measurements)
else:
sys.exit('There is a problem with the measurements.')
# ensure that the array has 2 dimensions
    if len(measurements_array) == 1 or type(measurements_array) is np.void:
measurements_array = np.array(measurements_array)
# parse each row at a time and store in clean_measurements array
clean_measurements = np.zeros((len(measurements_array), 3),
dtype='float')
for i, row in enumerate(measurements_array):
try:
observed_wavelength, flux, flux_err = row
        except (ValueError, TypeError):
sys.exit('Each row must have three elements.')
try:
observed_wavelength = float(observed_wavelength)
except ValueError:
telescope_filter = observed_wavelength.lower()
if 'pacs' in telescope_filter:
if '70' in telescope_filter or 'blue' in telescope_filter:
observed_wavelength = 70
elif '100' in telescope_filter or 'green' in telescope_filter:
observed_wavelength = 100
elif '160' in telescope_filter or 'red' in telescope_filter:
|
else:
sys.exit('Incorrect PACS filter entered.')
elif 'spire' in telescope_filter:
pass
else:
sys.exit('"{}"" is not supported.'.format(telescope_filter))
clean_measurements[i, 0] = float(observed_wavelength)
try:
clean_measurements[i, 1] = float(flux)
clean_measurements[i, 2] = float(flux_err)
except ValueError:
sys.exit('Flux and uncertainty must be floats.')
return clean_measurements
def read_K15_template(template):
"""Reads in a K15 template SED, returning an array of
wavelength and corresponding array of specific luminosity."""
template_fname = os.path.join(root_dir, 'data', 'kirkpatrick+15',
'Comprehensive_library', '{}.txt'.format(template))
if not os.path.isfile(template_fname):
sys.exit('Invalid template model entered.')
try:
template_sed = np.genfromtxt(template_fname, skip_header=4)
except IOError:
sys.exit('Something is wrong with the SED template.')
    # rest wavelengths [um] and specific luminosity
waves = template_sed[:, 0]
L_nu = template_sed[:, 1]
return waves, L_nu
def model_sed(template, z):
"""Given a K15 model and redshift, returns the SED in flux density
units via two arrays:
Returns
-------
waves : 1d array
observed wavelengths in microns
f_nu : 1d array
observed flux density in mJy
"""
waves, L_nu = read_K15_template(template)
# observed wavelengths
waves *= (1 + z)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
D_L = cosmo.luminosity_distance(z)
# flux density [mJy]
f_nu = (L_nu * (u.W / u.Hz) / (4 * np.pi * D_L**2)).to(u.mJy).value
return waves, f_nu
def model_photometry(waves, f_nu, wavelength):
"""The wavelength is [70, 100, 160] for PACS observations. Otherwise
return nearest flux density for some other central wavelength.
"""
if wavelength not in [70, 100, 160]:
return f_nu[np.argmin(np.abs(np.log(waves / wavelength)))]
pacs_filter_fname = pacs_filter_fnames[wavelength]
filter_waves, transmission = np.genfromtxt(pacs_filter_fname,
unpack=True)
filter_waves *= 1e-4 # [um]
# interpolate to same wavelengths used in SED
within_filter = (waves > min(filter_waves)) & (waves < max(filter_waves))
wave_range = waves[within_filter]
func_interp = interp1d(filter_waves, transmission, kind='cubic')
interp_transmission = func_interp(wave_range)
flux_density = np.sum([T * f for T, f in \
zip(interp_transmission, f_nu[within_filter])]) / np.sum(interp_transmission)
return flux_density
def chi_squared(normalization, model, data, data_err):
"""Returns the reduced chi^2 for all measurements in one template SED.
"""
model = np.array(model) * normalization
# degrees of freedom = # observations - # params
dof = len(data) - 1.
# for detections, get usual negative log-likelihood
detections = 0.5 * (data - model)**2 / data_err**2
# for nondetections, use survival analysis likelihood, e.g.,
# Feigelson & Nelson (1985)
#nondetections = 0.5 * (1 + erf(model / (np.sqrt(2) * data_err)))
# UPDATE: we'll instead use a normal distribution centered on
# zero since it gives the intuitively correct answer when fitting.
nondetections = 0.5 * (0 - model)**2 / data_err**2
# For a better treatment, we may want to use the survival function
# for a non-normal distribution... i.e., if we know the
# completeness as a function of flux, we may be able to create a
# log-likelihood of some non-detection for any model prediction.
return np.sum(np.where(np.isfinite(data), detections, nondetections)) / dof
def lnlike(normalization, model, data, data_err):
"""Negative chi^2"""
model = np.array(model) * normalization
# for detections do the usual
detections = -0.5 * (data - model)**2 / data_err**2
# nondetections are consistent with noise
nondetections = -0.5 * (0 - model)**2 / data_err**2
return np.sum(np.where(np.isfinite(data), detections, nondetections))
def fit_sed(template, measurements, z, verbose=True):
assert template in K15_SED_templates
# get and unpack redshifted wavelengths and SED
waves, f_nu = model_sed(template, z)
# unpack measurements and then model what they should be
measured_waves, measured_fluxes, measured_uncertainties = measurements.T
modeled_fluxes = np.array([model_photometry(waves, f_nu, wave) for wave in measured_waves])
# minimize chi-squared, with normalization (first arg) as free parameter
opt_result = minimize(chi_squared, x0=[1.],
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
if opt_result['success']:
chi2 = opt_result['fun']
norm = opt_result['x'][0]
if verbose:
print('Template {} successful, with chi^2 = {:.2f}'.format(template, chi2))
return chi2, norm
else:
if verbose:
print('Template {} unsuccessful.'.format(template))
return np.nan, np.nan
def calculate_uncertainties(norm, modeled_fluxes, measured_fluxes,
measured_uncertainties, nsteps=500, nwalkers=100, nburnin=50,
nthreads=4, save_samples=False, plot_distribution=False):
"""Uses MCMC sampling to estimate uncertainties on the one parameter,
the normalization, which scales the uncertainties on the IR luminosity.
In all of my runs, this produces a normal pdf, so taking the standard
deviation is a good way to estimate 68% credible intervals.
"""
init_params = [norm]
ndim = len(init_params)
init_pos = [init_params + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
# run emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, threads=nthreads,
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
sampler.run_mcmc(pos0=init_pos, N=nsteps, rstate0=256)
# extract sampled normalization
samples = sampler.chain[:, nburnin:, :].reshape((-1, ndim))
# save samples
if save_samples:
np.save(os.path.join(root_dir, '
|
observed_wavelength = 160
|
conditional_block
|
fit_sed.py
|
):
"""Switch method for different types of flux measurement input.
Returns 2d array of floats. Note that first column can be an exact
observed central wavelength, or a number in [70, 100, 160, ...],
which serves as a proxy for pacs-blue, or pacs-green, etc.
"""
if isinstance(measurements, basestring):
try:
measurements_array = np.genfromtxt(measurements, dtype=None)
except IOError:
sys.exit('String is not valid file of measurements.')
elif isinstance(measurements, (list, tuple, np.ndarray)):
measurements_array = np.array(measurements)
else:
sys.exit('There is a problem with the measurements.')
# ensure that the array has 2 dimensions
    if len(measurements_array) == 1 or type(measurements_array) is np.void:
measurements_array = np.array(measurements_array)
# parse each row at a time and store in clean_measurements array
clean_measurements = np.zeros((len(measurements_array), 3),
dtype='float')
for i, row in enumerate(measurements_array):
try:
observed_wavelength, flux, flux_err = row
        except (ValueError, TypeError):
sys.exit('Each row must have three elements.')
try:
observed_wavelength = float(observed_wavelength)
except ValueError:
telescope_filter = observed_wavelength.lower()
if 'pacs' in telescope_filter:
if '70' in telescope_filter or 'blue' in telescope_filter:
observed_wavelength = 70
elif '100' in telescope_filter or 'green' in telescope_filter:
observed_wavelength = 100
elif '160' in telescope_filter or 'red' in telescope_filter:
observed_wavelength = 160
else:
sys.exit('Incorrect PACS filter entered.')
elif 'spire' in telescope_filter:
pass
else:
sys.exit('"{}"" is not supported.'.format(telescope_filter))
clean_measurements[i, 0] = float(observed_wavelength)
try:
clean_measurements[i, 1] = float(flux)
clean_measurements[i, 2] = float(flux_err)
except ValueError:
sys.exit('Flux and uncertainty must be floats.')
return clean_measurements
def
|
(template):
"""Reads in a K15 template SED, returning an array of
wavelength and corresponding array of specific luminosity."""
template_fname = os.path.join(root_dir, 'data', 'kirkpatrick+15',
'Comprehensive_library', '{}.txt'.format(template))
if not os.path.isfile(template_fname):
sys.exit('Invalid template model entered.')
try:
template_sed = np.genfromtxt(template_fname, skip_header=4)
except IOError:
sys.exit('Something is wrong with the SED template.')
    # rest wavelengths [um] and specific luminosity
waves = template_sed[:, 0]
L_nu = template_sed[:, 1]
return waves, L_nu
def model_sed(template, z):
"""Given a K15 model and redshift, returns the SED in flux density
units via two arrays:
Returns
-------
waves : 1d array
observed wavelengths in microns
f_nu : 1d array
observed flux density in mJy
"""
waves, L_nu = read_K15_template(template)
# observed wavelengths
waves *= (1 + z)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
D_L = cosmo.luminosity_distance(z)
# flux density [mJy]
f_nu = (L_nu * (u.W / u.Hz) / (4 * np.pi * D_L**2)).to(u.mJy).value
return waves, f_nu
def model_photometry(waves, f_nu, wavelength):
"""The wavelength is [70, 100, 160] for PACS observations. Otherwise
return nearest flux density for some other central wavelength.
"""
if wavelength not in [70, 100, 160]:
return f_nu[np.argmin(np.abs(np.log(waves / wavelength)))]
pacs_filter_fname = pacs_filter_fnames[wavelength]
filter_waves, transmission = np.genfromtxt(pacs_filter_fname,
unpack=True)
filter_waves *= 1e-4 # [um]
# interpolate to same wavelengths used in SED
within_filter = (waves > min(filter_waves)) & (waves < max(filter_waves))
wave_range = waves[within_filter]
func_interp = interp1d(filter_waves, transmission, kind='cubic')
interp_transmission = func_interp(wave_range)
flux_density = np.sum([T * f for T, f in \
zip(interp_transmission, f_nu[within_filter])]) / np.sum(interp_transmission)
return flux_density
def chi_squared(normalization, model, data, data_err):
"""Returns the reduced chi^2 for all measurements in one template SED.
"""
model = np.array(model) * normalization
# degrees of freedom = # observations - # params
dof = len(data) - 1.
# for detections, get usual negative log-likelihood
detections = 0.5 * (data - model)**2 / data_err**2
# for nondetections, use survival analysis likelihood, e.g.,
# Feigelson & Nelson (1985)
#nondetections = 0.5 * (1 + erf(model / (np.sqrt(2) * data_err)))
# UPDATE: we'll instead use a normal distribution centered on
# zero since it gives the intuitively correct answer when fitting.
nondetections = 0.5 * (0 - model)**2 / data_err**2
# For a better treatment, we may want to use the survival function
# for a non-normal distribution... i.e., if we know the
# completeness as a function of flux, we may be able to create a
# log-likelihood of some non-detection for any model prediction.
return np.sum(np.where(np.isfinite(data), detections, nondetections)) / dof
def lnlike(normalization, model, data, data_err):
"""Negative chi^2"""
model = np.array(model) * normalization
# for detections do the usual
detections = -0.5 * (data - model)**2 / data_err**2
# nondetections are consistent with noise
nondetections = -0.5 * (0 - model)**2 / data_err**2
return np.sum(np.where(np.isfinite(data), detections, nondetections))
def fit_sed(template, measurements, z, verbose=True):
assert template in K15_SED_templates
# get and unpack redshifted wavelengths and SED
waves, f_nu = model_sed(template, z)
# unpack measurements and then model what they should be
measured_waves, measured_fluxes, measured_uncertainties = measurements.T
modeled_fluxes = np.array([model_photometry(waves, f_nu, wave) for wave in measured_waves])
# minimize chi-squared, with normalization (first arg) as free parameter
opt_result = minimize(chi_squared, x0=[1.],
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
if opt_result['success']:
chi2 = opt_result['fun']
norm = opt_result['x'][0]
if verbose:
print('Template {} successful, with chi^2 = {:.2f}'.format(template, chi2))
return chi2, norm
else:
if verbose:
print('Template {} unsuccessful.'.format(template))
return np.nan, np.nan
def calculate_uncertainties(norm, modeled_fluxes, measured_fluxes,
measured_uncertainties, nsteps=500, nwalkers=100, nburnin=50,
nthreads=4, save_samples=False, plot_distribution=False):
"""Uses MCMC sampling to estimate uncertainties on the one parameter,
the normalization, which scales the uncertainties on the IR luminosity.
In all of my runs, this produces a normal pdf, so taking the standard
deviation is a good way to estimate 68% credible intervals.
"""
init_params = [norm]
ndim = len(init_params)
init_pos = [init_params + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
# run emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, threads=nthreads,
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
sampler.run_mcmc(pos0=init_pos, N=nsteps, rstate0=256)
# extract sampled normalization
samples = sampler.chain[:, nburnin:, :].reshape((-1, ndim))
# save samples
if save_samples:
np.save(os.path.join(root_dir, '
|
read_K15_template
|
identifier_name
|
fit_sed.py
|
):
"""Switch method for different types of flux measurement input.
Returns 2d array of floats. Note that first column can be an exact
observed central wavelength, or a number in [70, 100, 160, ...],
which serves as a proxy for pacs-blue, or pacs-green, etc.
"""
if isinstance(measurements, basestring):
try:
measurements_array = np.genfromtxt(measurements, dtype=None)
except IOError:
sys.exit('String is not valid file of measurements.')
elif isinstance(measurements, (list, tuple, np.ndarray)):
measurements_array = np.array(measurements)
else:
sys.exit('There is a problem with the measurements.')
# ensure that the array has 2 dimensions
    if len(measurements_array) == 1 or type(measurements_array) is np.void:
measurements_array = np.array(measurements_array)
# parse each row at a time and store in clean_measurements array
clean_measurements = np.zeros((len(measurements_array), 3),
dtype='float')
for i, row in enumerate(measurements_array):
try:
observed_wavelength, flux, flux_err = row
        except (ValueError, TypeError):
sys.exit('Each row must have three elements.')
try:
observed_wavelength = float(observed_wavelength)
except ValueError:
telescope_filter = observed_wavelength.lower()
if 'pacs' in telescope_filter:
if '70' in telescope_filter or 'blue' in telescope_filter:
observed_wavelength = 70
elif '100' in telescope_filter or 'green' in telescope_filter:
observed_wavelength = 100
elif '160' in telescope_filter or 'red' in telescope_filter:
observed_wavelength = 160
else:
sys.exit('Incorrect PACS filter entered.')
elif 'spire' in telescope_filter:
pass
else:
sys.exit('"{}"" is not supported.'.format(telescope_filter))
clean_measurements[i, 0] = float(observed_wavelength)
try:
clean_measurements[i, 1] = float(flux)
clean_measurements[i, 2] = float(flux_err)
except ValueError:
sys.exit('Flux and uncertainty must be floats.')
return clean_measurements
def read_K15_template(template):
"""Reads in a K15 template SED, returning an array of
wavelength and corresponding array of specific luminosity."""
template_fname = os.path.join(root_dir, 'data', 'kirkpatrick+15',
'Comprehensive_library', '{}.txt'.format(template))
if not os.path.isfile(template_fname):
sys.exit('Invalid template model entered.')
try:
template_sed = np.genfromtxt(template_fname, skip_header=4)
except IOError:
sys.exit('Something is wrong with the SED template.')
    # rest wavelengths [um] and specific luminosity
waves = template_sed[:, 0]
L_nu = template_sed[:, 1]
return waves, L_nu
def model_sed(template, z):
"""Given a K15 model and redshift, returns the SED in flux density
units via two arrays:
Returns
-------
waves : 1d array
observed wavelengths in microns
f_nu : 1d array
observed flux density in mJy
"""
waves, L_nu = read_K15_template(template)
# observed wavelengths
waves *= (1 + z)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
D_L = cosmo.luminosity_distance(z)
# flux density [mJy]
f_nu = (L_nu * (u.W / u.Hz) / (4 * np.pi * D_L**2)).to(u.mJy).value
return waves, f_nu
def model_photometry(waves, f_nu, wavelength):
"""The wavelength is [70, 100, 160] for PACS observations. Otherwise
return nearest flux density for some other central wavelength.
"""
if wavelength not in [70, 100, 160]:
return f_nu[np.argmin(np.abs(np.log(waves / wavelength)))]
pacs_filter_fname = pacs_filter_fnames[wavelength]
filter_waves, transmission = np.genfromtxt(pacs_filter_fname,
unpack=True)
filter_waves *= 1e-4 # [um]
# interpolate to same wavelengths used in SED
within_filter = (waves > min(filter_waves)) & (waves < max(filter_waves))
wave_range = waves[within_filter]
func_interp = interp1d(filter_waves, transmission, kind='cubic')
interp_transmission = func_interp(wave_range)
flux_density = np.sum([T * f for T, f in \
zip(interp_transmission, f_nu[within_filter])]) / np.sum(interp_transmission)
return flux_density
def chi_squared(normalization, model, data, data_err):
"""Returns the reduced chi^2 for all measurements in one template SED.
"""
model = np.array(model) * normalization
# degrees of freedom = # observations - # params
dof = len(data) - 1.
# for detections, get usual negative log-likelihood
detections = 0.5 * (data - model)**2 / data_err**2
# for nondetections, use survival analysis likelihood, e.g.,
# Feigelson & Nelson (1985)
#nondetections = 0.5 * (1 + erf(model / (np.sqrt(2) * data_err)))
# UPDATE: we'll instead use a normal distribution centered on
# zero since it gives the intuitively correct answer when fitting.
nondetections = 0.5 * (0 - model)**2 / data_err**2
# For a better treatment, we may want to use the survival function
# for a non-normal distribution... i.e., if we know the
# completeness as a function of flux, we may be able to create a
# log-likelihood of some non-detection for any model prediction.
return np.sum(np.where(np.isfinite(data), detections, nondetections)) / dof
def lnlike(normalization, model, data, data_err):
"""Negative chi^2"""
model = np.array(model) * normalization
# for detections do the usual
detections = -0.5 * (data - model)**2 / data_err**2
# nondetections are consistent with noise
nondetections = -0.5 * (0 - model)**2 / data_err**2
return np.sum(np.where(np.isfinite(data), detections, nondetections))
def fit_sed(template, measurements, z, verbose=True):
|
return chi2, norm
else:
if verbose:
print('Template {} unsuccessful.'.format(template))
return np.nan, np.nan
def calculate_uncertainties(norm, modeled_fluxes, measured_fluxes,
measured_uncertainties, nsteps=500, nwalkers=100, nburnin=50,
nthreads=4, save_samples=False, plot_distribution=False):
"""Uses MCMC sampling to estimate uncertainties on the one parameter,
the normalization, which scales the uncertainties on the IR luminosity.
In all of my runs, this produces a normal pdf, so taking the standard
deviation is a good way to estimate 68% credible intervals.
"""
init_params = [norm]
ndim = len(init_params)
init_pos = [init_params + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
# run emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, threads=nthreads,
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
sampler.run_mcmc(pos0=init_pos, N=nsteps, rstate0=256)
# extract sampled normalization
samples = sampler.chain[:, nburnin:, :].reshape((-1, ndim))
# save samples
if save_samples:
np.save(os.path.join(root_dir, 'results
|
assert template in K15_SED_templates
# get and unpack redshifted wavelengths and SED
waves, f_nu = model_sed(template, z)
# unpack measurements and then model what they should be
measured_waves, measured_fluxes, measured_uncertainties = measurements.T
modeled_fluxes = np.array([model_photometry(waves, f_nu, wave) for wave in measured_waves])
# minimize chi-squared, with normalization (first arg) as free parameter
opt_result = minimize(chi_squared, x0=[1.],
args=(modeled_fluxes, measured_fluxes, measured_uncertainties))
if opt_result['success']:
chi2 = opt_result['fun']
norm = opt_result['x'][0]
if verbose:
print('Template {} successful, with chi^2 = {:.2f}'.format(template, chi2))
|
identifier_body
|
utils.rs
|
;
use winapi::um::handleapi::CloseHandle;
use winapi::um::memoryapi::ReadProcessMemory;
use winapi::um::processthreadsapi::OpenProcess;
use winapi::um::psapi::GetModuleFileNameExW;
use winapi::um::shlobj::SHGetKnownFolderPath;
use winapi::um::winbase::{FORMAT_MESSAGE_ALLOCATE_BUFFER, FORMAT_MESSAGE_FROM_SYSTEM, FormatMessageA, LocalFree};
use winapi::um::winnt::{LANG_USER_DEFAULT, LPSTR, PROCESS_QUERY_LIMITED_INFORMATION, PROCESS_VM_READ, PWSTR};
use winapi::um::winuser::{EnumWindows, GetWindowTextLengthW, GetWindowTextW, GetWindowThreadProcessId, IsWindowVisible};
use winapi::um::winver::{GetFileVersionInfoSizeW, GetFileVersionInfoW, VerQueryValueW};
use wrapperrs::Error;
use crate::agent::RequesterInfo;
use crate::config::Config;
use crate::utils::Finally;
use super::process_describers::describe;
pub trait StrExt {
fn to_utf16_null(&self) -> Vec<u16>;
}
impl StrExt for &str {
fn to_utf16_null(&self) -> Vec<u16> {
let mut v: Vec<_> = self.encode_utf16().collect();
v.push(0);
v
}
}
pub fn check_error() -> wrapperrs::Result<()> {
format_error(unsafe { GetLastError() })
}
pub fn format_error(err: u32) -> wrapperrs::Result<()> {
unsafe {
if err == 0 {
return Ok(());
}
let msg_ptr: LPSTR = null_mut();
FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
null(),
err as u32,
LANG_USER_DEFAULT as u32,
transmute(&msg_ptr),
0,
null_mut(),
);
let msg = CStr::from_ptr(msg_ptr).to_str().unwrap();
let err = wrapperrs::Error::new(&format!("(win32) {}", &msg[..msg.len() - 2]));
LocalFree(msg_ptr as *mut c_void);
Err(err.into())
}
}
pub unsafe fn close_handle(handle: *mut c_void) -> impl Drop {
Finally::new(move || { CloseHandle(handle); })
}
pub fn get_known_folder(folder_id: GUID) -> PathBuf {
unsafe {
let mut wstr: PWSTR = null_mut();
SHGetKnownFolderPath(&folder_id, 0, null_mut(), &mut wstr);
let length = (0..).into_iter()
.take_while(|i| wstr.offset(*i).read() != 0)
.count();
let str = String::from_utf16(
std::slice::from_raw_parts(wstr, length)).unwrap();
CoTaskMemFree(wstr as *mut c_void);
PathBuf::from(str)
}
}
pub unsafe fn get_executable_from_pid(pid: u32) -> wrapperrs::Result<PathBuf>
|
pub unsafe fn get_executable_description(exe: &Path) -> Result<String, ()> {
let exe_utf16 = exe.to_str().unwrap().to_utf16_null();
let mut handle: DWORD = 0;
let size = GetFileVersionInfoSizeW(exe_utf16.as_ptr(), &mut handle);
if size == 0 {
error!("GetFileVersionInfoSizeW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data = vec![0u8; size as _];
if GetFileVersionInfoW(exe_utf16.as_ptr(), 0, data.len() as _,
data.as_mut_ptr() as _) == 0 {
error!("GetFileVersionInfoW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data_ptr: *mut DWORD = null_mut();
let mut size: u32 = 0;
if VerQueryValueW(data.as_ptr() as _,
r"\VarFileInfo\Translation".to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _), &mut size as _) == 0 {
error!("VerQueryValueW (translation), err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let language = *data_ptr;
let lang_id = language & 0xffff;
let code_page = language >> 16 & 0xffff;
let mut data_ptr: *mut u16 = null_mut();
let mut size: u32 = 0;
let query = format!(r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription", lang_id, code_page);
if VerQueryValueW(data.as_ptr() as _, query.as_str().to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _),
&mut size as _) == 0 {
let err = GetLastError();
// 1813 - FileDescription resource type not found
if err != 1813 {
error!("VerQueryValueW (file description), err={}, exe={}, query={}", err,
exe.to_str().unwrap(), query);
}
return Err(());
};
let data: Vec<_> = (0..).step_by(2)
.map(|offset| data_ptr.offset(offset / 2).read())
.take_while(|c| *c != 0)
.collect();
Ok(String::from_utf16(&data).unwrap())
}
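The VarFileInfo\Translation value read above packs the language id into the low word of a DWORD and the code page into the high word. The unpack and query-path construction, sketched in Python (the packed value is hypothetical):

language = 0x04B00409   # hypothetical: lang 0x0409 (en-US), code page 0x04B0
lang_id = language & 0xFFFF
code_page = (language >> 16) & 0xFFFF
query = r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription".format(lang_id, code_page)
print(query)            # \StringFileInfo\040904b0\FileDescription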
pub unsafe fn get_parent_pid(pid: u32) -> u32 {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return 0;
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
if NtQueryInformationProcess(process, ProcessBasicInformation, &mut info as *mut _ as _,
size_of_val(&info) as _, null_mut()) != STATUS_SUCCESS {
return 0;
}
info.InheritedFromUniqueProcessId as _
}
pub unsafe fn find_primary_window(process_id: u32) -> Option<HWND> {
struct Data {
process_id: u32,
windows: Vec<HWND>,
}
unsafe extern "system" fn window_proc(hwnd: HWND, lparam: LPARAM) -> BOOL {
let data = &mut *(lparam as *mut Data);
let mut process_id = 0;
GetWindowThreadProcessId(hwnd, &mut process_id);
if process_id == data.process_id {
data.windows.push(hwnd);
};
TRUE
}
let mut data = Data {
process_id,
windows: Vec::new(),
};
EnumWindows(Some(window_proc), &mut data as *mut _ as _);
if data.windows.is_empty() {
return None;
};
data.windows
.iter()
.find(|&&hwnd| IsWindowVisible(hwnd) == TRUE)
.or_else(|| data.windows.first())
.copied()
}
pub unsafe fn get_window_text(win: HWND) -> Result<String, ()> {
let mut title = vec![0; (GetWindowTextLengthW(win) + 1) as _];
let length = GetWindowTextW(win, title.as_mut_ptr(), title.len() as _);
if length > 0 {
Ok(String::from_utf16(&title[..length as _]).unwrap())
} else {
Err(())
}
}
pub unsafe fn get_process_command_line(pid: u32) -> Result<String, ()> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION | PROCESS_VM_READ,
FALSE, pid);
if process == null_mut() {
return Err(());
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
let res = NtQueryInformationProcess(process, ProcessBasicInformation,
&mut info as *mut _ as _,
size_of_val(&info) as u32, null_mut());
if res != STATUS_SUCCESS {
return Err(());
}
unsafe fn read_process<T>(process: HANDLE, addr: *mut c_void) -> std::result::Result<T, ()> {
let mut dst: T = MaybeUninit::zeroed().assume_init();
if ReadProcessMemory(process, addr, &mut dst as *mut _ as _, size_of_val(&dst),
null_mut()) ==
|
{
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return Err(Error::new("OpenProcess").into());
};
let _close_process = close_handle(process);
let mut name = [0u16; 32 * 1024];
let length = GetModuleFileNameExW(process, null_mut(), name.as_mut_ptr(), name.len() as _);
if length == 0 {
Err(Error::new("GetModuleFileNameExW").into())
} else {
Ok(PathBuf::from(String::from_utf16(&name[..length as _]).unwrap()))
}
}
|
identifier_body
|
utils.rs
|
use itertools::Itertools;
use log::error;
use ntapi::ntpebteb::PEB;
use ntapi::ntpsapi::{NtQueryInformationProcess, PROCESS_BASIC_INFORMATION, ProcessBasicInformation};
use ntapi::ntrtl::RTL_USER_PROCESS_PARAMETERS;
use winapi::shared::guiddef::GUID;
use winapi::shared::minwindef::{BOOL, DWORD, FALSE, LPARAM, TRUE};
use winapi::shared::ntdef::{HANDLE, UNICODE_STRING};
use winapi::shared::ntstatus::STATUS_SUCCESS;
use winapi::shared::windef::HWND;
use winapi::um::combaseapi::CoTaskMemFree;
use winapi::um::errhandlingapi::GetLastError;
use winapi::um::handleapi::CloseHandle;
use winapi::um::memoryapi::ReadProcessMemory;
use winapi::um::processthreadsapi::OpenProcess;
use winapi::um::psapi::GetModuleFileNameExW;
use winapi::um::shlobj::SHGetKnownFolderPath;
use winapi::um::winbase::{FORMAT_MESSAGE_ALLOCATE_BUFFER, FORMAT_MESSAGE_FROM_SYSTEM, FormatMessageA, LocalFree};
use winapi::um::winnt::{LANG_USER_DEFAULT, LPSTR, PROCESS_QUERY_LIMITED_INFORMATION, PROCESS_VM_READ, PWSTR};
use winapi::um::winuser::{EnumWindows, GetWindowTextLengthW, GetWindowTextW, GetWindowThreadProcessId, IsWindowVisible};
use winapi::um::winver::{GetFileVersionInfoSizeW, GetFileVersionInfoW, VerQueryValueW};
use wrapperrs::Error;
use crate::agent::RequesterInfo;
use crate::config::Config;
use crate::utils::Finally;
use super::process_describers::describe;
pub trait StrExt {
fn to_utf16_null(&self) -> Vec<u16>;
}
impl StrExt for &str {
fn to_utf16_null(&self) -> Vec<u16> {
let mut v: Vec<_> = self.encode_utf16().collect();
v.push(0);
v
}
}
pub fn check_error() -> wrapperrs::Result<()> {
format_error(unsafe { GetLastError() })
}
pub fn format_error(err: u32) -> wrapperrs::Result<()> {
unsafe {
if err == 0 {
return Ok(());
}
let msg_ptr: LPSTR = null_mut();
FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
null(),
err as u32,
LANG_USER_DEFAULT as u32,
transmute(&msg_ptr),
0,
null_mut(),
);
let msg = CStr::from_ptr(msg_ptr).to_str().unwrap();
let err = wrapperrs::Error::new(&format!("(win32) {}", &msg[..msg.len() - 2]));
LocalFree(msg_ptr as *mut c_void);
Err(err.into())
}
}
pub unsafe fn close_handle(handle: *mut c_void) -> impl Drop {
Finally::new(move || { CloseHandle(handle); })
}
pub fn get_known_folder(folder_id: GUID) -> PathBuf {
unsafe {
let mut wstr: PWSTR = null_mut();
SHGetKnownFolderPath(&folder_id, 0, null_mut(), &mut wstr);
let length = (0..).into_iter()
.take_while(|i| wstr.offset(*i).read() != 0)
.count();
let str = String::from_utf16(
std::slice::from_raw_parts(wstr, length)).unwrap();
CoTaskMemFree(wstr as *mut c_void);
PathBuf::from(str)
}
}
pub unsafe fn get_executable_from_pid(pid: u32) -> wrapperrs::Result<PathBuf> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return Err(Error::new("OpenProcess").into());
};
let _close_process = close_handle(process);
let mut name = [0u16; 32 * 1024];
let length = GetModuleFileNameExW(process, null_mut(), name.as_mut_ptr(), name.len() as _);
if length == 0 {
Err(Error::new("GetModuleFileNameExW").into())
} else {
Ok(PathBuf::from(String::from_utf16(&name[..length as _]).unwrap()))
}
}
pub unsafe fn get_executable_description(exe: &Path) -> Result<String, ()> {
let exe_utf16 = exe.to_str().unwrap().to_utf16_null();
let mut handle: DWORD = 0;
let size = GetFileVersionInfoSizeW(exe_utf16.as_ptr(), &mut handle);
if size == 0 {
error!("GetFileVersionInfoSizeW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data = vec![0u8; size as _];
if GetFileVersionInfoW(exe_utf16.as_ptr(), 0, data.len() as _,
data.as_mut_ptr() as _) == 0 {
error!("GetFileVersionInfoW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data_ptr: *mut DWORD = null_mut();
let mut size: u32 = 0;
if VerQueryValueW(data.as_ptr() as _,
r"\VarFileInfo\Translation".to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _), &mut size as _) == 0 {
error!("VerQueryValueW (translation), err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let language = *data_ptr;
let lang_id = language & 0xffff;
let code_page = language >> 16 & 0xffff;
let mut data_ptr: *mut u16 = null_mut();
let mut size: u32 = 0;
let query = format!(r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription", lang_id, code_page);
if VerQueryValueW(data.as_ptr() as _, query.as_str().to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _),
&mut size as _) == 0 {
let err = GetLastError();
// 1813 - FileDescription resource type not found
if err != 1813 {
error!("VerQueryValueW (file description), err={}, exe={}, query={}", err,
exe.to_str().unwrap(), query);
}
return Err(());
};
let data: Vec<_> = (0..).step_by(2)
.map(|offset| data_ptr.offset(offset / 2).read())
.take_while(|c| *c != 0)
.collect();
Ok(String::from_utf16(&data).unwrap())
}
pub unsafe fn get_parent_pid(pid: u32) -> u32 {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return 0;
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
if NtQueryInformationProcess(process, ProcessBasicInformation, &mut info as *mut _ as _,
size_of_val(&info) as _, null_mut()) != STATUS_SUCCESS {
return 0;
}
info.InheritedFromUniqueProcessId as _
}
pub unsafe fn find_primary_window(process_id: u32) -> Option<HWND> {
struct Data {
process_id: u32,
windows: Vec<HWND>,
}
unsafe extern "system" fn window_proc(hwnd: HWND, lparam: LPARAM) -> BOOL {
let data = &mut *(lparam as *mut Data);
let mut process_id = 0;
GetWindowThreadProcessId(hwnd, &mut process_id);
if process_id == data.process_id {
data.windows.push(hwnd);
};
TRUE
}
let mut data = Data {
process_id,
windows: Vec::new(),
};
EnumWindows(Some(window_proc), &mut data as *mut _ as _);
if data.windows.is_empty() {
return None;
};
data.windows
.iter()
.find(|&&hwnd| IsWindowVisible(hwnd) == TRUE)
.or_else(|| data.windows.first())
.copied()
}
pub unsafe fn get_window_text(win: HWND) -> Result<String, ()> {
let mut title = vec![0; (GetWindowTextLengthW(win) + 1) as _];
let length = GetWindowTextW(win, title.as_mut_ptr(), title.len() as _);
if length > 0 {
Ok(String::from_utf16(&title[..length as _]).unwrap())
} else {
Err(())
}
}
pub unsafe fn get_process_command_line(pid: u32) -> Result<String, ()> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION | PROCESS_VM_READ,
FALSE, pid);
if
|
use std::path::{Path, PathBuf};
use std::ptr::{null, null_mut};
|
random_line_split
|
|
utils.rs
|
to_utf16_null(&self) -> Vec<u16>;
}
impl StrExt for &str {
fn to_utf16_null(&self) -> Vec<u16> {
let mut v: Vec<_> = self.encode_utf16().collect();
v.push(0);
v
}
}
pub fn check_error() -> wrapperrs::Result<()> {
format_error(unsafe { GetLastError() })
}
pub fn format_error(err: u32) -> wrapperrs::Result<()> {
unsafe {
if err == 0 {
return Ok(());
}
let msg_ptr: LPSTR = null_mut();
FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
null(),
err as u32,
LANG_USER_DEFAULT as u32,
transmute(&msg_ptr),
0,
null_mut(),
);
let msg = CStr::from_ptr(msg_ptr).to_str().unwrap();
let err = wrapperrs::Error::new(&format!("(win32) {}", &msg[..msg.len() - 2]));
LocalFree(msg_ptr as *mut c_void);
Err(err.into())
}
}
pub unsafe fn close_handle(handle: *mut c_void) -> impl Drop {
Finally::new(move || { CloseHandle(handle); })
}
pub fn get_known_folder(folder_id: GUID) -> PathBuf {
unsafe {
let mut wstr: PWSTR = null_mut();
SHGetKnownFolderPath(&folder_id, 0, null_mut(), &mut wstr);
let length = (0..).into_iter()
.take_while(|i| wstr.offset(*i).read() != 0)
.count();
let str = String::from_utf16(
std::slice::from_raw_parts(wstr, length)).unwrap();
CoTaskMemFree(wstr as *mut c_void);
PathBuf::from(str)
}
}
pub unsafe fn get_executable_from_pid(pid: u32) -> wrapperrs::Result<PathBuf> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return Err(Error::new("OpenProcess").into());
};
let _close_process = close_handle(process);
let mut name = [0u16; 32 * 1024];
let length = GetModuleFileNameExW(process, null_mut(), name.as_mut_ptr(), name.len() as _);
if length == 0 {
Err(Error::new("GetModuleFileNameExW").into())
} else {
Ok(PathBuf::from(String::from_utf16(&name[..length as _]).unwrap()))
}
}
pub unsafe fn get_executable_description(exe: &Path) -> Result<String, ()> {
let exe_utf16 = exe.to_str().unwrap().to_utf16_null();
let mut handle: DWORD = 0;
let size = GetFileVersionInfoSizeW(exe_utf16.as_ptr(), &mut handle);
if size == 0 {
error!("GetFileVersionInfoSizeW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data = vec![0u8; size as _];
if GetFileVersionInfoW(exe_utf16.as_ptr(), 0, data.len() as _,
data.as_mut_ptr() as _) == 0 {
error!("GetFileVersionInfoW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data_ptr: *mut DWORD = null_mut();
let mut size: u32 = 0;
if VerQueryValueW(data.as_ptr() as _,
r"\VarFileInfo\Translation".to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _), &mut size as _) == 0 {
error!("VerQueryValueW (translation), err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let language = *data_ptr;
let lang_id = language & 0xffff;
let code_page = language >> 16 & 0xffff;
let mut data_ptr: *mut u16 = null_mut();
let mut size: u32 = 0;
let query = format!(r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription", lang_id, code_page);
if VerQueryValueW(data.as_ptr() as _, query.as_str().to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _),
&mut size as _) == 0 {
let err = GetLastError();
// 1813 - FileDescription resource type not found
if err != 1813 {
error!("VerQueryValueW (file description), err={}, exe={}, query={}", err,
exe.to_str().unwrap(), query);
}
return Err(());
};
let data: Vec<_> = (0..).step_by(2)
.map(|offset| data_ptr.offset(offset / 2).read())
.take_while(|c| *c != 0)
.collect();
Ok(String::from_utf16(&data).unwrap())
}
pub unsafe fn get_parent_pid(pid: u32) -> u32 {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return 0;
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
if NtQueryInformationProcess(process, ProcessBasicInformation, &mut info as *mut _ as _,
size_of_val(&info) as _, null_mut()) != STATUS_SUCCESS {
return 0;
}
info.InheritedFromUniqueProcessId as _
}
pub unsafe fn find_primary_window(process_id: u32) -> Option<HWND> {
struct Data {
process_id: u32,
windows: Vec<HWND>,
}
unsafe extern "system" fn window_proc(hwnd: HWND, lparam: LPARAM) -> BOOL {
let data = &mut *(lparam as *mut Data);
let mut process_id = 0;
GetWindowThreadProcessId(hwnd, &mut process_id);
if process_id == data.process_id {
data.windows.push(hwnd);
};
TRUE
}
let mut data = Data {
process_id,
windows: Vec::new(),
};
EnumWindows(Some(window_proc), &mut data as *mut _ as _);
if data.windows.is_empty() {
return None;
};
data.windows
.iter()
.find(|&&hwnd| IsWindowVisible(hwnd) == TRUE)
.or_else(|| data.windows.first())
.copied()
}
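// Hedged sketch: raise a process's primary window, preferring the visible one
// chosen above; assumes user32's SetForegroundWindow is among the winapi imports.
unsafe fn focus_process(pid: u32) -> bool {
    match find_primary_window(pid) {
        Some(hwnd) => SetForegroundWindow(hwnd) != 0,
        None => false,
    }
}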
pub unsafe fn get_window_text(win: HWND) -> Result<String, ()> {
let mut title = vec![0; (GetWindowTextLengthW(win) + 1) as _];
let length = GetWindowTextW(win, title.as_mut_ptr(), title.len() as _);
if length > 0 {
Ok(String::from_utf16(&title[..length as _]).unwrap())
} else {
Err(())
}
}
pub unsafe fn get_process_command_line(pid: u32) -> Result<String, ()> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION | PROCESS_VM_READ,
FALSE, pid);
if process == null_mut() {
return Err(());
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
let res = NtQueryInformationProcess(process, ProcessBasicInformation,
&mut info as *mut _ as _,
size_of_val(&info) as u32, null_mut());
if res != STATUS_SUCCESS {
return Err(());
}
unsafe fn read_process<T>(process: HANDLE, addr: *mut c_void) -> std::result::Result<T, ()> {
let mut dst: T = MaybeUninit::zeroed().assume_init();
if ReadProcessMemory(process, addr, &mut dst as *mut _ as _, size_of_val(&dst),
null_mut()) == 0 {
dbg!(GetLastError());
Err(())
} else {
Ok(dst)
}
}
unsafe fn read_process_unicode_string(process: HANDLE, s: UNICODE_STRING)
-> std::result::Result<String, ()> {
let mut buffer = vec![0u16; (s.Length / 2) as _];
if ReadProcessMemory(process, s.Buffer as _, buffer.as_mut_ptr() as _,
s.Length as _, null_mut()) == 0 {
dbg!(GetLastError());
return Err(());
}
Ok(String::from_utf16(&buffer).unwrap())
}
if let Ok(command_line) = (|| -> std::result::Result<_, ()> {
let peb: PEB = read_process(process, info.PebBaseAddress as _)?;
let parameters: RTL_USER_PROCESS_PARAMETERS = read_process(process,
peb.ProcessParameters as _)?;
read_process_unicode_string(process, parameters.CommandLine)
})() {
Ok(command_line)
} else {
Err(())
}
}
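// Hypothetical caller sketch: the PEB walk above returns Err for processes we
// lack PROCESS_VM_READ access to, so just skip those.
unsafe fn log_cmdline(pid: u32) {
    if let Ok(cmd) = get_process_command_line(pid) {
        println!("pid {}: {}", pid, cmd);
    }
}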
pub unsafe fn collect_requester_info
|
identifier_name
|
|
mod.rs
|
}
// One of two endpoints for a control channel with a connector on either end.
// The underlying transport is TCP, so we use an inbox buffer to allow
// discrete payload receipt.
struct NetEndpoint {
inbox: Vec<u8>,
stream: TcpStream,
}
// Datastructure used during the setup phase representing a NetEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct NetEndpointSetup {
getter_for_incoming: PortId,
sock_addr: SocketAddr,
endpoint_polarity: EndpointPolarity,
}
// Datastructure used during the setup phase representing a UdpEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct UdpEndpointSetup {
getter_for_incoming: PortId,
local_addr: SocketAddr,
peer_addr: SocketAddr,
}
// NetEndpoint annotated with the ID of the port that receives payload
// messages received through the endpoint. This approach assumes that NetEndpoints
// DO NOT multiplex port->port channels, and so a mapping such as this is possible.
// As a result, the messages themselves don't need to carry the PortID with them.
#[derive(Debug)]
struct NetEndpointExt {
net_endpoint: NetEndpoint,
getter_for_incoming: PortId,
}
// Endpoint for a "raw" UDP endpoint. Corresponds to the "Udp Mediator Component"
// described in the literature.
// It acts as an endpoint by receiving messages via the poller etc. (managed by EndpointManager),
// and as a native component by managing a (speculative) set of payload messages (an outbox,
// protecting the peer on the other side of the network).
#[derive(Debug)]
struct UdpEndpointExt {
sock: UdpSocket, // already bound and connected
received_this_round: bool,
outgoing_payloads: HashMap<Predicate, Payload>,
getter_for_incoming: PortId,
}
// Meta-data for the connector: its role in the consensus tree.
#[derive(Debug)]
struct Neighborhood {
parent: Option<usize>,
children: VecSet<usize>,
}
// Manages the connector's ID, and manages allocations for connector/port IDs.
#[derive(Debug, Clone)]
struct IdManager {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
component_suffix_stream: U32Stream,
}
// Newtype wrapper around a byte buffer, used for UDP mediators to receive incoming datagrams.
struct IoByteBuffer {
byte_vec: Vec<u8>,
}
// A generator of speculative variables. Created on-demand during the synchronous round
// by the IdManager.
#[derive(Debug)]
struct SpecVarStream {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
}
// Manages the messy state of the various endpoints, pollers, buffers, etc.
#[derive(Debug)]
struct EndpointManager {
// invariants:
// 1. net and udp endpoints are registered with poll with tokens computed with TargetToken::into
// 2. Events is empty
poll: Poll,
events: Events,
delayed_messages: Vec<(usize, Msg)>,
undelayed_messages: Vec<(usize, Msg)>, // ready to yield
net_endpoint_store: EndpointStore<NetEndpointExt>,
udp_endpoint_store: EndpointStore<UdpEndpointExt>,
io_byte_buffer: IoByteBuffer,
}
// A storage of endpoints, which keeps track of which endpoints have raised
// an event during poll(), signifying that they need to be checked for new incoming data
#[derive(Debug)]
struct EndpointStore<T> {
endpoint_exts: Vec<T>,
polled_undrained: VecSet<usize>,
}
// The information associated with a port identifier, designed for local storage.
#[derive(Clone, Debug)]
struct PortInfo {
owner: ComponentId,
peer: Option<PortId>,
polarity: Polarity,
route: Route,
}
// Similar to `PortInfo`, but designed for communication during the setup procedure.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct MyPortInfo {
polarity: Polarity,
port: PortId,
owner: ComponentId,
}
// Newtype around port info map, allowing the implementation of some
// useful methods
#[derive(Default, Debug, Clone)]
struct PortInfoMap {
// invariant: self.invariant_preserved()
// `owned` is redundant information, allowing for fast lookup
// of a component's owned ports (which occurs during the sync round a lot)
map: HashMap<PortId, PortInfo>,
owned: HashMap<ComponentId, HashSet<PortId>>,
}
// A convenient substructure for containing port info and the ID manager.
// Houses the bulk of the connector's persistent state between rounds.
// It turns out several situations require access to both things.
#[derive(Debug, Clone)]
struct IdAndPortState {
port_info: PortInfoMap,
id_manager: IdManager,
}
// A component's setup-phase-specific data
#[derive(Debug)]
struct ConnectorCommunication {
round_index: usize,
endpoint_manager: EndpointManager,
neighborhood: Neighborhood,
native_batches: Vec<NativeBatch>,
round_result: Result<Option<RoundEndedNative>, SyncError>,
}
// A component's data common to both setup and communication phases
#[derive(Debug)]
struct ConnectorUnphased {
proto_description: Arc<ProtocolDescription>,
proto_components: HashMap<ComponentId, ComponentState>,
logger: Box<dyn Logger>,
ips: IdAndPortState,
native_component_id: ComponentId,
}
// A connector's phase-specific data
#[derive(Debug)]
enum ConnectorPhased {
Setup(Box<ConnectorSetup>),
Communication(Box<ConnectorCommunication>),
}
// A connector's setup-phase-specific data
#[derive(Debug)]
struct ConnectorSetup {
net_endpoint_setups: Vec<NetEndpointSetup>,
udp_endpoint_setups: Vec<UdpEndpointSetup>,
}
// A newtype wrapper for a map from speculative variable to speculative value
// A missing mapping corresponds with "unspecified".
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
struct Predicate {
assigned: BTreeMap<SpecVar, SpecVal>,
}
// Identifies a child of this connector in the _solution tree_.
// Each connector creates its own local solutions for the consensus procedure during `sync`,
// from the solutions of its children. Those children are either locally-managed components,
// (which are leaves in the solution tree), or other connectors reachable through the given
// network endpoint (which are internal nodes in the solution tree).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
enum SubtreeId {
LocalComponent(ComponentId),
NetEndpoint { index: usize },
}
// An accumulation of the connector's knowledge of all (a) the local solutions its children
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
// This structure starts off each round with an empty set, and accumulates solutions as they are found
// by local components, or received over the network in control messages.
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
// say that these sets GROW until the round is over, and all solutions are reset.
#[derive(Debug)]
struct SolutionStorage {
// invariant: old_local U new_local solutions are those that can be created from
// the UNION of one element from each set in `subtree_solution`.
// invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
// this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
subtree_solutions: Vec<HashSet<Predicate>>,
subtree_id_to_index: HashMap<SubtreeId, usize>,
}
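// Hedged sketch (not the crate's actual routine): per the invariant above, the
// local solutions are exactly the conflict-free unions formed by picking one
// predicate from each subtree's solution set and merging via union_with.
fn local_solutions_sketch(subtree_solutions: &[HashSet<Predicate>]) -> HashSet<Predicate> {
    let mut acc = vec![Predicate::default()];
    for set in subtree_solutions {
        acc = acc
            .iter()
            .flat_map(|p| set.iter().filter_map(move |q| p.union_with(q)))
            .collect();
    }
    acc.into_iter().collect()
}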
// Stores the transient data of a synchronous round.
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
// and can be undone if the round fails.
struct RoundCtx {
solution_storage: SolutionStorage,
spec_var_stream: SpecVarStream,
payload_inbox: Vec<(PortId, SendPayloadMsg)>,
deadline: Option<Instant>,
ips: IdAndPortState,
}
// A trait intended to limit the access of the ConnectorUnphased structure
// such that we don't accidentally modify any important component/port data
// while the results of the round are undecided. Why? Any actions during Connector::sync
// are _speculative_ until the round is decided, and we need a safe way of rolling
// back any changes.
trait CuUndecided {
fn logger(&mut self) -> &mut dyn Logger;
fn proto_description(&self) -> &ProtocolDescription;
fn native_component_id(&self) -> ComponentId;
fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
fn logger_and_protocol_components(
&mut self,
) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
}
// Represents a set of synchronous port operations that the native component
// has described as an "option" for completing during the synchronous rounds.
// Operations contained here succeed together or not at all.
// A native with N=2+ batches is expressing an N-way nondeterministic choice
#[derive(Debug, Default)]
struct NativeBatch {
// invariant: putters' and getters' polarities respected
    to_put: HashMap<PortId, Payload>,
    to_get: HashSet<PortId>,
}
// Outcome of attempting to union two predicates' assignments; see Predicate::assignment_union.
enum AssignmentUnionResult {
    FormerNotLatter,
    LatterNotFormer,
    Equivalent,
    New(Predicate),
    Nonexistant,
}
|
random_line_split
|
|
mod.rs
|
    fn contains(&self, element: &T) -> bool {
        self.vec.binary_search(element).is_ok()
}
    // Insert the given element. Returns true iff it was newly inserted.
fn insert(&mut self, element: T) -> bool {
match self.vec.binary_search(&element) {
Ok(_) => false,
Err(index) => {
self.vec.insert(index, element);
true
}
}
}
fn iter(&self) -> std::slice::Iter<T> {
self.vec.iter()
}
fn pop(&mut self) -> Option<T> {
self.vec.pop()
}
}
impl PortInfoMap {
fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
}
fn spec_var_for(&self, port: PortId) -> SpecVar {
// Every port maps to a speculative variable
// Two distinct ports map to the same variable
// IFF they are two ends of the same logical channel.
let info = self.map.get(&port).unwrap();
SpecVar(match info.polarity {
Getter => port,
Putter => info.peer.unwrap(),
})
}
fn invariant_preserved(&self) -> bool {
// for every port P with some owner O,
// P is in O's owned set
for (port, info) in self.map.iter() {
match self.owned.get(&info.owner) {
Some(set) if set.contains(port) => {}
_ => {
println!("{:#?}\n WITH port {:?}", self, port);
return false;
}
}
}
// for every port P owned by every owner O,
// P's owner is O
for (&owner, set) in self.owned.iter() {
for port in set {
match self.map.get(port) {
Some(info) if info.owner == owner => {}
_ => {
println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
return false;
}
}
}
}
true
}
}
impl SpecVarStream {
fn next(&mut self) -> SpecVar {
let phantom_port: PortId =
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
.into();
SpecVar(phantom_port)
}
}
impl IdManager {
fn new(connector_id: ConnectorId) -> Self {
Self {
connector_id,
port_suffix_stream: Default::default(),
component_suffix_stream: Default::default(),
}
}
fn new_spec_var_stream(&self) -> SpecVarStream {
// Spec var stream starts where the current port_id stream ends, with gap of SKIP_N.
        // This gap is entirely unnecessary (i.e. 0 is fine).
        // Its purpose is only to make SpecVars easier to spot in logs.
// E.g. spot the spec var: { v0_0, v1_2, v1_103 }
const SKIP_N: u32 = 100;
let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
}
fn new_port_id(&mut self) -> PortId {
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
}
fn new_component_id(&mut self) -> ComponentId {
Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
.into()
}
}
impl Drop for Connector {
fn drop(&mut self) {
log!(self.unphased.logger(), "Connector dropping. Goodbye!");
}
}
// Given a slice of ports, return the first port, if any, that occurs more than once
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
let mut vec = Vec::with_capacity(slice.len());
for port in slice.iter() {
match vec.binary_search(port) {
Err(index) => vec.insert(index, *port),
Ok(_) => return Some(*port),
}
}
None
}
impl Connector {
/// Generate a random connector identifier from the system's source of randomness.
pub fn random_id() -> ConnectorId {
type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
unsafe {
let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
// getrandom is the canonical crate for a small, secure rng
getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
            // safe! representations of all valid Bytes8 values are valid ConnectorId values
std::mem::transmute::<_, _>(bytes.assume_init())
}
}
    /// Returns true iff the connector is in connected state, i.e., its setup phase is complete,
/// and it is ready to participate in synchronous rounds of communication.
pub fn is_connected(&self) -> bool {
// If designed for Rust usage, connectors would be exposed as an enum type from the start.
        // Consequently, this "phased" business would also include connector variants and this would
        // get a lot closer to the connector impl. itself.
        // Instead, the C-oriented implementation doesn't distinguish connector states as types,
        // and distinguishes them as enum variants instead.
match self.phased {
ConnectorPhased::Setup(..) => false,
ConnectorPhased::Communication(..) => true,
}
}
/// Enables the connector's current logger to be swapped out for another
pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
std::mem::swap(&mut self.unphased.logger, &mut new_logger);
new_logger
}
/// Access the connector's current logger
pub fn get_logger(&mut self) -> &mut dyn Logger {
&mut *self.unphased.logger
}
/// Create a new synchronous channel, returning its ends as a pair of ports,
/// with polarity output, input respectively. Available during either setup/communication phase.
/// # Panics
/// This function panics if the connector's (large) port id space is exhausted.
pub fn new_port_pair(&mut self) -> [PortId; 2] {
let cu = &mut self.unphased;
// adds two new associated ports, related to each other, and exposed to the native
let mut new_cid = || cu.ips.id_manager.new_port_id();
// allocate two fresh port identifiers
let [o, i] = [new_cid(), new_cid()];
// store info for each:
// - they are each others' peers
// - they are owned by a local component with id `cid`
// - polarity putter, getter respectively
cu.ips.port_info.map.insert(
o,
PortInfo {
route: Route::LocalComponent,
peer: Some(i),
owner: cu.native_component_id,
polarity: Putter,
},
);
cu.ips.port_info.map.insert(
i,
PortInfo {
route: Route::LocalComponent,
peer: Some(o),
owner: cu.native_component_id,
polarity: Getter,
},
);
cu.ips
.port_info
.owned
.entry(cu.native_component_id)
.or_default()
.extend([o, i].iter().copied());
log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
[o, i]
}
/// Instantiates a new component for the connector runtime to manage, and passing
/// the given set of ports from the interface of the native component, to that of the
/// newly created component (passing their ownership).
/// # Errors
/// Error is returned if the moved ports are not owned by the native component,
/// if the given component name is not defined in the connector's protocol,
/// the given sequence of ports contains a duplicate port,
/// or if the component is unfit for instantiation with the given port sequence.
/// # Panics
/// This function panics if the connector's (large) component id space is exhausted.
pub fn add_component(
&mut self,
module_name: &[u8],
identifier: &[u8],
ports: &[PortId],
) -> Result<(), AddComponentError> {
// Check for error cases first before modifying `cu`
use AddComponentError as Ace;
let cu = &self.unphased;
if let Some(port) = duplicate_port(ports) {
return Err(Ace::DuplicatePort(port));
}
let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
if expected_polarities.len() != ports.len() {
return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
}
for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
if info.owner != cu.native_component_id {
return Err(Ace::UnknownPort(port));
}
            if info.polarity != expected_polarity {
                return Err(Ace::WrongPortPolarity { port, expected_polarity });
            }
|
conditional_block
|
|
mod.rs
|
/// the given sequence of ports contains a duplicate port,
/// or if the component is unfit for instantiation with the given port sequence.
/// # Panics
/// This function panics if the connector's (large) component id space is exhausted.
pub fn add_component(
&mut self,
module_name: &[u8],
identifier: &[u8],
ports: &[PortId],
) -> Result<(), AddComponentError> {
// Check for error cases first before modifying `cu`
use AddComponentError as Ace;
let cu = &self.unphased;
if let Some(port) = duplicate_port(ports) {
return Err(Ace::DuplicatePort(port));
}
let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
if expected_polarities.len() != ports.len() {
return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
}
for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
if info.owner != cu.native_component_id {
return Err(Ace::UnknownPort(port));
}
if info.polarity != expected_polarity {
return Err(Ace::WrongPortPolarity { port, expected_polarity });
}
}
// No errors! Time to modify `cu`
// create a new component and identifier
let Connector { phased, unphased: cu } = self;
let new_cid = cu.ips.id_manager.new_component_id();
cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
// update the ownership of moved ports
for port in ports.iter() {
match cu.ips.port_info.map.get_mut(port) {
Some(port_info) => port_info.owner = new_cid,
None => unreachable!(),
}
}
if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
set.retain(|x| !ports.contains(x));
}
let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
if let ConnectorPhased::Communication(comm) = phased {
// Preserve invariant: batches only reason about native's ports.
// Remove batch puts/gets for moved ports.
for batch in comm.native_batches.iter_mut() {
batch.to_put.retain(|port, _| !moved_port_set.contains(port));
batch.to_get.retain(|port| !moved_port_set.contains(port));
}
}
cu.ips.port_info.owned.insert(new_cid, moved_port_set);
Ok(())
}
}
impl Predicate {
#[inline]
pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
Self::default().inserted(k, v)
}
#[inline]
pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
self.assigned.insert(k, v);
self
}
    // Return true iff `self`'s assignments are a subset of `maybe_superset`'s
pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
for (var, val) in self.assigned.iter() {
match maybe_superset.assigned.get(var) {
Some(val2) if val2 == val => {}
_ => return false, // var unmapped, or mapped differently
}
}
// `maybe_superset` mirrored all my assignments!
true
}
/// Given the two predicates {self, other}, return that whose
/// assignments are the union of those of both.
fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
use AssignmentUnionResult as Aur;
// iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
let [mut s_it, mut o_it] = [self.assigned.iter(), other.assigned.iter()];
let [mut s, mut o] = [s_it.next(), o_it.next()];
// populate lists of assignments in self but not other and vice versa.
// do this by incrementally unfolding the iterators, keeping an eye
// on the ordering between the head elements [s, o].
// whenever s<o, other is certainly missing element 's', etc.
let [mut s_not_o, mut o_not_s] = [vec![], vec![]];
loop {
match [s, o] {
[None, None] => break, // both iterators are empty
[None, Some(x)] => {
// self's iterator is empty.
                    // all remaining elements are in other but not self
o_not_s.push(x);
o_not_s.extend(o_it);
break;
}
[Some(x), None] => {
// other's iterator is empty.
                    // all remaining elements are in self but not other
s_not_o.push(x);
s_not_o.extend(s_it);
break;
}
[Some((sid, sb)), Some((oid, ob))] => {
if sid < oid {
// o is missing this element
s_not_o.push((sid, sb));
s = s_it.next();
} else if sid > oid {
// s is missing this element
o_not_s.push((oid, ob));
o = o_it.next();
} else if sb != ob {
assert_eq!(sid, oid);
// both predicates assign the variable but differ on the value
// No predicate exists which satisfies both!
return Aur::Nonexistant;
} else {
// both predicates assign the variable to the same value
s = s_it.next();
o = o_it.next();
}
}
}
}
// Observed zero inconsistencies. A unified predicate exists...
match [s_not_o.is_empty(), o_not_s.is_empty()] {
[true, true] => Aur::Equivalent, // ... equivalent to both.
[false, true] => Aur::FormerNotLatter, // ... equivalent to self.
[true, false] => Aur::LatterNotFormer, // ... equivalent to other.
[false, false] => {
// ... which is the union of the predicates' assignments but
// is equivalent to neither self nor other.
let mut new = self.clone();
for (&id, &b) in o_not_s {
new.assigned.insert(id, b);
}
Aur::New(new)
}
}
}
// Compute the union of the assignments of the two given predicates, if it exists.
// It doesn't exist if there is some variable which the two predicates assign to different values.
pub(crate) fn union_with(&self, other: &Self) -> Option<Self> {
let mut res = self.clone();
for (&channel_id, &assignment_1) in other.assigned.iter() {
match res.assigned.insert(channel_id, assignment_1) {
Some(assignment_2) if assignment_1 != assignment_2 => return None,
_ => {}
}
}
Some(res)
}
pub(crate) fn query(&self, var: SpecVar) -> Option<SpecVal> {
self.assigned.get(&var).copied()
}
}
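// Hedged usage sketch of the Predicate methods above; assumes va/vb are two
// distinct SpecVars and t/f two distinct SpecVals.
fn predicate_union_demo(va: SpecVar, vb: SpecVar, t: SpecVal, f: SpecVal) {
    let p = Predicate::singleton(va, t);
    let q = p.clone().inserted(vb, f);
    assert!(p.assigns_subset(&q)); // q extends p's assignments
    assert_eq!(p.union_with(&q), Some(q.clone())); // compatible: union is q itself
    let r = Predicate::singleton(va, f);
    assert_eq!(p.union_with(&r), None); // both assign va, differently: no union
}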
impl RoundCtx {
// remove an arbitrary buffered message, along with the ID of the getter who receives it
fn getter_pop(&mut self) -> Option<(PortId, SendPayloadMsg)> {
self.payload_inbox.pop()
}
// buffer a message along with the ID of the getter who receives it
fn getter_push(&mut self, getter: PortId, msg: SendPayloadMsg) {
self.payload_inbox.push((getter, msg));
}
// buffer a message along with the ID of the putter who sent it
fn putter_push(&mut self, cu: &mut impl CuUndecided, putter: PortId, msg: SendPayloadMsg) {
if let Some(getter) = self.ips.port_info.map.get(&putter).unwrap().peer {
log!(cu.logger(), "Putter add (putter:{:?} => getter:{:?})", putter, getter);
self.getter_push(getter, msg);
} else {
log!(cu.logger(), "Putter {:?} has no known peer!", putter);
panic!("Putter {:?} has no known peer!", putter);
}
}
}
impl<T: Debug + std::cmp::Ord> Debug for VecSet<T> {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
f.debug_set().entries(self.vec.iter()).finish()
}
}
impl Debug for Predicate {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
struct Assignment<'a>((&'a SpecVar, &'a SpecVal));
impl Debug for Assignment<'_> {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
write!(f, "{:?}={:?}", (self.0).0, (self.0).1)
}
}
f.debug_set().entries(self.assigned.iter().map(Assignment)).finish()
}
}
impl IdParts for SpecVar {
fn id_parts(self) -> (ConnectorId, U32Suffix) {
self.0.id_parts()
}
}
impl Debug for SpecVar {
    fn fmt
|
identifier_name
|
|
mod.rs
|
}
// Similar to `PortInfo`, but designed for communication during the setup procedure.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct MyPortInfo {
polarity: Polarity,
port: PortId,
owner: ComponentId,
}
// Newtype around port info map, allowing the implementation of some
// useful methods
#[derive(Default, Debug, Clone)]
struct PortInfoMap {
// invariant: self.invariant_preserved()
// `owned` is redundant information, allowing for fast lookup
// of a component's owned ports (which occurs during the sync round a lot)
map: HashMap<PortId, PortInfo>,
owned: HashMap<ComponentId, HashSet<PortId>>,
}
// A convenient substructure for containing port info and the ID manager.
// Houses the bulk of the connector's persistent state between rounds.
// It turns out several situations require access to both things.
#[derive(Debug, Clone)]
struct IdAndPortState {
port_info: PortInfoMap,
id_manager: IdManager,
}
// A component's setup-phase-specific data
#[derive(Debug)]
struct ConnectorCommunication {
round_index: usize,
endpoint_manager: EndpointManager,
neighborhood: Neighborhood,
native_batches: Vec<NativeBatch>,
round_result: Result<Option<RoundEndedNative>, SyncError>,
}
// A component's data common to both setup and communication phases
#[derive(Debug)]
struct ConnectorUnphased {
proto_description: Arc<ProtocolDescription>,
proto_components: HashMap<ComponentId, ComponentState>,
logger: Box<dyn Logger>,
ips: IdAndPortState,
native_component_id: ComponentId,
}
// A connector's phase-specific data
#[derive(Debug)]
enum ConnectorPhased {
Setup(Box<ConnectorSetup>),
Communication(Box<ConnectorCommunication>),
}
// A connector's setup-phase-specific data
#[derive(Debug)]
struct ConnectorSetup {
net_endpoint_setups: Vec<NetEndpointSetup>,
udp_endpoint_setups: Vec<UdpEndpointSetup>,
}
// A newtype wrapper for a map from speculative variable to speculative value
// A missing mapping corresponds with "unspecified".
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
struct Predicate {
assigned: BTreeMap<SpecVar, SpecVal>,
}
// Identifies a child of this connector in the _solution tree_.
// Each connector creates its own local solutions for the consensus procedure during `sync`,
// from the solutions of its children. Those children are either locally-managed components,
// (which are leaves in the solution tree), or other connectors reachable through the given
// network endpoint (which are internal nodes in the solution tree).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
enum SubtreeId {
LocalComponent(ComponentId),
NetEndpoint { index: usize },
}
// An accumulation of the connector's knowledge of all (a) the local solutions its children
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
// This structure starts off each round with an empty set, and accumulates solutions as they are found
// by local components, or received over the network in control messages.
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
// say that these sets GROW until the round is over, and all solutions are reset.
#[derive(Debug)]
struct SolutionStorage {
// invariant: old_local U new_local solutions are those that can be created from
// the UNION of one element from each set in `subtree_solution`.
// invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
// this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
subtree_solutions: Vec<HashSet<Predicate>>,
subtree_id_to_index: HashMap<SubtreeId, usize>,
}
// Stores the transient data of a synchronous round.
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
// and can be undone if the round fails.
struct RoundCtx {
solution_storage: SolutionStorage,
spec_var_stream: SpecVarStream,
payload_inbox: Vec<(PortId, SendPayloadMsg)>,
deadline: Option<Instant>,
ips: IdAndPortState,
}
// A trait intended to limit the access of the ConnectorUnphased structure
// such that we don't accidentally modify any important component/port data
// while the results of the round are undecided. Why? Any actions during Connector::sync
// are _speculative_ until the round is decided, and we need a safe way of rolling
// back any changes.
trait CuUndecided {
fn logger(&mut self) -> &mut dyn Logger;
fn proto_description(&self) -> &ProtocolDescription;
fn native_component_id(&self) -> ComponentId;
fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
fn logger_and_protocol_components(
&mut self,
) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
}
// Represents a set of synchronous port operations that the native component
// has described as an "option" for completing during the synchronous rounds.
// Operations contained here succeed together or not at all.
// A native with N=2+ batches is expressing an N-way nondeterministic choice
#[derive(Debug, Default)]
struct NativeBatch {
// invariant: putters' and getters' polarities respected
to_put: HashMap<PortId, Payload>,
to_get: HashSet<PortId>,
}
// Parallels a mio::Token type, but more clearly communicates
// the way it identifies the evented structure it corresponds to.
// See runtime/setup for methods converting between TokenTarget and mio::Token
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TokenTarget {
NetEndpoint { index: usize },
UdpEndpoint { index: usize },
}
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
// such that it can know when to continue polling, and when to block.
enum CommRecvOk {
TimeoutWithoutNew,
NewPayloadMsgs,
NewControlMsg { net_index: usize, msg: CommCtrlMsg },
}
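// A plausible encoding sketch only; the crate's real TokenTarget<->mio::Token
// conversions live in runtime/setup and may use a different scheme entirely.
impl From<TokenTarget> for mio::Token {
    fn from(t: TokenTarget) -> Self {
        match t {
            TokenTarget::NetEndpoint { index } => mio::Token(index * 2),
            TokenTarget::UdpEndpoint { index } => mio::Token(index * 2 + 1),
        }
    }
}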
////////////////
fn err_would_block(err: &std::io::Error) -> bool {
err.kind() == std::io::ErrorKind::WouldBlock
}
impl<T: std::cmp::Ord> VecSet<T> {
fn new(mut vec: Vec<T>) -> Self {
// establish the invariant
vec.sort();
vec.dedup();
Self { vec }
}
fn contains(&self, element: &T) -> bool {
self.vec.binary_search(element).is_ok()
}
    // Insert the given element. Returns true iff it was newly inserted.
fn insert(&mut self, element: T) -> bool {
match self.vec.binary_search(&element) {
Ok(_) => false,
Err(index) => {
self.vec.insert(index, element);
true
}
}
}
fn iter(&self) -> std::slice::Iter<T> {
self.vec.iter()
}
fn pop(&mut self) -> Option<T> {
self.vec.pop()
}
}
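// Hedged usage sketch: `new` sorts and dedups, so membership tests are binary
// searches and `pop` removes the greatest element.
fn vecset_demo() {
    let mut s = VecSet::new(vec![3, 1, 3]);
    assert!(s.contains(&1) && s.contains(&3));
    assert!(s.insert(2)); // newly inserted
    assert!(!s.insert(2)); // second insert of the same element is a no-op
    assert_eq!(s.pop(), Some(3));
}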
impl PortInfoMap {
fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
}
fn spec_var_for(&self, port: PortId) -> SpecVar {
// Every port maps to a speculative variable
// Two distinct ports map to the same variable
// IFF they are two ends of the same logical channel.
let info = self.map.get(&port).unwrap();
SpecVar(match info.polarity {
Getter => port,
Putter => info.peer.unwrap(),
})
}
fn invariant_preserved(&self) -> bool {
// for every port P with some owner O,
// P is in O's owned set
for (port, info) in self.map.iter() {
match self.owned.get(&info.owner) {
Some(set) if set.contains(port) => {}
_ => {
println!("{:#?}\n WITH port {:?}", self, port);
return false;
}
}
}
// for every port P owned by every owner O,
// P's owner is O
for (&owner, set) in self.owned.iter() {
for port in set {
match self.map.get(port) {
Some(info) if info.owner == owner => {}
_ => {
println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
return false;
}
}
}
}
true
}
}
impl SpecVarStream {
fn next(&mut self) -> SpecVar {
let phantom_port: PortId =
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
.into();
SpecVar(phantom_port)
}
}
impl IdManager {
    fn new(connector_id: ConnectorId) -> Self {
        Self {
            connector_id,
            port_suffix_stream: Default::default(),
            component_suffix_stream: Default::default(),
        }
    }
|
identifier_body
|
|
zkdevice.py
|
import argparse
import datetime
import os
import random
import sqlite3
import subprocess as spr
import sys
import telnetlib
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
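def _demo_server_ip():
    # Hedged usage sketch: connect() on a UDP socket sends no packets; the OS
    # merely selects the outgoing interface, so getsockname() reveals the local
    # address the device would see. 192.168.1.201 is a hypothetical device IP.
    print(get_server_ip('192.168.1.201'))  # e.g. '192.168.1.10'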
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
'''
    Telnet into the device and transfer a file between it (from_ip) and this
    machine's FTP server (to_ip), using busybox ftpput/ftpget.
'''
# ====FTP Server====
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
# start pyftpdlib FTP server: anonymous with write permission, port 2121
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
    s = telnetlib.Telnet(from_ip)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
        s.write(bytes('ls %s\n' % os.path.dirname(remote_file_path), 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
                command = bytes('%s -P 2121 %s %s %s\n' % (cmd, to_ip,
                                                           filename,
                                                           remote_file_path),
                                'utf-8')
elif cmd == 'ftpget':
                command = bytes('%s -P 2121 %s %s %s\n' % (cmd, to_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
# stop pyftpdlib FTP server
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
'''
    Generate a plausible verify time for status `in` or `out`:
    `in` is a random time in the 10 minutes before 8:00 (8:15-8:20 if late),
    `out` is a random time in the 10 minutes after 17:00.
'''
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
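def _demo_verify_time():
    # Hedged sketch of the windows produced above: 'in' falls in [7:50, 8:00)
    # unless late=True (then [8:15, 8:21)), and 'out' falls in [17:00, 17:11).
    assert datetime.time(7, 50) <= generate_verify_time('in') < datetime.time(8, 0)
    assert datetime.time(17, 0) <= generate_verify_time('out') < datetime.time(17, 11)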
def add_log(uid, date, status, late=False):
'''
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
'''
# verify_type: 0 is password, 1 is fingerprint
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '
'Status, Work_Code_ID, SEND_FLAG) '
                 'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type,
                                                           verify_time, status)
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
        print_log(r[0], uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
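def _demo_add_logs():
    # Hedged usage sketch: 1001 is a hypothetical User PIN. Writes one week of
    # on-time rows (one check in, one check out per day) via add_log().
    add_logs(1001, '02/01/2017', '06/01/2017', 'in')
    add_logs(1001, '02/01/2017', '06/01/2017', 'out')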
def delete_log(log_id):
'''
Delete a log row with ID=log_id
'''
with sqlite3.connect(DB) as conn:
query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))
conn.execute(query)
print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
'''
Returns logs of 'uid' from 'start_date' to 'end_date'
uid: User PIN
start_date: follow format 14/01/2017
end_date: follow format 15/01/2017
Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
with sqlite3.connect(DB) as conn:
query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '
'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))
cur = conn.execute(query)
rows = cur.fetchall()
ret = []
for row in rows:
log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
            if start_date <= log_date < end_date + datetime.timedelta(days=1):
ret.append(row)
return ret
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
'''
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
'''
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
'''
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
    log_date = datetime.datetime.strptime(log_row[-2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
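def _demo_check_log_row():
    # Hedged sketch with a fabricated row: status 0 (check in) at 08:30 is
    # after the 8:00 cutoff, so check_log_row reports it late and returns False.
    row = (1, 1001, 1, '2017-01-14T08:30:00', 0)
    assert check_log_row(row) is False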
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
'''
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
'''
    start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
    day_count = (end_date - start_date).days + 1
    for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
        date = '{:%d/%m/%Y}'.format(date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
                delete_log(log[0])
            add_log(uid, date, 'in')
            add_log(uid, date, 'out')
|
conditional_block
|
zkdevice.py
|
    if s.read_until(b'#'):
        s.write(bytes('ls %s\n' % os.path.dirname(remote_file_path), 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
                command = bytes('%s -P 2121 %s %s %s\n' % (cmd, to_ip,
                                                           filename,
                                                           remote_file_path),
                                'utf-8')
elif cmd == 'ftpget':
                command = bytes('%s -P 2121 %s %s %s\n' % (cmd, to_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
# stop pyftpdlib FTP server
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
'''
    Generate a plausible verify time for status `in` or `out`:
    `in` is a random time in the 10 minutes before 8:00 (8:15-8:20 if late),
    `out` is a random time in the 10 minutes after 17:00.
'''
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
def add_log(uid, date, status, late=False):
'''
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
'''
# verify_type: 0 is password, 1 is fingerprint
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '
'Status, Work_Code_ID, SEND_FLAG) '
                 'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type,
                                                           verify_time, status)
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
        print_log(r[0], uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
def delete_log(log_id):
'''
Delete a log row with ID=log_id
'''
with sqlite3.connect(DB) as conn:
query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))
conn.execute(query)
print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
'''
Returns logs of 'uid' from 'start_date' to 'end_date'
uid: User PIN
start_date: follow format 14/01/2017
end_date: follow format 15/01/2017
Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
with sqlite3.connect(DB) as conn:
query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '
'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))
cur = conn.execute(query)
rows = cur.fetchall()
ret = []
for row in rows:
log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
            if start_date <= log_date < end_date + datetime.timedelta(days=1):
ret.append(row)
return ret
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
'''
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
'''
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
'''
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
    log_date = datetime.datetime.strptime(log_row[-2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
'''
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
'''
    start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
    day_count = (end_date - start_date).days + 1
    for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
        date = '{:%d/%m/%Y}'.format(date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
def main():
today = '{:%d/%m/%Y}'.format(datetime.date.today())
parser = argparse.ArgumentParser()
parser.add_argument('action', help='`get`, `checkin`, `checkout`, '
'`add` or `fix` logs', default='get')
parser.add_argument('uids', help='User PINs', type=int, nargs='*')
parser.add_argument('-d', '--date', help='Date', default=today)
parser.add_argument('-r', '--range',
help='Range of date, ex. 01/01/2017-02/01/2017')
parser.add_argument('--log', help='log id to delete')
parser.add_argument('--late', help='Checkin late or not',
action='store_true')
args = parser.parse_args()
uids = args.uids
date = args.date or today
if not args.range:
start, end = date, date
else:
        start, end = args.range.split('-')
    transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
|
random_line_split
|
|
zkdevice.py
|
import argparse
import datetime
import os
import random
import sqlite3
import subprocess as spr
import sys
import telnetlib
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
'''
    Telnet into the device and transfer a file between it (from_ip) and this
    machine's FTP server (to_ip), using busybox ftpput/ftpget.
'''
# ====FTP Server====
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
# start pyftpdlib FTP server: anonymous with write permission, port 2121
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
    s = telnetlib.Telnet(from_ip)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
        s.write(bytes('ls %s\n' % os.path.dirname(remote_file_path), 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
                command = bytes('%s -P 2121 %s %s %s\n' % (cmd, to_ip,
                                                           filename,
                                                           remote_file_path),
                                'utf-8')
elif cmd == 'ftpget':
                command = bytes('%s -P 2121 %s %s %s\n' % (cmd, to_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
# stop pyftpdlib FTP server
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
'''
    Generate a plausible verify time for status `in` or `out`:
    `in` is a random time in the 10 minutes before 8:00 (8:15-8:20 if late),
    `out` is a random time in the 10 minutes after 17:00.
'''
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
def add_log(uid, date, status, late=False):
'''
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
'''
# verify_type: 0 is password, 1 is fingerprint
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '
'Status, Work_Code_ID, SEND_FLAG) '
                 'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type,
                                                           verify_time, status)
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
        print_log(r[0], uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
def delete_log(log_id):
'''
Delete a log row with ID=log_id
'''
with sqlite3.connect(DB) as conn:
query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))
conn.execute(query)
print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
'''
Returns logs of 'uid' from 'start_date' to 'end_date'
uid: User PIN
start_date: follow format 14/01/2017
end_date: follow format 15/01/2017
Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
with sqlite3.connect(DB) as conn:
query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '
'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))
cur = conn.execute(query)
rows = cur.fetchall()
ret = []
for row in rows:
log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
            if start_date <= log_date < end_date + datetime.timedelta(days=1):
ret.append(row)
return ret
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
'''
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
'''
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
'''
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
    log_date = datetime.datetime.strptime(log_row[-2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
'''
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
'''
    start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
    day_count = (end_date - start_date).days + 1
    for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
        date = '{:%d/%m/%Y}'.format(date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
            add_log(uid, date, 'out')
|
identifier_name
|