file_name (large_string, lengths 4 to 140) | prefix (large_string, lengths 0 to 12.1k) | suffix (large_string, lengths 0 to 12k) | middle (large_string, lengths 0 to 7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
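The columns above describe fill-in-the-middle (FIM) samples: each row stores a source file split into a prefix, a held-out middle, and a suffix, with the split strategy recorded in fim_type. As a minimal sketch of how such a row can be put back together (the column names come from the header above; the example row below is hypothetical, not copied from the table):

```python
# Sketch: reassemble a FIM row into the original file text.
# Column names (file_name, prefix, middle, suffix, fim_type) come from the
# table header; the example row is made up for illustration.

def reassemble(row: dict) -> str:
    """Concatenate the three text columns in source order."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "bert_tagger_trainer.py",
    "prefix": "def configure_optimizers(self):\n    ",
    "middle": 'no_decay = ["bias", "LayerNorm.weight"]',
    "suffix": "\n    ...",
    "fim_type": "random_line_split",
}

print(reassemble(example_row))
```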
bert_tagger_trainer.py
|
BertTagger.from_pretrained(args.bert_config_dir, config=bert_config)
logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args))
self.result_logger = logging.getLogger(__name__)
self.result_logger.setLevel(logging.INFO)
self.loss_func = CrossEntropyLoss()
self.span_f1 = TaggerSpanF1()
self.chinese = args.chinese
self.optimizer = args.optimizer
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--train_batch_size", type=int, default=8, help="batch size")
parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size")
parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate")
parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear")
parser.add_argument("--classifier_dropout", type=float, default=0.1)
parser.add_argument("--classifier_act_func", type=str, default="gelu")
parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024)
parser.add_argument("--chinese", action="store_true", help="is chinese dataset")
parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw", help="optimizer type")
parser.add_argument("--final_div_factor", type=float, default=1e4, help="final div factor of linear decay scheduler")
parser.add_argument("--output_dir", type=str, default="", help="the path for saving intermediate model checkpoints.")
parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler")
parser.add_argument("--data_sign", type=str, default="en_conll03", help="data signature for the dataset.")
parser.add_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.")
parser.add_argument("--do_lowercase", action="store_true", )
parser.add_argument("--data_file_suffix", type=str, default=".char.bmes")
parser.add_argument("--lr_scheulder", type=str, default="polydecay")
parser.add_argument("--lr_mini", type=float, default=-1)
parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.")
return parser
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.optimizer == "adamw":
optimizer = AdamW(optimizer_grouped_parameters,
betas=(0.9, 0.98), # according to RoBERTa paper
lr=self.args.lr,
eps=self.args.adam_epsilon,)
elif self.optimizer == "torch.adam":
optimizer = torch.optim.AdamW(optimizer_grouped_parameters,
lr=self.args.lr,
eps=self.args.adam_epsilon,
weight_decay=self.args.weight_decay)
else:
raise ValueError("Optimizer type does not exist.")
num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()])
t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs
warmup_steps = int(self.args.warmup_proportion * t_total)
if self.args.lr_scheduler == "onecycle":
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),
final_div_factor=self.args.final_div_factor,
total_steps=t_total, anneal_strategy='linear')
elif self.args.lr_scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif self.args.lr_scheulder == "polydecay":
if self.args.lr_mini == -1:
lr_mini = self.args.lr / self.args.polydecay_ratio
else:
lr_mini = self.args.lr_mini
scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)
else:
raise ValueError
return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
def forward(self, input_ids, token_type_ids, attention_mask):
return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
def compute_loss(self, sequence_logits, sequence_labels, input_mask=None):
if input_mask is not None:
active_loss = input_mask.view(-1) == 1
active_logits = sequence_logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels)
)
loss = self.loss_func(active_logits, active_labels)
else:
loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1))
return loss
def training_step(self, batch, batch_idx):
tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
tf_board_logs[f"train_loss"] = loss
return {'loss': loss, 'log': tf_board_logs}
def validation_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"val_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
span_tp, span_fp, span_fn = all_counts
span_recall = span_tp / (span_tp + span_fn + 1e-10)
span_precision = span_tp / (span_tp + span_fp + 1e-10)
span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
tensorboard_logs[f"span_precision"] = span_precision
tensorboard_logs[f"span_recall"] = span_recall
tensorboard_logs[f"span_f1"] = span_f1
self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ")
self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}")
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def test_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"test_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)),
is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label,
input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
|
def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]:
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
tensorboard_logs = {'test_loss': avg_loss}
|
random_line_split
|
|
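The compute_loss method in the row above masks padded positions by replacing their labels with the loss function's ignore_index before applying cross-entropy. A standalone sketch of that masking pattern, assuming plain PyTorch (the tensor shapes and num_labels below are illustrative, not taken from the dataset):

```python
# Sketch of the masked token-classification loss shown in compute_loss above.
# Shapes and num_labels are illustrative.
import torch
from torch.nn import CrossEntropyLoss

loss_func = CrossEntropyLoss()  # ignore_index defaults to -100
num_labels = 5

logits = torch.randn(2, 4, num_labels)          # (batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (2, 4))   # gold label ids
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])   # 1 = real token, 0 = padding

active = attention_mask.view(-1) == 1
active_labels = torch.where(
    active,
    labels.view(-1),
    torch.tensor(loss_func.ignore_index).type_as(labels),
)
loss = loss_func(logits.view(-1, num_labels), active_labels)
print(loss)
```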
bert_tagger_trainer.py
|
_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.")
parser.add_argument("--do_lowercase", action="store_true", )
parser.add_argument("--data_file_suffix", type=str, default=".char.bmes")
parser.add_argument("--lr_scheulder", type=str, default="polydecay")
parser.add_argument("--lr_mini", type=float, default=-1)
parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.")
return parser
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.optimizer == "adamw":
optimizer = AdamW(optimizer_grouped_parameters,
betas=(0.9, 0.98), # according to RoBERTa paper
lr=self.args.lr,
eps=self.args.adam_epsilon,)
elif self.optimizer == "torch.adam":
optimizer = torch.optim.AdamW(optimizer_grouped_parameters,
lr=self.args.lr,
eps=self.args.adam_epsilon,
weight_decay=self.args.weight_decay)
else:
raise ValueError("Optimizer type does not exist.")
num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()])
t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs
warmup_steps = int(self.args.warmup_proportion * t_total)
if self.args.lr_scheduler == "onecycle":
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),
final_div_factor=self.args.final_div_factor,
total_steps=t_total, anneal_strategy='linear')
elif self.args.lr_scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif self.args.lr_scheulder == "polydecay":
if self.args.lr_mini == -1:
lr_mini = self.args.lr / self.args.polydecay_ratio
else:
lr_mini = self.args.lr_mini
scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)
else:
raise ValueError
return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
def forward(self, input_ids, token_type_ids, attention_mask):
return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
def compute_loss(self, sequence_logits, sequence_labels, input_mask=None):
if input_mask is not None:
active_loss = input_mask.view(-1) == 1
active_logits = sequence_logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels)
)
loss = self.loss_func(active_logits, active_labels)
else:
loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1))
return loss
def training_step(self, batch, batch_idx):
tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
tf_board_logs[f"train_loss"] = loss
return {'loss': loss, 'log': tf_board_logs}
def validation_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"val_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
span_tp, span_fp, span_fn = all_counts
span_recall = span_tp / (span_tp + span_fn + 1e-10)
span_precision = span_tp / (span_tp + span_fp + 1e-10)
span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
tensorboard_logs[f"span_precision"] = span_precision
tensorboard_logs[f"span_recall"] = span_recall
tensorboard_logs[f"span_f1"] = span_f1
self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ")
self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}")
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def test_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"test_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)),
is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label,
input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]:
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
tensorboard_logs = {'test_loss': avg_loss}
all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
span_tp, span_fp, span_fn = all_counts
span_recall = span_tp / (span_tp + span_fn + 1e-10)
span_precision = span_tp / (span_tp + span_fp + 1e-10)
span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
tensorboard_logs[f"span_precision"] = span_precision
tensorboard_logs[f"span_recall"] = span_recall
tensorboard_logs[f"span_f1"] = span_f1
print(f"TEST INFO -> test_f1 is: {span_f1} precision: {span_precision}, recall: {span_recall}")
self.result_logger.info(f"EVAL INFO -> test_f1 is: {span_f1}, test_precision is: {span_precision}, test_recall is: {span_recall}")
return {'test_loss': avg_loss, 'log': tensorboard_logs}
def train_dataloader(self) -> DataLoader:
return self.get_dataloader("train")
def val_dataloader(self) -> DataLoader:
return self.get_dataloader("dev")
def test_dataloader(self) -> DataLoader:
return self.get_dataloader("test")
def get_dataloader(self, prefix="train", limit: int = None) -> DataLoader:
"""get train/dev/test dataloader"""
data_path = os.path.join(self.data_dir, f"{prefix}{self.args.data_file_suffix}")
dataset = TaggerNERDataset(data_path, self.tokenizer, self.args.data_sign,
max_length=self.args.max_length, is_chinese=self.args.chinese,
pad_to_maxlen=False)
if limit is not None:
|
dataset = TruncateDataset(dataset, limit)
|
conditional_block
|
|
bert_tagger_trainer.py
|
_config_dir, use_fast=False, do_lower_case=args.do_lowercase)
self.model = BertTagger.from_pretrained(args.bert_config_dir, config=bert_config)
logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args))
self.result_logger = logging.getLogger(__name__)
self.result_logger.setLevel(logging.INFO)
self.loss_func = CrossEntropyLoss()
self.span_f1 = TaggerSpanF1()
self.chinese = args.chinese
self.optimizer = args.optimizer
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--train_batch_size", type=int, default=8, help="batch size")
parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size")
parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate")
parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear")
parser.add_argument("--classifier_dropout", type=float, default=0.1)
parser.add_argument("--classifier_act_func", type=str, default="gelu")
parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024)
parser.add_argument("--chinese", action="store_true", help="is chinese dataset")
parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw", help="optimizer type")
parser.add_argument("--final_div_factor", type=float, default=1e4, help="final div factor of linear decay scheduler")
parser.add_argument("--output_dir", type=str, default="", help="the path for saving intermediate model checkpoints.")
parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler")
parser.add_argument("--data_sign", type=str, default="en_conll03", help="data signature for the dataset.")
parser.add_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.")
parser.add_argument("--do_lowercase", action="store_true", )
parser.add_argument("--data_file_suffix", type=str, default=".char.bmes")
parser.add_argument("--lr_scheulder", type=str, default="polydecay")
parser.add_argument("--lr_mini", type=float, default=-1)
parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.")
return parser
def
|
(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.optimizer == "adamw":
optimizer = AdamW(optimizer_grouped_parameters,
betas=(0.9, 0.98), # according to RoBERTa paper
lr=self.args.lr,
eps=self.args.adam_epsilon,)
elif self.optimizer == "torch.adam":
optimizer = torch.optim.AdamW(optimizer_grouped_parameters,
lr=self.args.lr,
eps=self.args.adam_epsilon,
weight_decay=self.args.weight_decay)
else:
raise ValueError("Optimizer type does not exist.")
num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()])
t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs
warmup_steps = int(self.args.warmup_proportion * t_total)
if self.args.lr_scheduler == "onecycle":
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),
final_div_factor=self.args.final_div_factor,
total_steps=t_total, anneal_strategy='linear')
elif self.args.lr_scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif self.args.lr_scheulder == "polydecay":
if self.args.lr_mini == -1:
lr_mini = self.args.lr / self.args.polydecay_ratio
else:
lr_mini = self.args.lr_mini
scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)
else:
raise ValueError
return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
def forward(self, input_ids, token_type_ids, attention_mask):
return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
def compute_loss(self, sequence_logits, sequence_labels, input_mask=None):
if input_mask is not None:
active_loss = input_mask.view(-1) == 1
active_logits = sequence_logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels)
)
loss = self.loss_func(active_logits, active_labels)
else:
loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1))
return loss
def training_step(self, batch, batch_idx):
tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
tf_board_logs[f"train_loss"] = loss
return {'loss': loss, 'log': tf_board_logs}
def validation_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"val_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
span_tp, span_fp, span_fn = all_counts
span_recall = span_tp / (span_tp + span_fn + 1e-10)
span_precision = span_tp / (span_tp + span_fp + 1e-10)
span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
tensorboard_logs[f"span_precision"] = span_precision
tensorboard_logs[f"span_recall"] = span_recall
tensorboard_logs[f"span_f1"] = span_f1
self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ")
self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}")
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def test_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"test_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)),
is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label,
input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]:
avg_loss = torch.stack([x['test_loss']
|
configure_optimizers
|
identifier_name
|
bert_tagger_trainer.py
|
_config_dir, use_fast=False, do_lower_case=args.do_lowercase)
self.model = BertTagger.from_pretrained(args.bert_config_dir, config=bert_config)
logging.info(str(args.__dict__ if isinstance(args, argparse.ArgumentParser) else args))
self.result_logger = logging.getLogger(__name__)
self.result_logger.setLevel(logging.INFO)
self.loss_func = CrossEntropyLoss()
self.span_f1 = TaggerSpanF1()
self.chinese = args.chinese
self.optimizer = args.optimizer
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--train_batch_size", type=int, default=8, help="batch size")
parser.add_argument("--eval_batch_size", type=int, default=8, help="batch size")
parser.add_argument("--bert_dropout", type=float, default=0.1, help="bert dropout rate")
parser.add_argument("--classifier_sign", type=str, default="multi_nonlinear")
parser.add_argument("--classifier_dropout", type=float, default=0.1)
parser.add_argument("--classifier_act_func", type=str, default="gelu")
parser.add_argument("--classifier_intermediate_hidden_size", type=int, default=1024)
parser.add_argument("--chinese", action="store_true", help="is chinese dataset")
parser.add_argument("--optimizer", choices=["adamw", "torch.adam"], default="adamw", help="optimizer type")
parser.add_argument("--final_div_factor", type=float, default=1e4, help="final div factor of linear decay scheduler")
parser.add_argument("--output_dir", type=str, default="", help="the path for saving intermediate model checkpoints.")
parser.add_argument("--lr_scheduler", type=str, default="linear_decay", help="lr scheduler")
parser.add_argument("--data_sign", type=str, default="en_conll03", help="data signature for the dataset.")
parser.add_argument("--polydecay_ratio", type=float, default=4, help="ratio for polydecay learing rate scheduler.")
parser.add_argument("--do_lowercase", action="store_true", )
parser.add_argument("--data_file_suffix", type=str, default=".char.bmes")
parser.add_argument("--lr_scheulder", type=str, default="polydecay")
parser.add_argument("--lr_mini", type=float, default=-1)
parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for.")
return parser
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.optimizer == "adamw":
optimizer = AdamW(optimizer_grouped_parameters,
betas=(0.9, 0.98), # according to RoBERTa paper
lr=self.args.lr,
eps=self.args.adam_epsilon,)
elif self.optimizer == "torch.adam":
optimizer = torch.optim.AdamW(optimizer_grouped_parameters,
lr=self.args.lr,
eps=self.args.adam_epsilon,
weight_decay=self.args.weight_decay)
else:
raise ValueError("Optimizer type does not exist.")
num_gpus = len([x for x in str(self.args.gpus).split(",") if x.strip()])
t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs
warmup_steps = int(self.args.warmup_proportion * t_total)
if self.args.lr_scheduler == "onecycle":
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),
final_div_factor=self.args.final_div_factor,
total_steps=t_total, anneal_strategy='linear')
elif self.args.lr_scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif self.args.lr_scheulder == "polydecay":
if self.args.lr_mini == -1:
lr_mini = self.args.lr / self.args.polydecay_ratio
else:
lr_mini = self.args.lr_mini
scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)
else:
raise ValueError
return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
def forward(self, input_ids, token_type_ids, attention_mask):
return self.model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
def compute_loss(self, sequence_logits, sequence_labels, input_mask=None):
if input_mask is not None:
active_loss = input_mask.view(-1) == 1
active_logits = sequence_logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, sequence_labels.view(-1), torch.tensor(self.loss_func.ignore_index).type_as(sequence_labels)
)
loss = self.loss_func(active_logits, active_labels)
else:
loss = self.loss_func(sequence_logits.view(-1, self.num_labels), sequence_labels.view(-1))
return loss
def training_step(self, batch, batch_idx):
|
def validation_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"val_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)), is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label, input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
all_counts = torch.stack([x[f'span_f1_stats'] for x in outputs]).view(-1, 3).sum(0)
span_tp, span_fp, span_fn = all_counts
span_recall = span_tp / (span_tp + span_fn + 1e-10)
span_precision = span_tp / (span_tp + span_fp + 1e-10)
span_f1 = span_precision * span_recall * 2 / (span_recall + span_precision + 1e-10)
tensorboard_logs[f"span_precision"] = span_precision
tensorboard_logs[f"span_recall"] = span_recall
tensorboard_logs[f"span_f1"] = span_f1
self.result_logger.info(f"EVAL INFO -> current_epoch is: {self.trainer.current_epoch}, current_global_step is: {self.trainer.global_step} ")
self.result_logger.info(f"EVAL INFO -> valid_f1 is: {span_f1}")
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def test_step(self, batch, batch_idx):
output = {}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
batch_size = token_input_ids.shape[0]
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
output[f"test_loss"] = loss
sequence_pred_lst = transform_predictions_to_labels(logits.view(batch_size, -1, len(self.task_labels)),
is_wordpiece_mask, self.task_idx2label, input_type="logit")
sequence_gold_lst = transform_predictions_to_labels(sequence_labels, is_wordpiece_mask, self.task_idx2label,
input_type="label")
span_f1_stats = self.span_f1(sequence_pred_lst, sequence_gold_lst)
output["span_f1_stats"] = span_f1_stats
return output
def test_epoch_end(self, outputs) -> Dict[str, Dict[str, Tensor]]:
avg_loss = torch.stack([x['test_loss']
|
tf_board_logs = {"lr": self.trainer.optimizers[0].param_groups[0]['lr']}
token_input_ids, token_type_ids, attention_mask, sequence_labels, is_wordpiece_mask = batch
logits = self.model(token_input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
loss = self.compute_loss(logits, sequence_labels, input_mask=attention_mask)
tf_board_logs[f"train_loss"] = loss
return {'loss': loss, 'log': tf_board_logs}
|
identifier_body
|
user-order.component.ts
|
weightCache: { [key: string]: string } = {}
readonly = false
tablePageIndex = 1
tablePageSize = 5
pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200]
defaultCar: CarOrder
popVisible: { [key: string]: boolean } = {}
constructor(
private route: ActivatedRoute,
private location: Location,
private router: Router,
private subject: NzModalSubject,
private http: HttpClient,
private message: NzMessageService,
private modal: NzModalService,
) { }
addToCar() {
this.modal.open({
title: '选择车次',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
width: 640,
componentParams: {
onSelect: (selectedCar: CarOrder) => {
const req = {
car: selectedCar.id,
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = req.car)
})
}
}
})
}
removeFromCar() {
this.modal.confirm({
title: '移除',
content: `确认移除吗?`,
onOk: () => {
const req = {
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = undefined)
})
}
})
}
refreshStatus() {
const allChecked = this.values.every(value => value.checked === true)
const allUnChecked = this.values.every(value => !value.checked)
this.allChecked = allChecked
this.indeterminate = (!allChecked) && (!allUnChecked)
this.checkedItems = this.values.filter(value => value.checked)
this.checkedNumber = this.checkedItems.length
}
checkAll(value) {
if (value) {
this.values.forEach(item => {
item.checked = true
})
} else {
this.values.forEach(item => {
item.checked = false
})
}
this.refreshStatus()
}
descCar(car: CarOrder) {
if (car) {
return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}`
} else {
return '未选择'
}
}
selectCar() {
this.modal.open({
title: '选择默认车次(本单中的单位默认加入该车次)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: this.defaultCar,
onSelect: (selectedCar: CarOrder) => {
this.defaultCar = selectedCar
this.order.car = this.defaultCar.id
this.orderChange()
}
}
})
}
itemSelectCar(item: OrderItem, index: number) {
let carOrder
if (item.car) {
carOrder = { id: item.car }
} else {
if (!item.id) {
carOrder = this.defaultCar
}
}
this.popVisible[index] = false
this.modal.open({
title: '选择车次(只是该单位)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: carOrder,
onSelect: (selectedCar: CarOrder) => {
const isNewCar = item.car !== selectedCar.id
item.car = selectedCar.id
if (item.id && isNewCar) {
item.status = '待更新'
item.subject.next(item)
}
}
}
})
}
itemDeleteCar(item: OrderItem, index: number) {
this.popVisible[index] = false
item.car = null
item.status = '待更新'
item.subject.next(item)
}
refreshTableData() {
this.values = [...this.values]
}
itemIndex(index: number) {
return (this.tablePageIndex - 1) * this.tablePageSize + index
}
isFinished() {
return OrderStatus.FINISHED === this.order.status
}
orderChange() {
this.orderSubject.next()
}
onEnter(weight: string) {
this.doAddNewItem(weight)
}
itemChange(item: OrderItem, index: number) {
if (index === this.values.length - 1) {
this.doAddEmptyItem()
}
if (item.id) {
// format error fixed by deleting the character: weight matches the cache again
if (item.weight
|
em.id]) {
if (item.error) {
item.error = false
item.status = '上传完成'
}
return
}
item.status = '待更新'
} else {
item.status = '待上传'
}
item.subject.next(item)
}
itemBlur(item: OrderItem, index: number) {
if (item.weight && !this.readonly) {
this.itemChange(item, index)
}
}
remove(item: UserOrder, index: number) {
if (item.id) {
this.modal.confirm({
title: '删除',
content: '确认删除?',
onOk: () => {
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => {
this.values.splice(index, 1)
this.refreshTableData()
this.calcCount()
delete this.weightCache[item.id]
})
}
})
} else {
this.values.splice(index, 1)
this.refreshTableData()
}
}
doUpload(item: OrderItem) {
const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/
if (item.weight && r.test(item.weight.toString())) {
item.error = false
if (item.id) {
// update
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
this.calcCount()
})
} else {
// insert
if (!item.dbStatus) {
const user = this.order.id
if (user) {
item.user = user
item.dbStatus = DbStatus.CREATING
item.status = '数据创建中...'
if (this.defaultCar && this.defaultCar.id) {
item.car = this.defaultCar.id
}
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
item.dbStatus = DbStatus.CREATED
this.calcCount()
})
}
}
}
} else {
item.status = '格式错误'
item.error = true
}
}
calcCount() {
let c = 0
for (const o of this.values) {
if (o.status === '上传完成') {
c += 1
}
}
this.count = c
}
isItemSaved(item: OrderItem) {
return item.status === '上传完成'
}
itemStyle(item: OrderItem) {
if (this.isItemSaved(item)) {
return {
'color': 'green'
}
} else {
return {
'color': 'red'
}
}
}
doAddNewItem(weight: string) {
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight }
this.values.push(newItem)
this.refreshTableData()
this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize)
orderItemUploadSubject.next(newItem)
}
doAddEmptyItem() {
// one subject per item to reduce conflicts
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
this.values.push({ status: '待上传', subject: orderItemUploadSubject })
this.refreshTableData()
}
doCommit() {
this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => {
const order = res.data.order
const items = res.data.items
const feItems = this.values.filter(item => {
if (item.id) {
return true
} else {
return false
}
})
const warnings = []
if (feItems.length !== items.length) {
warnings.push('数量不一致
|
.toString() === this.weightCache[it
|
conditional_block
|
user-order.component.ts
|
weightCache: { [key: string]: string } = {}
readonly = false
tablePageIndex = 1
tablePageSize = 5
pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200]
defaultCar: CarOrder
popVisible: { [key: string]: boolean } = {}
constructor(
private route: ActivatedRoute,
private location: Location,
private router: Router,
private subject: NzModalSubject,
private http: HttpClient,
private message: NzMessageService,
private modal: NzModalService,
) { }
addToCar() {
this.modal.open({
title: '选择车次',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
width: 640,
componentParams: {
onSelect: (selectedCar: CarOrder) => {
const req = {
car: selectedCar.id,
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = req.car)
})
}
}
})
}
removeFromCar() {
this.modal.confirm({
title: '移除',
content: `确认移除吗?`,
onOk: () => {
const req = {
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = undefined)
})
}
})
}
refreshStatus() {
const allChecked = this.values.every(value => value.checked === true)
const allUnChecked = this.values.every(value => !value.checked)
this.allChecked = allChecked
this.indeterminate = (!allChecked) && (!allUnChecked)
this.checkedItems = this.values.filter(value => value.checked)
this.checkedNumber = this.checkedItems.length
}
checkAll(value) {
if (value) {
this.values.forEach(item => {
item.checked = true
})
} else {
this.values.forEach(item => {
item.checked = false
})
}
this.refreshStatus()
}
descCar(car: CarOrder) {
if (car) {
return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}`
} else {
return '未选择'
}
}
selectCar() {
this.modal.open({
title: '选择默认车次(本单中的单位默认加入该车次)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: this.defaultCar,
|
nSelect: (selectedCar: CarOrder) => {
this.defaultCar = selectedCar
this.order.car = this.defaultCar.id
this.orderChange()
}
}
})
}
itemSelectCar(item: OrderItem, index: number) {
let carOrder
if (item.car) {
carOrder = { id: item.car }
} else {
if (!item.id) {
carOrder = this.defaultCar
}
}
this.popVisible[index] = false
this.modal.open({
title: '选择车次(只是该单位)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: carOrder,
onSelect: (selectedCar: CarOrder) => {
const isNewCar = item.car !== selectedCar.id
item.car = selectedCar.id
if (item.id && isNewCar) {
item.status = '待更新'
item.subject.next(item)
}
}
}
})
}
itemDeleteCar(item: OrderItem, index: number) {
this.popVisible[index] = false
item.car = null
item.status = '待更新'
item.subject.next(item)
}
refreshTableData() {
this.values = [...this.values]
}
itemIndex(index: number) {
return (this.tablePageIndex - 1) * this.tablePageSize + index
}
isFinished() {
return OrderStatus.FINISHED === this.order.status
}
orderChange() {
this.orderSubject.next()
}
onEnter(weight: string) {
this.doAddNewItem(weight)
}
itemChange(item: OrderItem, index: number) {
if (index === this.values.length - 1) {
this.doAddEmptyItem()
}
if (item.id) {
// format error fixed by deleting the character: weight matches the cache again
if (item.weight.toString() === this.weightCache[item.id]) {
if (item.error) {
item.error = false
item.status = '上传完成'
}
return
}
item.status = '待更新'
} else {
item.status = '待上传'
}
item.subject.next(item)
}
itemBlur(item: OrderItem, index: number) {
if (item.weight && !this.readonly) {
this.itemChange(item, index)
}
}
remove(item: UserOrder, index: number) {
if (item.id) {
this.modal.confirm({
title: '删除',
content: '确认删除?',
onOk: () => {
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => {
this.values.splice(index, 1)
this.refreshTableData()
this.calcCount()
delete this.weightCache[item.id]
})
}
})
} else {
this.values.splice(index, 1)
this.refreshTableData()
}
}
doUpload(item: OrderItem) {
const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/
if (item.weight && r.test(item.weight.toString())) {
item.error = false
if (item.id) {
// update
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
this.calcCount()
})
} else {
// insert
if (!item.dbStatus) {
const user = this.order.id
if (user) {
item.user = user
item.dbStatus = DbStatus.CREATING
item.status = '数据创建中...'
if (this.defaultCar && this.defaultCar.id) {
item.car = this.defaultCar.id
}
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
item.dbStatus = DbStatus.CREATED
this.calcCount()
})
}
}
}
} else {
item.status = '格式错误'
item.error = true
}
}
calcCount() {
let c = 0
for (const o of this.values) {
if (o.status === '上传完成') {
c += 1
}
}
this.count = c
}
isItemSaved(item: OrderItem) {
return item.status === '上传完成'
}
itemStyle(item: OrderItem) {
if (this.isItemSaved(item)) {
return {
'color': 'green'
}
} else {
return {
'color': 'red'
}
}
}
doAddNewItem(weight: string) {
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight }
this.values.push(newItem)
this.refreshTableData()
this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize)
orderItemUploadSubject.next(newItem)
}
doAddEmptyItem() {
// one subject per item to reduce conflicts
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
this.values.push({ status: '待上传', subject: orderItemUploadSubject })
this.refreshTableData()
}
doCommit() {
this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => {
const order = res.data.order
const items = res.data.items
const feItems = this.values.filter(item => {
if (item.id) {
return true
} else {
return false
}
})
const warnings = []
if (feItems.length !== items.length) {
warnings.push('数量
|
o
|
identifier_name
|
user-order.component.ts
|
weightCache: { [key: string]: string } = {}
readonly = false
tablePageIndex = 1
tablePageSize = 5
pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200]
defaultCar: CarOrder
popVisible: { [key: string]: boolean } = {}
constructor(
private route: ActivatedRoute,
private location: Location,
private router: Router,
private subject: NzModalSubject,
private http: HttpClient,
private message: NzMessageService,
private modal: NzModalService,
) { }
addToCar() {
this.modal.open({
title: '选择车次',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
width: 640,
componentParams: {
onSelect: (selectedCar: CarOrder) => {
const req = {
car: selectedCar.id,
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = req.car)
})
}
}
})
}
removeFromCar() {
this.modal.confirm({
title: '移除',
content: `确认移除吗?`,
onOk: () => {
const req = {
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = undefined)
})
}
})
}
refreshStatus() {
const allChecked = this.values.every(value => value.checked === true)
const allUnChecked = this.values.every(value => !value.checked)
this.allChecked = allChecked
this.indeterminate = (!allChecked) && (!allUnChecked)
this.checkedItems = this.values.filter(value => value.checked)
this.checkedNumber = this.checkedItems.length
}
checkAll(value) {
if (value) {
this.values.forEach(item => {
item.checked = true
})
} else {
this.values.forEach(item => {
item.checked = false
})
}
this.refreshStatus()
}
descCar(car: CarOrder) {
if (car) {
return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}`
} else {
return '未选择'
}
}
selectCar() {
this.modal.open({
title: '选择默认车次(本单中的单位默认加入该车次)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: this.de
|
ltCar,
onSelect: (selectedCar: CarOrder) => {
this.defaultCar = selectedCar
this.order.car = this.defaultCar.id
this.orderChange()
}
}
})
}
itemSelectCar(item: OrderItem, index: number) {
let carOrder
if (item.car) {
carOrder = { id: item.car }
} else {
if (!item.id) {
carOrder = this.defaultCar
}
}
this.popVisible[index] = false
this.modal.open({
title: '选择车次(只是该单位)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: carOrder,
onSelect: (selectedCar: CarOrder) => {
const isNewCar = item.car !== selectedCar.id
item.car = selectedCar.id
if (item.id && isNewCar) {
item.status = '待更新'
item.subject.next(item)
}
}
}
})
}
itemDeleteCar(item: OrderItem, index: number) {
this.popVisible[index] = false
item.car = null
item.status = '待更新'
item.subject.next(item)
}
refreshTableData() {
this.values = [...this.values]
}
itemIndex(index: number) {
return (this.tablePageIndex - 1) * this.tablePageSize + index
}
isFinished() {
return OrderStatus.FINISHED === this.order.status
}
orderChange() {
this.orderSubject.next()
}
onEnter(weight: string) {
this.doAddNewItem(weight)
}
itemChange(item: OrderItem, index: number) {
if (index === this.values.length - 1) {
this.doAddEmptyItem()
}
if (item.id) {
// format error fixed by deleting the character: weight matches the cache again
if (item.weight.toString() === this.weightCache[item.id]) {
if (item.error) {
item.error = false
item.status = '上传完成'
}
return
}
item.status = '待更新'
} else {
item.status = '待上传'
}
item.subject.next(item)
}
itemBlur(item: OrderItem, index: number) {
if (item.weight && !this.readonly) {
this.itemChange(item, index)
}
}
remove(item: UserOrder, index: number) {
if (item.id) {
this.modal.confirm({
title: '删除',
content: '确认删除?',
onOk: () => {
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => {
this.values.splice(index, 1)
this.refreshTableData()
this.calcCount()
delete this.weightCache[item.id]
})
}
})
} else {
this.values.splice(index, 1)
this.refreshTableData()
}
}
doUpload(item: OrderItem) {
const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/
if (item.weight && r.test(item.weight.toString())) {
item.error = false
if (item.id) {
// update
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
this.calcCount()
})
} else {
// insert
if (!item.dbStatus) {
const user = this.order.id
if (user) {
item.user = user
item.dbStatus = DbStatus.CREATING
item.status = '数据创建中...'
if (this.defaultCar && this.defaultCar.id) {
item.car = this.defaultCar.id
}
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
item.dbStatus = DbStatus.CREATED
this.calcCount()
})
}
}
}
} else {
item.status = '格式错误'
item.error = true
}
}
calcCount() {
let c = 0
for (const o of this.values) {
if (o.status === '上传完成') {
c += 1
}
}
this.count = c
}
isItemSaved(item: OrderItem) {
return item.status === '上传完成'
}
itemStyle(item: OrderItem) {
if (this.isItemSaved(item)) {
return {
'color': 'green'
}
} else {
return {
'color': 'red'
}
}
}
doAddNewItem(weight: string) {
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight }
this.values.push(newItem)
this.refreshTableData()
this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize)
orderItemUploadSubject.next(newItem)
}
doAddEmptyItem() {
// one subject per item to reduce conflicts
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
this.values.push({ status: '待上传', subject: orderItemUploadSubject })
this.refreshTableData()
}
doCommit() {
this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => {
const order = res.data.order
const items = res.data.items
const feItems = this.values.filter(item => {
if (item.id) {
return true
} else {
return false
}
})
const warnings = []
if (feItems.length !== items.length) {
warnings.push('数量
|
fau
|
identifier_body
|
user-order.component.ts
|
0
weightCache: { [key: string]: string } = {}
readonly = false
tablePageIndex = 1
tablePageSize = 5
pageSizeSelectorValues = [5, 10, 20, 30, 40, 50, 100, 200]
defaultCar: CarOrder
popVisible: { [key: string]: boolean } = {}
constructor(
private route: ActivatedRoute,
private location: Location,
private router: Router,
private subject: NzModalSubject,
private http: HttpClient,
private message: NzMessageService,
private modal: NzModalService,
) { }
addToCar() {
this.modal.open({
title: '选择车次',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
width: 640,
componentParams: {
onSelect: (selectedCar: CarOrder) => {
const req = {
car: selectedCar.id,
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiResObj>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = req.car)
})
}
}
})
}
removeFromCar() {
this.modal.confirm({
title: '移除',
content: `确认移除吗?`,
onOk: () => {
const req = {
ids: this.checkedItems.map(item => item.id),
isByUser: false,
}
this.http.post<ApiRes<UserOrder>>(API_ORDER_ITEM_BATCH_UPDATE_CAR, req).subscribe(res => {
this.message.success('操作成功')
this.checkedItems.forEach(item => item.car = undefined)
})
}
})
}
refreshStatus() {
const allChecked = this.values.every(value => value.checked === true)
const allUnChecked = this.values.every(value => !value.checked)
|
this.allChecked = allChecked
this.indeterminate = (!allChecked) && (!allUnChecked)
this.checkedItems = this.values.filter(value => value.checked)
this.checkedNumber = this.checkedItems.length
}
checkAll(value) {
if (value) {
this.values.forEach(item => {
item.checked = true
})
} else {
this.values.forEach(item => {
item.checked = false
})
}
this.refreshStatus()
}
descCar(car: CarOrder) {
if (car) {
return `单号: ${car.id}, 姓名: ${car.driver}, 日期: ${car.createdAt}`
} else {
return '未选择'
}
}
selectCar() {
this.modal.open({
title: '选择默认车次(本单中的单位默认加入该车次)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: this.defaultCar,
onSelect: (selectedCar: CarOrder) => {
this.defaultCar = selectedCar
this.order.car = this.defaultCar.id
this.orderChange()
}
}
})
}
itemSelectCar(item: OrderItem, index: number) {
let carOrder
if (item.car) {
carOrder = { id: item.car }
} else {
if (!item.id) {
carOrder = this.defaultCar
}
}
this.popVisible[index] = false
this.modal.open({
title: '选择车次(只是该单位)',
content: CarSelectorComponent,
onOk() { },
onCancel() { },
footer: false,
componentParams: {
data: carOrder,
onSelect: (selectedCar: CarOrder) => {
const isNewCar = item.car !== selectedCar.id
item.car = selectedCar.id
if (item.id && isNewCar) {
item.status = '待更新'
item.subject.next(item)
}
}
}
})
}
itemDeleteCar(item: OrderItem, index: number) {
this.popVisible[index] = false
item.car = null
item.status = '待更新'
item.subject.next(item)
}
refreshTableData() {
this.values = [...this.values]
}
itemIndex(index: number) {
return (this.tablePageIndex - 1) * this.tablePageSize + index
}
isFinished() {
return OrderStatus.FINISHED === this.order.status
}
orderChange() {
this.orderSubject.next()
}
onEnter(weight: string) {
this.doAddNewItem(weight)
}
itemChange(item: OrderItem, index: number) {
if (index === this.values.length - 1) {
this.doAddEmptyItem()
}
if (item.id) {
// format error fixed by deleting the character: weight matches the cache again
if (item.weight.toString() === this.weightCache[item.id]) {
if (item.error) {
item.error = false
item.status = '上传完成'
}
return
}
item.status = '待更新'
} else {
item.status = '待上传'
}
item.subject.next(item)
}
itemBlur(item: OrderItem, index: number) {
if (item.weight && !this.readonly) {
this.itemChange(item, index)
}
}
remove(item: UserOrder, index: number) {
if (item.id) {
this.modal.confirm({
title: '删除',
content: '确认删除?',
onOk: () => {
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_DELETE, { id: item.id }).subscribe(res => {
this.values.splice(index, 1)
this.refreshTableData()
this.calcCount()
delete this.weightCache[item.id]
})
}
})
} else {
this.values.splice(index, 1)
this.refreshTableData()
}
}
doUpload(item: OrderItem) {
const r = /^[1-9]\d{0,3}(\.\d{1}){0,1}$/
if (item.weight && r.test(item.weight.toString())) {
item.error = false
if (item.id) {
// update
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_UPDATE, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
this.calcCount()
})
} else {
// insert
if (!item.dbStatus) {
const user = this.order.id
if (user) {
item.user = user
item.dbStatus = DbStatus.CREATING
item.status = '数据创建中...'
if (this.defaultCar && this.defaultCar.id) {
item.car = this.defaultCar.id
}
this.http.post<ApiRes<OrderItem>>(API_ORDER_ITEM_INSERT, clearNewOrderItem(item)).subscribe(res => {
item.status = '上传完成'
item.id = res.data.id
this.weightCache[item.id] = item.weight.toString()
item.dbStatus = DbStatus.CREATED
this.calcCount()
})
}
}
}
} else {
item.status = '格式错误'
item.error = true
}
}
calcCount() {
let c = 0
for (const o of this.values) {
if (o.status === '上传完成') {
c += 1
}
}
this.count = c
}
isItemSaved(item: OrderItem) {
return item.status === '上传完成'
}
itemStyle(item: OrderItem) {
if (this.isItemSaved(item)) {
return {
'color': 'green'
}
} else {
return {
'color': 'red'
}
}
}
doAddNewItem(weight: string) {
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
const newItem = { status: '待上传', subject: orderItemUploadSubject, weight: weight }
this.values.push(newItem)
this.refreshTableData()
this.tablePageIndex = Math.ceil(this.values.length / this.tablePageSize)
orderItemUploadSubject.next(newItem)
}
doAddEmptyItem() {
// one subject per item to reduce conflicts
const orderItemUploadSubject = new Subject<OrderItem>()
orderItemUploadSubject.debounceTime(500).subscribe(item => {
this.doUpload(item)
})
this.values.push({ status: '待上传', subject: orderItemUploadSubject })
this.refreshTableData()
}
doCommit() {
this.http.get<ApiRes<{ order: UserOrder, items: OrderItem[] }>>(`${API_USER_ORDER_DETAIL}/${this.order.id}`).subscribe(res => {
const order = res.data.order
const items = res.data.items
const feItems = this.values.filter(item => {
if (item.id) {
return true
} else {
return false
}
})
const warnings = []
if (feItems.length !== items.length) {
warnings.push('数量不一致
|
random_line_split
|
|
app.js
|
1];
last.lesson = info.lesson;
} else if (info.sublesson != "" && info.sublesson != last.sublesson) {
counters[2] = counters[2] + 1;
counters[3] = -1;
obj.sublesson = counters[2];
last.sublesson = info.sublesson;
} else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) {
counters[3] = counters[3] + 1;
obj.subsublesson = counters[3];
        last.subsublesson = info.subsublesson;
}
if (counters[0] != -1)
obj.part = counters[0];
if (counters[1] != -1)
obj.lesson = counters[1];
if (counters[2] != -1)
obj.sublesson = counters[2];
if (counters[3] != -1)
obj.subsublesson = counters[3];
}
function generateJavascriptTOC (options) {
var s = "define([], function () {\n";
var returnObj = {
toc: [],
markers: []
};
var lastTopLevel = undefined;
if (options.lastDepth) {
for (var i = 0; i < options.lastDepth.length; i++) {
if (options.lastDepth[i] != undefined) {
lastTopLevel = parseInt(options.lastDepth[i]);
break;
}
}
}
for (var i = 0; i < options.toc.length; i++) {
var entry = options.toc[i];
var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration};
if (entry.captions) obj.captions = entry.captions;
if (entry.transcript) obj.transcript = entry.transcript;
// add this TOC entry to the search index
var doc = {
"title": entry.desc,
"id": i
};
options.idx.add(doc);
if (options.zipfiles) {
/*
// THEORY: lessons between 1 and n-1 get zipfile links
var lessonNumber = parseInt(entry.lesson);
if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) {
var lessondigits = parseInt(entry.lesson);
if (lessondigits < 10) lessondigits = "0" + lessondigits;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip");
}
*/
// NEW THEORY: top-level depths get zipfile links
var depths = entry.depth.split(",");
var count = 0;
var first_level = undefined;
for (var j = 0; j < depths.length; j++) {
if (depths[j] != undefined) {
count++;
if (first_level == undefined) first_level = depths[j];
}
}
if (count == 1) {
var d = parseInt(first_level);
if (d > 0 && d < lastTopLevel) {
if (d < 10) d = "0" + d;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip");
}
}
}
if (entry.isVideo) {
obj.video = path.join(options.mediaPath, entry.video.toLowerCase());
} else if (entry.video) {
if (entry.video.toLowerCase().indexOf(".html") != -1) {
obj.src = entry.video;
} else {
obj.video = entry.video;
}
}
if (entry.disabled) {
obj.disabled = true;
}
returnObj.toc.push(obj);
}
returnObj.projectTitle = options.title;
returnObj.bannerDownloadLabel = options.bannerDownloadLabel;
returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined;
returnObj.posterImageData = options.posterImageData;
options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});";
}
function parseInfoFromText (params) {
var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description };
var found = false;
if (!found) {
// look for: "Lesson _: Title" in filename
reg = /^lesson (.*):\s(.*)/i;
res = reg.exec(params.filename);
if (res) {
obj.short = res[1];
found = true;
}
}
if (!found) {
// X.Y Title in description
reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/;
res = reg.exec(params.description);
if (res) {
obj.short = res[1] + "." + res[2];
obj.desc = res[3];
found = true;
}
}
return obj;
}
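// streamToString buffers every 'data' chunk and hands the concatenated string to the callback on 'end'.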
function streamToString (stream, cb) {
var chunks = [];
stream.on('data', function (chunk) {
chunks.push(chunk);
});
stream.on('end', function () {
cb(chunks.join(''));
});
}
function processTranscript (options) {
var returnDir = options.name + options.timestamp;
var targetDir = "temp/" + returnDir + "/";
// unzip transcript zip
// convert srt to vtt
// associate videos with transcript files
// add transcript (vtt or dbxf) to lunr search index
// zip up vtt, dbxf, and search index
yauzl.open(options.transcript_path, function (err, zipfile) {
if (err) throw err;
zipfile.on("close", function () {
doneWithTranscript(options);
});
zipfile.on("entry", function (entry) {
if (/\/$/.test(entry.fileName)) {
// directory file names end with '/'
return;
}
zipfile.openReadStream(entry, function (err, readStream) {
if (err) throw err;
readStream.setEncoding('utf8');
// process the srt files
if (entry.fileName.indexOf(".srt") != -1) {
// THEORY: find the toc video file that most closely matches this srt file
var tocReference = findTOCReference(options.toc, entry.fileName);
var newFilename = entry.fileName.replace(".srt", ".vtt");
streamToString(readStream, function (s) {
var writePath = path.join(targetDir + "/media/vtt/", newFilename);
var filePath = path.dirname(writePath);
makeAllPaths(filePath);
s = s.replace(/\r/g, "");
var searchableText = "";
var output = "WEBVTT\n\n";
var lines = s.split("\n");
var count = 0;
for (var i = 0; i < lines.length; i++) {
var line = lines[i];
if (line == "") count = 0;
else count++;
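                // count tracks the position inside the current SRT cue: 1 = cue number, 2 = timing line, >2 = cue text.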
if (count == 2) {
// replace commas in timing lines with periods
line = line.replace(/,/g, ".");
// add line position to move the cue up a little (CSS was ineffective)
line += " line:80%";
} else if (count > 2) {
searchableText += line;
}
output += line + "\n";
}
output = output.trim();
fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"});
if (tocReference) {
var doc = {
"title": tocReference.title,
"body": searchableText,
"id": tocReference.index
};
options.toc[tocReference.index].captions = "media/vtt/" + newFilename;
var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp";
options.toc[tocReference.index].transcript = "media/transcript/" + transcriptFilename;
options.idx.add(doc);
}
});
} else if (entry.fileName.indexOf(".dfxp") != -1) {
var writePath = path.join(targetDir + "/media/transcript/", entry.fileName);
// ensure parent directory exists
var filePath = path.dirname(writePath);
makeAllPaths(filePath);
// write file
readStream.pipe(fs.createWriteStream(writePath));
}
});
});
});
}
function findTOCReference (toc, filename) {
var file = path.basename(filename, path.extname(filename));
// assuming the transcript file is in this format: 9780789756350-02_04_01.vtt
var dash = file.indexOf("-");
if (dash != -1) {
file = file.substr(dash + 1);
}
if (file)
|
{
for (var i = 0; i < toc.length; i++) {
var entry = toc[i];
if (entry.video && entry.video.indexOf(file) != -1) {
return {
title: entry.desc,
index: i
}
}
}
}
|
conditional_block
|
|
app.js
|
deleteFolderRecursive(curPath);
} else { // delete file
fs.unlinkSync(curPath);
}
});
fs.rmdirSync(path);
}
}
function makeAllPaths (dir)
|
function doConversion (options) {
options.timestamp = Date.now();
options.idx = lunr(function () {
this.field('title');
this.field('body');
});
var input = fs.readFileSync(options.path, "utf8");
var parseOptions = { delimiter: "\t", quote: "" };
parse(input, parseOptions, function(err, output) {
if (!err) {
processData(options, output);
if (options.transcript_path)
processTranscript(options);
else
doneWithTranscript(options);
} else {
console.log("error");
console.log(err);
}
});
}
function processPosterImage (options) {
if (options.posterFile) {
var imageURI = new Datauri(options.posterFile);
options.posterImageData = imageURI.content;
}
}
function processData (options, data) {
var toc = [];
var lastPart = -1, lastLesson = -1, lastSublesson = -1, lastSubsublesson = -1, lastDepth = undefined;
var last = [undefined, undefined, undefined, undefined];
var counters = [-1, -1, -1, -1];
for (var i = 0; i < data.length; i++) {
var row = data[i];
var obj = {};
var parsed = {
part: row[0],
lesson: row[1],
short: row[2],
sublesson: row[3],
subsublesson: row[4],
filename: row[5],
duration: row[6],
isDisabled: row[7]
};
var description = parsed.part;
if (description == "") description = parsed.lesson;
if (description == "") description = parsed.sublesson;
if (description == "") description = parsed.subsublesson;
parsed.description = description;
obj.video = parsed.filename;
var duration = parsed.duration;
if (duration) {
obj.isVideo = true;
obj.duration = duration;
} else {
obj.isVideo = false;
}
var info = parseInfoFromText(parsed);
parseDepthsFromFields(obj, info, last, counters);
obj.short = info.short;
obj.desc = info.desc;
obj.disabled = parsed.isDisabled;
if (obj.desc == "Learning Objectives") {
obj.short = obj.lesson + ".0";
}
var curDepth = [];
curDepth.push(obj.part);
curDepth.push(obj.lesson);
curDepth.push(obj.sublesson);
curDepth.push(obj.subsublesson);
obj.depth = "";
for (var j = 0; j < curDepth.length; j++) {
if (curDepth[j] != -1 && curDepth[j] != undefined) {
if (obj.depth != "") obj.depth += ",";
obj.depth += curDepth[j];
}
}
lastPart = obj.part;
lastLesson = obj.lesson;
lastSublesson = obj.sublesson;
lastSubsublesson = obj.subsublesson;
lastInfoLesson = info.lesson;
lastDepth = curDepth;
toc.push(obj);
}
options.toc = toc;
processPosterImage(options);
options.lastPart = lastPart;
options.lastLesson = lastLesson;
options.lastSublesson = lastSublesson;
options.lastDepth = lastDepth;
}
function parseDepthsFromFields (obj, info, last, counters) {
if (info.part != "" && info.part != last.part) {
counters[0] = counters[0] + 1;
counters[1] = counters[2] = counters[3] = -1;
obj.part = counters[0];
last.part = info.part;
} else if (info.lesson != "" && info.lesson != last.lesson) {
counters[1] = counters[1] + 1;
counters[2] = counters[3] = -1;
obj.lesson = counters[1];
last.lesson = info.lesson;
} else if (info.sublesson != "" && info.sublesson != last.sublesson) {
counters[2] = counters[2] + 1;
counters[3] = -1;
obj.sublesson = counters[2];
last.sublesson = info.sublesson;
} else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) {
counters[3] = counters[3] + 1;
obj.subsublesson = counters[3];
        last.subsublesson = info.subsublesson;
}
if (counters[0] != -1)
obj.part = counters[0];
if (counters[1] != -1)
obj.lesson = counters[1];
if (counters[2] != -1)
obj.sublesson = counters[2];
if (counters[3] != -1)
obj.subsublesson = counters[3];
}
function generateJavascriptTOC (options) {
var s = "define([], function () {\n";
var returnObj = {
toc: [],
markers: []
};
var lastTopLevel = undefined;
if (options.lastDepth) {
for (var i = 0; i < options.lastDepth.length; i++) {
if (options.lastDepth[i] != undefined) {
lastTopLevel = parseInt(options.lastDepth[i]);
break;
}
}
}
for (var i = 0; i < options.toc.length; i++) {
var entry = options.toc[i];
var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration};
if (entry.captions) obj.captions = entry.captions;
if (entry.transcript) obj.transcript = entry.transcript;
// add this TOC entry to the search index
var doc = {
"title": entry.desc,
"id": i
};
options.idx.add(doc);
if (options.zipfiles) {
/*
// THEORY: lessons between 1 and n-1 get zipfile links
var lessonNumber = parseInt(entry.lesson);
if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) {
var lessondigits = parseInt(entry.lesson);
if (lessondigits < 10) lessondigits = "0" + lessondigits;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip");
}
*/
// NEW THEORY: top-level depths get zipfile links
var depths = entry.depth.split(",");
var count = 0;
var first_level = undefined;
for (var j = 0; j < depths.length; j++) {
if (depths[j] != undefined) {
count++;
if (first_level == undefined) first_level = depths[j];
}
}
if (count == 1) {
var d = parseInt(first_level);
if (d > 0 && d < lastTopLevel) {
if (d < 10) d = "0" + d;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip");
}
}
}
if (entry.isVideo) {
obj.video = path.join(options.mediaPath, entry.video.toLowerCase());
} else if (entry.video) {
if (entry.video.toLowerCase().indexOf(".html") != -1) {
obj.src = entry.video;
} else {
obj.video = entry.video;
}
}
if (entry.disabled) {
obj.disabled = true;
}
returnObj.toc.push(obj);
}
returnObj.projectTitle = options.title;
returnObj.bannerDownloadLabel = options.bannerDownloadLabel;
returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined;
returnObj.posterImageData = options.posterImageData;
options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});";
}
function parseInfoFromText (params) {
var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description };
var found = false;
if (!found) {
// look for: "Lesson _: Title" in filename
reg = /^lesson (.*):\s(.*)/i;
res = reg.exec(params.filename);
if (res) {
obj.short = res[1];
found = true;
}
}
if (!found) {
// X.Y Title in description
reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/;
res = reg.exec(params.description);
if (res)
|
{
var paths = dir.split(path.sep);
var curPath = "";
for (var i = 0; i < paths.length; i++) {
curPath += paths[i] + path.sep;
try {
fs.accessSync(curPath, fs.W_OK);
} catch (err) {
fs.mkdirSync(curPath);
}
}
}
|
identifier_body
|
app.js
|
.lesson;
} else if (info.sublesson != "" && info.sublesson != last.sublesson) {
counters[2] = counters[2] + 1;
counters[3] = -1;
obj.sublesson = counters[2];
last.sublesson = info.sublesson;
} else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) {
counters[3] = counters[3] + 1;
obj.subsublesson = counters[3];
        last.subsublesson = info.subsublesson;
}
if (counters[0] != -1)
obj.part = counters[0];
if (counters[1] != -1)
obj.lesson = counters[1];
if (counters[2] != -1)
obj.sublesson = counters[2];
if (counters[3] != -1)
obj.subsublesson = counters[3];
}
function generateJavascriptTOC (options) {
var s = "define([], function () {\n";
var returnObj = {
toc: [],
markers: []
};
var lastTopLevel = undefined;
if (options.lastDepth) {
for (var i = 0; i < options.lastDepth.length; i++) {
if (options.lastDepth[i] != undefined) {
lastTopLevel = parseInt(options.lastDepth[i]);
break;
}
}
}
for (var i = 0; i < options.toc.length; i++) {
var entry = options.toc[i];
var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration};
if (entry.captions) obj.captions = entry.captions;
if (entry.transcript) obj.transcript = entry.transcript;
// add this TOC entry to the search index
var doc = {
"title": entry.desc,
"id": i
};
options.idx.add(doc);
if (options.zipfiles) {
/*
// THEORY: lessons between 1 and n-1 get zipfile links
var lessonNumber = parseInt(entry.lesson);
if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) {
var lessondigits = parseInt(entry.lesson);
if (lessondigits < 10) lessondigits = "0" + lessondigits;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip");
}
*/
// NEW THEORY: top-level depths get zipfile links
var depths = entry.depth.split(",");
var count = 0;
var first_level = undefined;
for (var j = 0; j < depths.length; j++) {
if (depths[j] != undefined) {
count++;
if (first_level == undefined) first_level = depths[j];
}
}
if (count == 1) {
var d = parseInt(first_level);
if (d > 0 && d < lastTopLevel) {
if (d < 10) d = "0" + d;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip");
}
}
}
if (entry.isVideo) {
obj.video = path.join(options.mediaPath, entry.video.toLowerCase());
} else if (entry.video) {
if (entry.video.toLowerCase().indexOf(".html") != -1) {
obj.src = entry.video;
} else {
obj.video = entry.video;
}
}
if (entry.disabled) {
obj.disabled = true;
}
returnObj.toc.push(obj);
}
returnObj.projectTitle = options.title;
returnObj.bannerDownloadLabel = options.bannerDownloadLabel;
returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined;
returnObj.posterImageData = options.posterImageData;
options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});";
}
function parseInfoFromText (params) {
var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description };
var found = false;
if (!found) {
// look for: "Lesson _: Title" in filename
reg = /^lesson (.*):\s(.*)/i;
res = reg.exec(params.filename);
if (res) {
obj.short = res[1];
found = true;
}
}
if (!found) {
// X.Y Title in description
reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/;
res = reg.exec(params.description);
if (res) {
obj.short = res[1] + "." + res[2];
obj.desc = res[3];
found = true;
}
}
return obj;
}
function streamToString (stream, cb) {
var chunks = [];
stream.on('data', function (chunk) {
chunks.push(chunk);
});
stream.on('end', function () {
cb(chunks.join(''));
});
}
function processTranscript (options) {
var returnDir = options.name + options.timestamp;
var targetDir = "temp/" + returnDir + "/";
// unzip transcript zip
// convert srt to vtt
// associate videos with transcript files
// add transcript (vtt or dbxf) to lunr search index
// zip up vtt, dbxf, and search index
yauzl.open(options.transcript_path, function (err, zipfile) {
if (err) throw err;
zipfile.on("close", function () {
doneWithTranscript(options);
});
zipfile.on("entry", function (entry) {
if (/\/$/.test(entry.fileName)) {
// directory file names end with '/'
return;
}
zipfile.openReadStream(entry, function (err, readStream) {
if (err) throw err;
readStream.setEncoding('utf8');
// process the srt files
if (entry.fileName.indexOf(".srt") != -1) {
// THEORY: find the toc video file that most closely matches this srt file
var tocReference = findTOCReference(options.toc, entry.fileName);
var newFilename = entry.fileName.replace(".srt", ".vtt");
streamToString(readStream, function (s) {
var writePath = path.join(targetDir + "/media/vtt/", newFilename);
var filePath = path.dirname(writePath);
makeAllPaths(filePath);
s = s.replace(/\r/g, "");
var searchableText = "";
var output = "WEBVTT\n\n";
var lines = s.split("\n");
var count = 0;
for (var i = 0; i < lines.length; i++) {
var line = lines[i];
if (line == "") count = 0;
else count++;
if (count == 2) {
// replace commas in timing lines with periods
line = line.replace(/,/g, ".");
// add line position to move the cue up a little (CSS was ineffective)
line += " line:80%";
} else if (count > 2) {
searchableText += line;
}
output += line + "\n";
}
output = output.trim();
fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"});
if (tocReference) {
var doc = {
"title": tocReference.title,
"body": searchableText,
"id": tocReference.index
};
options.toc[tocReference.index].captions = "media/vtt/" + newFilename;
var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp";
options.toc[tocReference.index].transcript = "media/transcript/" + transcriptFilename;
options.idx.add(doc);
}
});
} else if (entry.fileName.indexOf(".dfxp") != -1) {
var writePath = path.join(targetDir + "/media/transcript/", entry.fileName);
// ensure parent directory exists
var filePath = path.dirname(writePath);
makeAllPaths(filePath);
// write file
readStream.pipe(fs.createWriteStream(writePath));
}
});
});
});
}
function findTOCReference (toc, filename) {
var file = path.basename(filename, path.extname(filename));
// assuming the transcript file is in this format: 9780789756350-02_04_01.vtt
var dash = file.indexOf("-");
if (dash != -1) {
file = file.substr(dash + 1);
}
if (file) {
for (var i = 0; i < toc.length; i++) {
var entry = toc[i];
if (entry.video && entry.video.indexOf(file) != -1) {
return {
title: entry.desc,
index: i
}
}
}
}
return undefined;
}
function
|
includeSearch
|
identifier_name
|
|
app.js
|
j++) {
if (curDepth[j] != -1 && curDepth[j] != undefined) {
if (obj.depth != "") obj.depth += ",";
obj.depth += curDepth[j];
}
}
lastPart = obj.part;
lastLesson = obj.lesson;
lastSublesson = obj.sublesson;
lastSubsublesson = obj.subsublesson;
lastInfoLesson = info.lesson;
lastDepth = curDepth;
toc.push(obj);
}
options.toc = toc;
processPosterImage(options);
options.lastPart = lastPart;
options.lastLesson = lastLesson;
options.lastSublesson = lastSublesson;
options.lastDepth = lastDepth;
}
function parseDepthsFromFields (obj, info, last, counters) {
if (info.part != "" && info.part != last.part) {
counters[0] = counters[0] + 1;
counters[1] = counters[2] = counters[3] = -1;
obj.part = counters[0];
last.part = info.part;
} else if (info.lesson != "" && info.lesson != last.lesson) {
counters[1] = counters[1] + 1;
counters[2] = counters[3] = -1;
obj.lesson = counters[1];
last.lesson = info.lesson;
} else if (info.sublesson != "" && info.sublesson != last.sublesson) {
counters[2] = counters[2] + 1;
counters[3] = -1;
obj.sublesson = counters[2];
last.sublesson = info.sublesson;
} else if (info.subsublesson != "" && info.subsublesson != last.subsublesson) {
counters[3] = counters[3] + 1;
obj.subsublesson = counters[3];
        last.subsublesson = info.subsublesson;
}
if (counters[0] != -1)
obj.part = counters[0];
if (counters[1] != -1)
obj.lesson = counters[1];
if (counters[2] != -1)
obj.sublesson = counters[2];
if (counters[3] != -1)
obj.subsublesson = counters[3];
}
function generateJavascriptTOC (options) {
var s = "define([], function () {\n";
var returnObj = {
toc: [],
markers: []
};
var lastTopLevel = undefined;
if (options.lastDepth) {
for (var i = 0; i < options.lastDepth.length; i++) {
if (options.lastDepth[i] != undefined) {
lastTopLevel = parseInt(options.lastDepth[i]);
break;
}
}
}
for (var i = 0; i < options.toc.length; i++) {
var entry = options.toc[i];
var obj = {depth: entry.depth, short: entry.short, desc: entry.desc, duration: entry.duration};
if (entry.captions) obj.captions = entry.captions;
if (entry.transcript) obj.transcript = entry.transcript;
// add this TOC entry to the search index
var doc = {
"title": entry.desc,
"id": i
};
options.idx.add(doc);
if (options.zipfiles) {
/*
// THEORY: lessons between 1 and n-1 get zipfile links
var lessonNumber = parseInt(entry.lesson);
if (lessonNumber > 0 && lessonNumber < options.lastLesson && (entry.sublesson === "" || entry.sublesson === undefined)) {
var lessondigits = parseInt(entry.lesson);
if (lessondigits < 10) lessondigits = "0" + lessondigits;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + lessondigits + ".zip");
}
*/
// NEW THEORY: top-level depths get zipfile links
var depths = entry.depth.split(",");
var count = 0;
var first_level = undefined;
for (var j = 0; j < depths.length; j++) {
if (depths[j] != undefined) {
count++;
if (first_level == undefined) first_level = depths[j];
}
}
if (count == 1) {
var d = parseInt(first_level);
if (d > 0 && d < lastTopLevel) {
if (d < 10) d = "0" + d;
obj.download = path.join(options.mediaPath, options.isbn + "-lesson_" + d + ".zip");
}
}
}
if (entry.isVideo) {
obj.video = path.join(options.mediaPath, entry.video.toLowerCase());
} else if (entry.video) {
if (entry.video.toLowerCase().indexOf(".html") != -1) {
obj.src = entry.video;
} else {
obj.video = entry.video;
}
}
if (entry.disabled) {
obj.disabled = true;
}
returnObj.toc.push(obj);
}
returnObj.projectTitle = options.title;
returnObj.bannerDownloadLabel = options.bannerDownloadLabel;
returnObj.bannerDownloadLink = options.bannerDownloadLink ? path.join(options.mediaPath, options.bannerDownloadLink) : undefined;
returnObj.posterImageData = options.posterImageData;
options.tocJS = s + "return " + JSON.stringify(returnObj) + ";\n});";
}
function parseInfoFromText (params) {
var obj = { part: params.part, lesson: params.lesson, sublesson: params.sublesson, subsublesson: params.subsublesson, short: params.short, desc: params.description };
var found = false;
if (!found) {
// look for: "Lesson _: Title" in filename
reg = /^lesson (.*):\s(.*)/i;
res = reg.exec(params.filename);
if (res) {
obj.short = res[1];
found = true;
}
}
if (!found) {
// X.Y Title in description
reg = /^(\d{1,2})\.(\d{1,2})\s(.*)/;
res = reg.exec(params.description);
if (res) {
obj.short = res[1] + "." + res[2];
obj.desc = res[3];
found = true;
}
}
return obj;
}
function streamToString (stream, cb) {
var chunks = [];
stream.on('data', function (chunk) {
chunks.push(chunk);
});
stream.on('end', function () {
cb(chunks.join(''));
});
}
function processTranscript (options) {
var returnDir = options.name + options.timestamp;
var targetDir = "temp/" + returnDir + "/";
// unzip transcript zip
// convert srt to vtt
// associate videos with transcript files
// add transcript (vtt or dbxf) to lunr search index
// zip up vtt, dbxf, and search index
yauzl.open(options.transcript_path, function (err, zipfile) {
if (err) throw err;
zipfile.on("close", function () {
doneWithTranscript(options);
});
zipfile.on("entry", function (entry) {
if (/\/$/.test(entry.fileName)) {
// directory file names end with '/'
return;
}
zipfile.openReadStream(entry, function (err, readStream) {
if (err) throw err;
readStream.setEncoding('utf8');
// process the srt files
if (entry.fileName.indexOf(".srt") != -1) {
// THEORY: find the toc video file that most closely matches this srt file
var tocReference = findTOCReference(options.toc, entry.fileName);
var newFilename = entry.fileName.replace(".srt", ".vtt");
streamToString(readStream, function (s) {
var writePath = path.join(targetDir + "/media/vtt/", newFilename);
var filePath = path.dirname(writePath);
makeAllPaths(filePath);
s = s.replace(/\r/g, "");
var searchableText = "";
var output = "WEBVTT\n\n";
var lines = s.split("\n");
var count = 0;
for (var i = 0; i < lines.length; i++) {
var line = lines[i];
if (line == "") count = 0;
else count++;
if (count == 2) {
// replace commas in timing lines with periods
line = line.replace(/,/g, ".");
// add line position to move the cue up a little (CSS was ineffective)
line += " line:80%";
} else if (count > 2) {
searchableText += line;
}
output += line + "\n";
}
output = output.trim();
fs.writeFileSync(writePath, output, {encoding: "UTF-8", flag: "w"});
if (tocReference) {
var doc = {
"title": tocReference.title,
"body": searchableText,
"id": tocReference.index
};
options.toc[tocReference.index].captions = "media/vtt/" + newFilename;
|
var transcriptFilename = path.basename(newFilename, path.extname(newFilename)) + ".dfxp";
|
random_line_split
|
|
retransmit_stage.rs
|
)
.num_peers();
let stats = std::mem::replace(
self,
Self {
since: Some(Instant::now()),
..Self::default()
},
);
datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64));
datapoint_info!(
"retransmit-stage",
("total_time", stats.total_time, i64),
("epoch_fetch", stats.epoch_fetch, i64),
("epoch_cache_update", stats.epoch_cache_update, i64),
("total_batches", stats.total_batches, i64),
("num_nodes", stats.num_nodes.into_inner(), i64),
("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64),
("num_shreds", stats.num_shreds, i64),
(
"num_shreds_skipped",
stats.num_shreds_skipped.into_inner(),
i64
),
("retransmit_total", stats.retransmit_total.into_inner(), i64),
(
"compute_turbine",
stats.compute_turbine_peers_total.into_inner(),
i64
),
(
"unknown_shred_slot_leader",
stats.unknown_shred_slot_leader.into_inner(),
i64
),
);
for (slot, stats) in stats.slot_stats {
datapoint_info!(
"retransmit-stage-slot-stats",
("slot", slot, i64),
("num_shreds", stats.num_shreds, i64),
("num_nodes", stats.num_nodes, i64),
);
}
}
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
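// A shred id is skipped outright once MAX_DUPLICATE_COUNT distinct payload hashes have been seen for it.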
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true,
Some(sent) => {
let hash = hasher.hash_shred(shred);
if sent.contains(&hash) {
true
} else {
sent.push(hash);
false
}
}
None => {
let hash = hasher.hash_shred(shred);
cache.put(key, vec![hash]);
false
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
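// The slot set is pruned back to slots above the current root once it grows past 100 entries (see split_off below).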
fn check_if_first_shred_received(
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if first_shreds_received_locked.insert(shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
if first_shreds_received_locked.len() > 100
|
true
} else {
false
}
}
fn maybe_reset_shreds_received_cache(
shreds_received: &Mutex<ShredFilterAndHasher>,
hasher_reset_ts: &mut Instant,
) {
const UPDATE_INTERVAL: Duration = Duration::from_secs(1);
if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL {
*hasher_reset_ts = Instant::now();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
cache.clear();
hasher.reset();
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit(
thread_pool: &ThreadPool,
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
shreds_receiver: &Receiver<Vec<Shred>>,
sockets: &[UdpSocket],
stats: &mut RetransmitStats,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
hasher_reset_ts: &mut Instant,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: Option<&RpcSubscriptions>,
) -> Result<(), RecvTimeoutError> {
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?;
let mut timer_start = Measure::start("retransmit");
shreds.extend(shreds_receiver.try_iter().flatten());
stats.num_shreds += shreds.len();
stats.total_batches += 1;
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (working_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();
let socket_addr_space = cluster_info.socket_addr_space();
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
if should_skip_retransmit(shred, shreds_received) {
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
return 0;
}
let shred_slot = shred.slot();
max_slots
.retransmit
.fetch_max(shred_slot, Ordering::Relaxed);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
// TODO: consider using root-bank here for leader lookup!
// Shreds' signatures should be verified before they reach here, and if
// the leader is unknown they should fail signature check. So here we
// should expect to know the slot leader and otherwise skip the shred.
let slot_leader =
match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) {
Some(pubkey) => pubkey,
None => {
stats
.unknown_shred_slot_leader
.fetch_add(1, Ordering::Relaxed);
return 0;
}
};
let cluster_nodes =
cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info);
let addrs: Vec<_> = cluster_nodes
.get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT)
.into_iter()
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect();
compute_turbine_peers.stop();
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);
let mut retransmit_time = Measure::start("retransmit_to");
let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) {
Ok(()) => addrs.len(),
Err(SendPktsError::IoError(ioerr, num_failed)) => {
stats
.num_addrs_failed
.fetch_add(num_failed, Ordering::Relaxed);
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
addrs.len(),
);
addrs.len() - num_failed
}
};
retransmit_time.stop();
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
num_nodes
};
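    // merge folds the smaller map into the larger one so the number of re-inserted entries is minimized.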
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
K: Eq + std::hash::Hash,
V: Default + AddAssign,
{
if acc.len() < other.len() {
return merge(other, acc);
}
for (key, value) in other {
*acc.entry(key).or_default() += value;
}
acc
}
let slot_stats = thread_pool.install(|| {
shreds
.into_par_iter()
.with_min_len(4)
.map(|shred| {
let index = thread_pool.current_thread_index().unwrap();
let socket = &sockets[index % sockets.len()];
|
{
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
}
|
conditional_block
|
retransmit_stage.rs
|
_info)
.num_peers();
let stats = std::mem::replace(
self,
Self {
since: Some(Instant::now()),
..Self::default()
},
);
datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64));
datapoint_info!(
"retransmit-stage",
("total_time", stats.total_time, i64),
("epoch_fetch", stats.epoch_fetch, i64),
("epoch_cache_update", stats.epoch_cache_update, i64),
("total_batches", stats.total_batches, i64),
("num_nodes", stats.num_nodes.into_inner(), i64),
("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64),
("num_shreds", stats.num_shreds, i64),
(
"num_shreds_skipped",
stats.num_shreds_skipped.into_inner(),
i64
),
("retransmit_total", stats.retransmit_total.into_inner(), i64),
(
"compute_turbine",
stats.compute_turbine_peers_total.into_inner(),
i64
),
(
"unknown_shred_slot_leader",
stats.unknown_shred_slot_leader.into_inner(),
i64
),
);
for (slot, stats) in stats.slot_stats {
datapoint_info!(
"retransmit-stage-slot-stats",
("slot", slot, i64),
("num_shreds", stats.num_shreds, i64),
("num_nodes", stats.num_nodes, i64),
);
}
}
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true,
Some(sent) => {
let hash = hasher.hash_shred(shred);
if sent.contains(&hash) {
true
} else {
sent.push(hash);
false
}
}
None => {
let hash = hasher.hash_shred(shred);
cache.put(key, vec![hash]);
false
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
fn
|
(
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if first_shreds_received_locked.insert(shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
if first_shreds_received_locked.len() > 100 {
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
}
true
} else {
false
}
}
fn maybe_reset_shreds_received_cache(
shreds_received: &Mutex<ShredFilterAndHasher>,
hasher_reset_ts: &mut Instant,
) {
const UPDATE_INTERVAL: Duration = Duration::from_secs(1);
if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL {
*hasher_reset_ts = Instant::now();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
cache.clear();
hasher.reset();
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit(
thread_pool: &ThreadPool,
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
shreds_receiver: &Receiver<Vec<Shred>>,
sockets: &[UdpSocket],
stats: &mut RetransmitStats,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
hasher_reset_ts: &mut Instant,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: Option<&RpcSubscriptions>,
) -> Result<(), RecvTimeoutError> {
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?;
let mut timer_start = Measure::start("retransmit");
shreds.extend(shreds_receiver.try_iter().flatten());
stats.num_shreds += shreds.len();
stats.total_batches += 1;
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (working_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();
let socket_addr_space = cluster_info.socket_addr_space();
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
if should_skip_retransmit(shred, shreds_received) {
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
return 0;
}
let shred_slot = shred.slot();
max_slots
.retransmit
.fetch_max(shred_slot, Ordering::Relaxed);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
// TODO: consider using root-bank here for leader lookup!
// Shreds' signatures should be verified before they reach here, and if
// the leader is unknown they should fail signature check. So here we
// should expect to know the slot leader and otherwise skip the shred.
let slot_leader =
match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) {
Some(pubkey) => pubkey,
None => {
stats
.unknown_shred_slot_leader
.fetch_add(1, Ordering::Relaxed);
return 0;
}
};
let cluster_nodes =
cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info);
let addrs: Vec<_> = cluster_nodes
.get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT)
.into_iter()
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect();
compute_turbine_peers.stop();
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);
let mut retransmit_time = Measure::start("retransmit_to");
let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) {
Ok(()) => addrs.len(),
Err(SendPktsError::IoError(ioerr, num_failed)) => {
stats
.num_addrs_failed
.fetch_add(num_failed, Ordering::Relaxed);
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
addrs.len(),
);
addrs.len() - num_failed
}
};
retransmit_time.stop();
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
num_nodes
};
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
K: Eq + std::hash::Hash,
V: Default + AddAssign,
{
if acc.len() < other.len() {
return merge(other, acc);
}
for (key, value) in other {
*acc.entry(key).or_default() += value;
}
acc
}
let slot_stats = thread_pool.install(|| {
shreds
.into_par_iter()
.with_min_len(4)
.map(|shred| {
let index = thread_pool.current_thread_index().unwrap();
let socket = &sockets[index % sockets.len()];
|
check_if_first_shred_received
|
identifier_name
|
retransmit_stage.rs
|
_info)
.num_peers();
let stats = std::mem::replace(
self,
Self {
since: Some(Instant::now()),
..Self::default()
},
);
datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64));
datapoint_info!(
"retransmit-stage",
("total_time", stats.total_time, i64),
("epoch_fetch", stats.epoch_fetch, i64),
("epoch_cache_update", stats.epoch_cache_update, i64),
("total_batches", stats.total_batches, i64),
("num_nodes", stats.num_nodes.into_inner(), i64),
("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64),
("num_shreds", stats.num_shreds, i64),
(
"num_shreds_skipped",
stats.num_shreds_skipped.into_inner(),
i64
),
("retransmit_total", stats.retransmit_total.into_inner(), i64),
(
"compute_turbine",
stats.compute_turbine_peers_total.into_inner(),
i64
),
(
"unknown_shred_slot_leader",
stats.unknown_shred_slot_leader.into_inner(),
i64
),
);
for (slot, stats) in stats.slot_stats {
datapoint_info!(
"retransmit-stage-slot-stats",
("slot", slot, i64),
("num_shreds", stats.num_shreds, i64),
("num_nodes", stats.num_nodes, i64),
);
}
}
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true,
Some(sent) => {
let hash = hasher.hash_shred(shred);
if sent.contains(&hash) {
true
} else {
sent.push(hash);
false
}
}
None => {
let hash = hasher.hash_shred(shred);
cache.put(key, vec![hash]);
false
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
fn check_if_first_shred_received(
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if first_shreds_received_locked.insert(shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
if first_shreds_received_locked.len() > 100 {
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
}
true
} else {
false
}
}
fn maybe_reset_shreds_received_cache(
shreds_received: &Mutex<ShredFilterAndHasher>,
hasher_reset_ts: &mut Instant,
) {
const UPDATE_INTERVAL: Duration = Duration::from_secs(1);
if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL {
*hasher_reset_ts = Instant::now();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
cache.clear();
hasher.reset();
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit(
|
sockets: &[UdpSocket],
stats: &mut RetransmitStats,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
hasher_reset_ts: &mut Instant,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: Option<&RpcSubscriptions>,
) -> Result<(), RecvTimeoutError> {
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?;
let mut timer_start = Measure::start("retransmit");
shreds.extend(shreds_receiver.try_iter().flatten());
stats.num_shreds += shreds.len();
stats.total_batches += 1;
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (working_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();
let socket_addr_space = cluster_info.socket_addr_space();
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
if should_skip_retransmit(shred, shreds_received) {
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
return 0;
}
let shred_slot = shred.slot();
max_slots
.retransmit
.fetch_max(shred_slot, Ordering::Relaxed);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
// TODO: consider using root-bank here for leader lookup!
// Shreds' signatures should be verified before they reach here, and if
// the leader is unknown they should fail signature check. So here we
// should expect to know the slot leader and otherwise skip the shred.
let slot_leader =
match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) {
Some(pubkey) => pubkey,
None => {
stats
.unknown_shred_slot_leader
.fetch_add(1, Ordering::Relaxed);
return 0;
}
};
let cluster_nodes =
cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info);
let addrs: Vec<_> = cluster_nodes
.get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT)
.into_iter()
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect();
compute_turbine_peers.stop();
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);
let mut retransmit_time = Measure::start("retransmit_to");
let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) {
Ok(()) => addrs.len(),
Err(SendPktsError::IoError(ioerr, num_failed)) => {
stats
.num_addrs_failed
.fetch_add(num_failed, Ordering::Relaxed);
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
addrs.len(),
);
addrs.len() - num_failed
}
};
retransmit_time.stop();
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
num_nodes
};
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
K: Eq + std::hash::Hash,
V: Default + AddAssign,
{
if acc.len() < other.len() {
return merge(other, acc);
}
for (key, value) in other {
*acc.entry(key).or_default() += value;
}
acc
}
let slot_stats = thread_pool.install(|| {
shreds
.into_par_iter()
.with_min_len(4)
.map(|shred| {
let index = thread_pool.current_thread_index().unwrap();
let socket = &sockets[index % sockets.len()];
|
thread_pool: &ThreadPool,
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
shreds_receiver: &Receiver<Vec<Shred>>,
|
random_line_split
|
eng.ts
|
an issue',
			edit: 'Propose an edit'
},
form: {
title: 'Report an issue',
subtitle: 'We do our best to have a high-quality database, but sometimes some issues are there. Thanks for help!',
type: {
label: 'Problem type',
time: 'Lyrics not synchronised',
quality: 'Low quality video'
},
comment: {
label: 'Comment',
				placeholder: 'After 2 minutes, the lyrics are not synchronised'
},
username: {
label: 'Your name',
placeholder: 'IAmMeticulous'
},
submit: 'Submit',
thanks: {
text: 'Thanks! We will address this issue as soon as possible: {url}',
btn: 'Close'
}
}
},
import: {
			description: 'This form allows you to submit a karaoke to the Karaoke Mugen team. It will not be integrated into the karaoke database immediately because it requires validation. Please be patient. Your karaoke may be modified if it doesn\'t comply with KM\'s rules.',
attention: 'ATTENTION:',
check_in_progress: 'Please check the list of karaokes currently being made before sending us a song. This\'ll avoid duplicate work, and the world will thus be a better place.',
documentation_link: 'Documentation',
in_progress_link: 'Karaokes In Progress List',
license_reminder: 'Your karaoke will be published with the {name} license',
license_link: 'Learn more about this license by clicking here.',
add: 'Add',
create: 'Create',
choose_file: 'Choose a file',
add_file_media_error: '{name} is not a media file',
add_file_lyrics_error: '{name} is not a subtitle file',
add_file_success: '{name} file added successfully',
comment: 'Leave a comment?',
comment_edit: 'If you\'re submitting an edit, tell us who you are here!',
comment_tooltip: 'If you want to add a message for the integrators or just say thanks, say it here!',
submit: 'Send karaoke',
media_file: 'Media file',
media_file_required: 'Media file is mandatory',
media_file_tooltip: 'Supported file formats: {formats}',
lyrics_file: 'Lyrics file',
lyrics_file_tooltip: 'Supported file formats: {formats}',
title: 'Song title',
title_required: 'Please enter a song title',
title_tooltip: 'If you don\'t know, put the name of the series here as well. In the case of an alternative version, name your title as: \'My title ~ Disco vers.\' for example',
series_tooltip: 'TV series, movie title, video game title, etc.',
			series_singers_required: 'Series and singers cannot both be empty at the same time.',
songtypes_required: 'Song type is mandatory',
songorder: 'Song order',
songorder_invalid: 'Song order is invalid',
songorder_tooltip: 'Opening/Ending number. If this is the only opening/ending in the series, leave blank.',
langs_required: 'Please choose a language',
year: 'Broadcast year',
			year_tooltip: 'Year when the series was broadcast or the video was produced',
year_required: 'Broadcast year is mandatory',
year_invalid: 'Broadcast year is invalid',
songwriters_tooltip: 'Songwriters compose lyrics AND music.',
			creators_tooltip: 'Entity that created the series. Can be an animation studio, a movie studio, or a game studio',
authors_tooltip: 'You should add yourself here ;)',
authors_required: 'Author of the kara is mandatory',
groups_tooltip: 'Download groups for this song. The song will be included in these download packs',
created_at: 'Creation date',
modified_at: 'Last updated date',
add_success: 'Your karaoke has been successfully sent!',
add_success_description: 'An issue has been created on our tracker. You can check its progression at {url}',
add_error: 'An error has occurred, karaoke has not been sent properly',
restart: 'Submit new karaoke'
},
stats: {
favorited: 'Added to favorites by {number} users',
requested: 'Requested {number} times',
played: 'Played {number} times'
}
},
layout: {
loading: 'Loading...',
suggest: 'Can\'t find what you\'re looking for?',
suggest_open: 'Suggest us!',
empty: 'This is the end of your favorites.',
explore: 'Go add some!',
results: '{count} result | {count} results',
slogan: 'This song is available on the Karaoke Mugen songbase!'
},
footer: {
home: 'Project home',
software_under_license: 'Software under license',
base_under_licence: 'Karaoke base under license'
},
stats: {
		karaokes: 'Karaoke | Karaokes',
all_duration: 'Duration of all karas',
last_generation: 'Last update',
media_size: 'Media Size'
},
home: {
noInstance: {
title: 'No Karaoke Mugen instance runs on your local network.',
1: 'Double check you\'re logged in to the same WiFi network as the server. ',
2: 'Make sure the Karaoke Mugen application is running. Please check that the kara.moe setting is enabled in Options -> Karaoke -> Short URL (kara.moe).',
3: 'If you just want to explore the base, you can safely ignore this message.'
}
},
duration: {
days: 'days',
hours: 'hours',
minutes: 'minutes',
seconds: 'seconds'
},
menu: {
add_repository: 'Add this repository to your app',
database: 'Database',
karas: 'Songs',
songtypes: 'Types',
tags: 'Tags',
miscs: 'Miscs',
groups: 'Groups',
families: 'Families',
origins: 'Origins',
genres: 'Genres',
platforms: 'Platforms',
singers: 'Singers',
series: 'Series',
songwriters: 'Songwriters',
creators: 'Creators',
authors: 'Authors',
languages: 'Languages',
years: 'Years',
community: 'Community',
kara_import: 'Submit a kara',
account: 'Account',
favorites: 'Favorites',
login: 'Login',
logout: 'Logout',
register: 'Register',
connection: 'Login',
profile: 'Profile',
switch_language: 'Switch language'
},
search: {
placeholder: 'Series, singers, names...',
sort: {
			a_z: 'From A to Z',
kara_count: 'Kara count',
recent: 'Recent',
most_played: 'Most played',
			most_favorites: 'Most favorited',
			most_requested: 'Most requested'
},
next: 'Next page',
previous: 'Previous page',
aria: {
goto: 'Go to page {0}',
page: 'Page {0}',
sort: 'Sort by'
}
},
modal: {
login: {
title: 'Login',
subtitle: 'Login to view your favorites and edit your profile!',
fields: {
username: {
label: 'Username',
placeholder: 'LoveLiveFan93'
},
password: {
label: 'Password',
placeholder: 'ActuallyIdolM@sterIsBetter'
},
forgot_password: {
label: 'Forgot password?',
					error: 'Could not reset your password: contact the administrator of the server your account belongs to.',
success: 'An email has been sent with a link to reset your password.'
}
},
submit: 'Login'
},
signup: {
title: 'Signup',
subtitle: 'Signup to view your favorites and edit your profile!',
fields: {
username: {
label: 'Username',
placeholder: 'LoveLiveFan93'
},
password: {
label: 'Password',
placeholder: 'ActuallyIdolM@sterIsBetter'
},
password_confirmation: {
label: 'Password Confirmation',
placeholder: 'ActuallyIdolM@sterIsBetter'
},
email: {
label: 'Email',
placeholder: 'test@shelter.moe'
}
},
passwords_mismatch: 'Passwords do not match',
submit: 'Signup'
},
profile: {
title: 'Edit profile',
fields: {
username: {
label: 'Username'
},
|
placeholder: 'LoveLiveFan93'
},
password: {
header: 'Change password',
label: 'Password',
					placeholder: 'ActuallyIPreferIdolM@ster'
|
nickname: {
label: 'Nickname',
|
random_line_split
|
quadstore.go
|
call Ping."
// Source: http://golang.org/pkg/database/sql/#Open
if err := conn.Ping(); err != nil {
clog.Errorf("Couldn't open database at %s: %#v", addr, err)
return nil, err
}
return conn, nil
}
var nodesColumns = []string{
"hash",
"value",
"value_string",
"datatype",
"language",
"iri",
"bnode",
"value_int",
"value_bool",
"value_float",
"value_time",
}
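// nodeInsertColumns lists, per value kind, which columns are written in addition to "hash";
// the index returned by nodeValues below selects the matching row.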
var nodeInsertColumns = [][]string{
{"value"},
{"value_string", "iri"},
{"value_string", "bnode"},
{"value_string"},
{"value_string", "datatype"},
{"value_string", "language"},
{"value_int"},
{"value_bool"},
{"value_float"},
{"value_time"},
}
func createSQLTables(addr string, options graph.Options) error {
flavor, _, _ := options.StringKey("flavor")
if flavor == "" {
flavor = defaultFlavor
}
fl, ok := flavors[flavor]
if !ok {
return fmt.Errorf("unsupported sql flavor: %s", flavor)
}
dr := fl.Driver
if dr == "" {
dr = fl.Name
}
conn, err := connect(addr, dr, options)
if err != nil {
return err
}
defer conn.Close()
if fl.NoSchemaChangesInTx {
_, err = conn.Exec(fl.NodesTable)
if err != nil {
err = fl.Error(err)
clog.Errorf("Cannot create nodes table: %v", err)
return err
}
_, err = conn.Exec(fl.QuadsTable)
if err != nil {
err = fl.Error(err)
clog.Errorf("Cannot create quad table: %v", err)
return err
}
for _, index := range fl.Indexes(options) {
if _, err = conn.Exec(index); err != nil {
clog.Errorf("Cannot create index: %v", err)
return err
}
}
return nil
}
tx, err := conn.Begin()
if err != nil {
clog.Errorf("Couldn't begin creation transaction: %s", err)
return err
}
_, err = tx.Exec(fl.NodesTable)
if err != nil {
tx.Rollback()
err = fl.Error(err)
clog.Errorf("Cannot create nodes table: %v", err)
return err
}
_, err = tx.Exec(fl.QuadsTable)
if err != nil {
tx.Rollback()
err = fl.Error(err)
clog.Errorf("Cannot create quad table: %v", err)
return err
}
for _, index := range fl.Indexes(options) {
if _, err = tx.Exec(index); err != nil {
clog.Errorf("Cannot create index: %v", err)
tx.Rollback()
return err
}
}
tx.Commit()
return nil
}
func newQuadStore(addr string, options graph.Options) (graph.QuadStore, error) {
flavor, _, _ := options.StringKey("flavor")
if flavor == "" {
flavor = defaultFlavor
}
fl, ok := flavors[flavor]
if !ok {
return nil, fmt.Errorf("unsupported sql flavor: %s", flavor)
}
dr := fl.Driver
if dr == "" {
dr = fl.Name
}
var qs QuadStore
conn, err := connect(addr, dr, options)
if err != nil {
return nil, err
}
localOpt, localOptOk, err := options.BoolKey("local_optimize")
if err != nil {
return nil, err
}
qs.db = conn
qs.flavor = fl
qs.size = -1
qs.sizes = lru.New(1024)
qs.ids = lru.New(1024)
// Skip size checking by default.
qs.noSizes = true
if localOptOk {
if localOpt {
qs.noSizes = false
}
}
qs.useEstimates, _, err = options.BoolKey("use_estimates")
if err != nil {
return nil, err
}
return &qs, nil
}
func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) {
s, err = pquads.MarshalValue(q.Subject)
if err != nil {
return
}
p, err = pquads.MarshalValue(q.Predicate)
if err != nil {
return
}
o, err = pquads.MarshalValue(q.Object)
if err != nil {
return
}
l, err = pquads.MarshalValue(q.Label)
if err != nil {
return
}
return
}
func escapeNullByte(s string) string {
return strings.Replace(s, "\u0000", `\x00`, -1)
}
func unescapeNullByte(s string) string {
return strings.Replace(s, `\x00`, "\u0000", -1)
}
func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) {
var (
nodeKey int
values = []interface{}{h.toSQL(), nil, nil}[:1]
)
switch v := v.(type) {
case quad.IRI:
nodeKey = 1
values = append(values, string(v), true)
case quad.BNode:
nodeKey = 2
values = append(values, string(v), true)
case quad.String:
nodeKey = 3
values = append(values, escapeNullByte(string(v)))
case quad.TypedString:
nodeKey = 4
values = append(values, escapeNullByte(string(v.Value)), string(v.Type))
case quad.LangString:
nodeKey = 5
values = append(values, escapeNullByte(string(v.Value)), v.Lang)
case quad.Int:
nodeKey = 6
values = append(values, int64(v))
case quad.Bool:
nodeKey = 7
values = append(values, bool(v))
case quad.Float:
nodeKey = 8
values = append(values, float64(v))
case quad.Time:
nodeKey = 9
values = append(values, time.Time(v))
default:
nodeKey = 0
p, err := pquads.MarshalValue(v)
if err != nil {
clog.Errorf("couldn't marshal value: %v", err)
return 0, nil, err
}
values = append(values, p)
}
return nodeKey, values, nil
}
func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error {
tx, err := qs.db.Begin()
if err != nil {
clog.Errorf("couldn't begin write transaction: %v", err)
return err
}
err = qs.flavor.RunTx(tx, in, opts)
if err != nil {
tx.Rollback()
return err
}
qs.size = -1 // TODO(barakmich): Sync size with writes.
return tx.Commit()
}
func (qs *QuadStore) Quad(val graph.Value) quad.Quad {
h := val.(QuadHashes)
return quad.Quad{
Subject: qs.NameOf(h.Get(quad.Subject)),
Predicate: qs.NameOf(h.Get(quad.Predicate)),
Object: qs.NameOf(h.Get(quad.Object)),
Label: qs.NameOf(h.Get(quad.Label)),
}
}
func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator {
return newSQLLinkIterator(qs, d, val.(NodeHash))
}
func (qs *QuadStore) NodesAllIterator() graph.Iterator {
return NewAllIterator(qs, "nodes")
}
func (qs *QuadStore) QuadsAllIterator() graph.Iterator {
return NewAllIterator(qs, "quads")
}
func (qs *QuadStore) ValueOf(s quad.Value) graph.Value {
return NodeHash(hashOf(s))
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return nil
}
switch value := value.(type) {
case time.Time:
nt.Time, nt.Valid = value, true
case []byte:
t, err := time.Parse("2006-01-02 15:04:05.999999", string(value))
if err != nil {
return err
}
nt.Time, nt.Valid = t, true
default:
return fmt.Errorf("unsupported time format: %T: %v", value, value)
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
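// exampleScanNullTime is an illustrative sketch only, not part of the original
// quadstore: it shows NullTime used as a scan destination for a nullable
// timestamp column. The "events" table queried here is a hypothetical example.
func exampleScanNullTime(db *sql.DB) (time.Time, bool, error) {
	var created NullTime
	// Scan accepts time.Time values, []byte timestamps, or NULL.
	if err := db.QueryRow(`SELECT created_at FROM events LIMIT 1`).Scan(&created); err != nil {
		return time.Time{}, false, err
	}
	return created.Time, created.Valid, nil
}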
|
func (qs *QuadStore) NameOf(v graph.Value) quad.Value {
if v == nil {
if clog.V(2) {
|
random_line_split
|
|
quadstore.go
|
err = pquads.MarshalValue(q.Subject)
if err != nil {
return
}
p, err = pquads.MarshalValue(q.Predicate)
if err != nil {
return
}
o, err = pquads.MarshalValue(q.Object)
if err != nil {
return
}
l, err = pquads.MarshalValue(q.Label)
if err != nil {
return
}
return
}
func escapeNullByte(s string) string {
return strings.Replace(s, "\u0000", `\x00`, -1)
}
func unescapeNullByte(s string) string {
return strings.Replace(s, `\x00`, "\u0000", -1)
}
func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) {
var (
nodeKey int
values = []interface{}{h.toSQL(), nil, nil}[:1]
)
switch v := v.(type) {
case quad.IRI:
nodeKey = 1
values = append(values, string(v), true)
case quad.BNode:
nodeKey = 2
values = append(values, string(v), true)
case quad.String:
nodeKey = 3
values = append(values, escapeNullByte(string(v)))
case quad.TypedString:
nodeKey = 4
values = append(values, escapeNullByte(string(v.Value)), string(v.Type))
case quad.LangString:
nodeKey = 5
values = append(values, escapeNullByte(string(v.Value)), v.Lang)
case quad.Int:
nodeKey = 6
values = append(values, int64(v))
case quad.Bool:
nodeKey = 7
values = append(values, bool(v))
case quad.Float:
nodeKey = 8
values = append(values, float64(v))
case quad.Time:
nodeKey = 9
values = append(values, time.Time(v))
default:
nodeKey = 0
p, err := pquads.MarshalValue(v)
if err != nil {
clog.Errorf("couldn't marshal value: %v", err)
return 0, nil, err
}
values = append(values, p)
}
return nodeKey, values, nil
}
func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error {
tx, err := qs.db.Begin()
if err != nil {
clog.Errorf("couldn't begin write transaction: %v", err)
return err
}
err = qs.flavor.RunTx(tx, in, opts)
if err != nil {
tx.Rollback()
return err
}
qs.size = -1 // TODO(barakmich): Sync size with writes.
return tx.Commit()
}
func (qs *QuadStore) Quad(val graph.Value) quad.Quad {
h := val.(QuadHashes)
return quad.Quad{
Subject: qs.NameOf(h.Get(quad.Subject)),
Predicate: qs.NameOf(h.Get(quad.Predicate)),
Object: qs.NameOf(h.Get(quad.Object)),
Label: qs.NameOf(h.Get(quad.Label)),
}
}
func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator {
return newSQLLinkIterator(qs, d, val.(NodeHash))
}
func (qs *QuadStore) NodesAllIterator() graph.Iterator {
return NewAllIterator(qs, "nodes")
}
func (qs *QuadStore) QuadsAllIterator() graph.Iterator {
return NewAllIterator(qs, "quads")
}
func (qs *QuadStore) ValueOf(s quad.Value) graph.Value {
return NodeHash(hashOf(s))
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return nil
}
switch value := value.(type) {
case time.Time:
nt.Time, nt.Valid = value, true
case []byte:
t, err := time.Parse("2006-01-02 15:04:05.999999", string(value))
if err != nil {
return err
}
nt.Time, nt.Valid = t, true
default:
return fmt.Errorf("unsupported time format: %T: %v", value, value)
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
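// exampleInsertNullableTime is an illustrative sketch only, not part of the
// original quadstore: it shows the driver.Valuer side of NullTime, where an
// invalid NullTime is written to the database as SQL NULL. The "events" table
// and the "$1" placeholder style are hypothetical assumptions.
func exampleInsertNullableTime(db *sql.DB, t *time.Time) error {
	nt := NullTime{}
	if t != nil {
		nt = NullTime{Time: *t, Valid: true}
	}
	_, err := db.Exec(`INSERT INTO events(created_at) VALUES ($1)`, nt)
	return err
}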
func (qs *QuadStore) NameOf(v graph.Value) quad.Value {
if v == nil {
if clog.V(2) {
clog.Infof("NameOf was nil")
}
return nil
} else if v, ok := v.(graph.PreFetchedValue); ok {
return v.NameOf()
}
hash := v.(NodeHash)
if !hash.Valid() {
if clog.V(2) {
clog.Infof("NameOf was nil")
}
return nil
}
if val, ok := qs.ids.Get(hash.String()); ok {
return val.(quad.Value)
}
query := `SELECT
value,
value_string,
datatype,
language,
iri,
bnode,
value_int,
value_bool,
value_float,
value_time
FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;`
c := qs.db.QueryRow(query, hash.toSQL())
var (
data []byte
str sql.NullString
typ sql.NullString
lang sql.NullString
iri sql.NullBool
bnode sql.NullBool
vint sql.NullInt64
vbool sql.NullBool
vfloat sql.NullFloat64
vtime NullTime
)
if err := c.Scan(
&data,
&str,
&typ,
&lang,
&iri,
&bnode,
&vint,
&vbool,
&vfloat,
&vtime,
); err != nil {
clog.Errorf("Couldn't execute value lookup: %v", err)
return nil
}
var val quad.Value
if str.Valid {
if iri.Bool {
val = quad.IRI(str.String)
} else if bnode.Bool {
val = quad.BNode(str.String)
} else if lang.Valid {
val = quad.LangString{
Value: quad.String(unescapeNullByte(str.String)),
Lang: lang.String,
}
} else if typ.Valid {
val = quad.TypedString{
Value: quad.String(unescapeNullByte(str.String)),
Type: quad.IRI(typ.String),
}
} else {
val = quad.String(unescapeNullByte(str.String))
}
} else if vint.Valid {
val = quad.Int(vint.Int64)
} else if vbool.Valid {
val = quad.Bool(vbool.Bool)
} else if vfloat.Valid {
val = quad.Float(vfloat.Float64)
} else if vtime.Valid {
val = quad.Time(vtime.Time)
} else {
qv, err := pquads.UnmarshalValue(data)
if err != nil {
clog.Errorf("Couldn't unmarshal value: %v", err)
return nil
}
val = qv
}
if val != nil {
qs.ids.Put(hash.String(), val)
}
return val
}
func (qs *QuadStore) Size() int64 {
if qs.size != -1 {
return qs.size
}
query := "SELECT COUNT(*) FROM quads;"
if qs.useEstimates && qs.flavor.Estimated != nil {
query = qs.flavor.Estimated("quads")
}
c := qs.db.QueryRow(query)
err := c.Scan(&qs.size)
if err != nil {
clog.Errorf("Couldn't execute COUNT: %v", err)
return 0
}
return qs.size
}
func (qs *QuadStore) Horizon() graph.PrimaryKey {
var horizon int64
err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon)
if err != nil {
if err != sql.ErrNoRows {
clog.Errorf("Couldn't execute horizon: %v", err)
}
return graph.NewSequentialKey(0)
}
return graph.NewSequentialKey(horizon)
}
func (qs *QuadStore) FixedIterator() graph.FixedIterator {
return iterator.NewFixed(iterator.Identity)
}
func (qs *QuadStore) Close() error {
return qs.db.Close()
}
func (qs *QuadStore) QuadDirection(in graph.Value, d quad.Direction) graph.Value {
return NodeHash(in.(QuadHashes).Get(d))
}
func (qs *QuadStore) Type() string {
return QuadStoreType
}
func (qs *QuadStore)
|
sizeForIterator
|
identifier_name
|
|
quadstore.go
|
lru.New(1024)
// Skip size checking by default.
qs.noSizes = true
if localOptOk {
if localOpt {
qs.noSizes = false
}
}
qs.useEstimates, _, err = options.BoolKey("use_estimates")
if err != nil {
return nil, err
}
return &qs, nil
}
func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) {
s, err = pquads.MarshalValue(q.Subject)
if err != nil {
return
}
p, err = pquads.MarshalValue(q.Predicate)
if err != nil {
return
}
o, err = pquads.MarshalValue(q.Object)
if err != nil {
return
}
l, err = pquads.MarshalValue(q.Label)
if err != nil {
return
}
return
}
func escapeNullByte(s string) string {
return strings.Replace(s, "\u0000", `\x00`, -1)
}
func unescapeNullByte(s string) string {
return strings.Replace(s, `\x00`, "\u0000", -1)
}
func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) {
var (
nodeKey int
values = []interface{}{h.toSQL(), nil, nil}[:1]
)
switch v := v.(type) {
case quad.IRI:
nodeKey = 1
values = append(values, string(v), true)
case quad.BNode:
nodeKey = 2
values = append(values, string(v), true)
case quad.String:
nodeKey = 3
values = append(values, escapeNullByte(string(v)))
case quad.TypedString:
nodeKey = 4
values = append(values, escapeNullByte(string(v.Value)), string(v.Type))
case quad.LangString:
nodeKey = 5
values = append(values, escapeNullByte(string(v.Value)), v.Lang)
case quad.Int:
nodeKey = 6
values = append(values, int64(v))
case quad.Bool:
nodeKey = 7
values = append(values, bool(v))
case quad.Float:
nodeKey = 8
values = append(values, float64(v))
case quad.Time:
nodeKey = 9
values = append(values, time.Time(v))
default:
nodeKey = 0
p, err := pquads.MarshalValue(v)
if err != nil {
clog.Errorf("couldn't marshal value: %v", err)
return 0, nil, err
}
values = append(values, p)
}
return nodeKey, values, nil
}
func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error {
tx, err := qs.db.Begin()
if err != nil {
clog.Errorf("couldn't begin write transaction: %v", err)
return err
}
err = qs.flavor.RunTx(tx, in, opts)
if err != nil {
tx.Rollback()
return err
}
qs.size = -1 // TODO(barakmich): Sync size with writes.
return tx.Commit()
}
func (qs *QuadStore) Quad(val graph.Value) quad.Quad {
h := val.(QuadHashes)
return quad.Quad{
Subject: qs.NameOf(h.Get(quad.Subject)),
Predicate: qs.NameOf(h.Get(quad.Predicate)),
Object: qs.NameOf(h.Get(quad.Object)),
Label: qs.NameOf(h.Get(quad.Label)),
}
}
func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator {
return newSQLLinkIterator(qs, d, val.(NodeHash))
}
func (qs *QuadStore) NodesAllIterator() graph.Iterator {
return NewAllIterator(qs, "nodes")
}
func (qs *QuadStore) QuadsAllIterator() graph.Iterator {
return NewAllIterator(qs, "quads")
}
func (qs *QuadStore) ValueOf(s quad.Value) graph.Value {
return NodeHash(hashOf(s))
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return nil
}
switch value := value.(type) {
case time.Time:
nt.Time, nt.Valid = value, true
case []byte:
t, err := time.Parse("2006-01-02 15:04:05.999999", string(value))
if err != nil {
return err
}
nt.Time, nt.Valid = t, true
default:
return fmt.Errorf("unsupported time format: %T: %v", value, value)
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
func (qs *QuadStore) NameOf(v graph.Value) quad.Value {
if v == nil {
if clog.V(2) {
clog.Infof("NameOf was nil")
}
return nil
} else if v, ok := v.(graph.PreFetchedValue); ok {
return v.NameOf()
}
hash := v.(NodeHash)
if !hash.Valid() {
if clog.V(2) {
clog.Infof("NameOf was nil")
}
return nil
}
if val, ok := qs.ids.Get(hash.String()); ok {
return val.(quad.Value)
}
query := `SELECT
value,
value_string,
datatype,
language,
iri,
bnode,
value_int,
value_bool,
value_float,
value_time
FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;`
c := qs.db.QueryRow(query, hash.toSQL())
var (
data []byte
str sql.NullString
typ sql.NullString
lang sql.NullString
iri sql.NullBool
bnode sql.NullBool
vint sql.NullInt64
vbool sql.NullBool
vfloat sql.NullFloat64
vtime NullTime
)
if err := c.Scan(
&data,
&str,
&typ,
&lang,
&iri,
&bnode,
&vint,
&vbool,
&vfloat,
&vtime,
); err != nil {
clog.Errorf("Couldn't execute value lookup: %v", err)
return nil
}
var val quad.Value
if str.Valid {
if iri.Bool {
val = quad.IRI(str.String)
} else if bnode.Bool {
val = quad.BNode(str.String)
} else if lang.Valid {
val = quad.LangString{
Value: quad.String(unescapeNullByte(str.String)),
Lang: lang.String,
}
} else if typ.Valid {
val = quad.TypedString{
Value: quad.String(unescapeNullByte(str.String)),
Type: quad.IRI(typ.String),
}
} else {
val = quad.String(unescapeNullByte(str.String))
}
} else if vint.Valid {
val = quad.Int(vint.Int64)
} else if vbool.Valid {
val = quad.Bool(vbool.Bool)
} else if vfloat.Valid {
val = quad.Float(vfloat.Float64)
} else if vtime.Valid {
val = quad.Time(vtime.Time)
} else {
qv, err := pquads.UnmarshalValue(data)
if err != nil {
clog.Errorf("Couldn't unmarshal value: %v", err)
return nil
}
val = qv
}
if val != nil {
qs.ids.Put(hash.String(), val)
}
return val
}
func (qs *QuadStore) Size() int64 {
if qs.size != -1 {
return qs.size
}
query := "SELECT COUNT(*) FROM quads;"
if qs.useEstimates && qs.flavor.Estimated != nil {
query = qs.flavor.Estimated("quads")
}
c := qs.db.QueryRow(query)
err := c.Scan(&qs.size)
if err != nil {
clog.Errorf("Couldn't execute COUNT: %v", err)
return 0
}
return qs.size
}
func (qs *QuadStore) Horizon() graph.PrimaryKey {
var horizon int64
err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon)
if err != nil
|
{
if err != sql.ErrNoRows {
clog.Errorf("Couldn't execute horizon: %v", err)
}
return graph.NewSequentialKey(0)
}
|
conditional_block
|
|
quadstore.go
|
Opt {
qs.noSizes = false
}
}
qs.useEstimates, _, err = options.BoolKey("use_estimates")
if err != nil {
return nil, err
}
return &qs, nil
}
func marshalQuadDirections(q quad.Quad) (s, p, o, l []byte, err error) {
s, err = pquads.MarshalValue(q.Subject)
if err != nil {
return
}
p, err = pquads.MarshalValue(q.Predicate)
if err != nil {
return
}
o, err = pquads.MarshalValue(q.Object)
if err != nil {
return
}
l, err = pquads.MarshalValue(q.Label)
if err != nil {
return
}
return
}
func escapeNullByte(s string) string {
return strings.Replace(s, "\u0000", `\x00`, -1)
}
func unescapeNullByte(s string) string {
return strings.Replace(s, `\x00`, "\u0000", -1)
}
func nodeValues(h NodeHash, v quad.Value) (int, []interface{}, error) {
var (
nodeKey int
values = []interface{}{h.toSQL(), nil, nil}[:1]
)
switch v := v.(type) {
case quad.IRI:
nodeKey = 1
values = append(values, string(v), true)
case quad.BNode:
nodeKey = 2
values = append(values, string(v), true)
case quad.String:
nodeKey = 3
values = append(values, escapeNullByte(string(v)))
case quad.TypedString:
nodeKey = 4
values = append(values, escapeNullByte(string(v.Value)), string(v.Type))
case quad.LangString:
nodeKey = 5
values = append(values, escapeNullByte(string(v.Value)), v.Lang)
case quad.Int:
nodeKey = 6
values = append(values, int64(v))
case quad.Bool:
nodeKey = 7
values = append(values, bool(v))
case quad.Float:
nodeKey = 8
values = append(values, float64(v))
case quad.Time:
nodeKey = 9
values = append(values, time.Time(v))
default:
nodeKey = 0
p, err := pquads.MarshalValue(v)
if err != nil {
clog.Errorf("couldn't marshal value: %v", err)
return 0, nil, err
}
values = append(values, p)
}
return nodeKey, values, nil
}
func (qs *QuadStore) ApplyDeltas(in []graph.Delta, opts graph.IgnoreOpts) error {
tx, err := qs.db.Begin()
if err != nil {
clog.Errorf("couldn't begin write transaction: %v", err)
return err
}
err = qs.flavor.RunTx(tx, in, opts)
if err != nil {
tx.Rollback()
return err
}
qs.size = -1 // TODO(barakmich): Sync size with writes.
return tx.Commit()
}
func (qs *QuadStore) Quad(val graph.Value) quad.Quad {
h := val.(QuadHashes)
return quad.Quad{
Subject: qs.NameOf(h.Get(quad.Subject)),
Predicate: qs.NameOf(h.Get(quad.Predicate)),
Object: qs.NameOf(h.Get(quad.Object)),
Label: qs.NameOf(h.Get(quad.Label)),
}
}
func (qs *QuadStore) QuadIterator(d quad.Direction, val graph.Value) graph.Iterator {
return newSQLLinkIterator(qs, d, val.(NodeHash))
}
func (qs *QuadStore) NodesAllIterator() graph.Iterator {
return NewAllIterator(qs, "nodes")
}
func (qs *QuadStore) QuadsAllIterator() graph.Iterator {
return NewAllIterator(qs, "quads")
}
func (qs *QuadStore) ValueOf(s quad.Value) graph.Value {
return NodeHash(hashOf(s))
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return nil
}
switch value := value.(type) {
case time.Time:
nt.Time, nt.Valid = value, true
case []byte:
t, err := time.Parse("2006-01-02 15:04:05.999999", string(value))
if err != nil {
return err
}
nt.Time, nt.Valid = t, true
default:
return fmt.Errorf("unsupported time format: %T: %v", value, value)
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
func (qs *QuadStore) NameOf(v graph.Value) quad.Value {
if v == nil {
if clog.V(2) {
clog.Infof("NameOf was nil")
}
return nil
} else if v, ok := v.(graph.PreFetchedValue); ok {
return v.NameOf()
}
hash := v.(NodeHash)
if !hash.Valid() {
if clog.V(2) {
clog.Infof("NameOf was nil")
}
return nil
}
if val, ok := qs.ids.Get(hash.String()); ok {
return val.(quad.Value)
}
query := `SELECT
value,
value_string,
datatype,
language,
iri,
bnode,
value_int,
value_bool,
value_float,
value_time
FROM nodes WHERE hash = ` + qs.flavor.Placeholder(1) + ` LIMIT 1;`
c := qs.db.QueryRow(query, hash.toSQL())
var (
data []byte
str sql.NullString
typ sql.NullString
lang sql.NullString
iri sql.NullBool
bnode sql.NullBool
vint sql.NullInt64
vbool sql.NullBool
vfloat sql.NullFloat64
vtime NullTime
)
if err := c.Scan(
&data,
&str,
&typ,
&lang,
&iri,
&bnode,
&vint,
&vbool,
&vfloat,
&vtime,
); err != nil {
clog.Errorf("Couldn't execute value lookup: %v", err)
return nil
}
var val quad.Value
if str.Valid {
if iri.Bool {
val = quad.IRI(str.String)
} else if bnode.Bool {
val = quad.BNode(str.String)
} else if lang.Valid {
val = quad.LangString{
Value: quad.String(unescapeNullByte(str.String)),
Lang: lang.String,
}
} else if typ.Valid {
val = quad.TypedString{
Value: quad.String(unescapeNullByte(str.String)),
Type: quad.IRI(typ.String),
}
} else {
val = quad.String(unescapeNullByte(str.String))
}
} else if vint.Valid {
val = quad.Int(vint.Int64)
} else if vbool.Valid {
val = quad.Bool(vbool.Bool)
} else if vfloat.Valid {
val = quad.Float(vfloat.Float64)
} else if vtime.Valid {
val = quad.Time(vtime.Time)
} else {
qv, err := pquads.UnmarshalValue(data)
if err != nil {
clog.Errorf("Couldn't unmarshal value: %v", err)
return nil
}
val = qv
}
if val != nil {
qs.ids.Put(hash.String(), val)
}
return val
}
func (qs *QuadStore) Size() int64 {
if qs.size != -1 {
return qs.size
}
query := "SELECT COUNT(*) FROM quads;"
if qs.useEstimates && qs.flavor.Estimated != nil {
query = qs.flavor.Estimated("quads")
}
c := qs.db.QueryRow(query)
err := c.Scan(&qs.size)
if err != nil {
clog.Errorf("Couldn't execute COUNT: %v", err)
return 0
}
return qs.size
}
func (qs *QuadStore) Horizon() graph.PrimaryKey {
var horizon int64
err := qs.db.QueryRow("SELECT horizon FROM quads ORDER BY horizon DESC LIMIT 1;").Scan(&horizon)
if err != nil {
if err != sql.ErrNoRows {
clog.Errorf("Couldn't execute horizon: %v", err)
}
return graph.NewSequentialKey(0)
}
return graph.NewSequentialKey(horizon)
}
func (qs *QuadStore) FixedIterator() graph.FixedIterator
|
{
return iterator.NewFixed(iterator.Identity)
}
|
identifier_body
|
|
tbs.rs
|
BS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // coding error if this panics (I think?)
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
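/// Illustrative sketch only, not part of the original module: extracts the raw
/// SIG(0) to-be-signed bytes for a message so they can be handed to a signer.
/// The `message` and `pre_sig0` values are assumed to be built by the caller.
fn example_message_tbs_bytes<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<Vec<u8>> {
    let tbs = message_tbs(message, pre_sig0)?;
    let bytes: &[u8] = tbs.as_ref();
    Ok(bytes.to_vec())
}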
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature will be valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
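/// Illustrative sketch only, not part of the original module: rebuilds the
/// to-be-signed bytes of an RRset from its RRSIG so they can be handed to a
/// signature verifier. `rrsig` and `records` are assumed to come from a parsed
/// zone or response.
fn example_rrset_tbs_bytes(rrsig: &Record, records: &[Record]) -> ProtoResult<Vec<u8>> {
    let tbs = rrset_tbs_with_rrsig(rrsig, records)?;
    let bytes: &[u8] = tbs.as_ref();
    Ok(bytes.to_vec())
}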
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn
|
(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
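///
/// For example (illustrative only): with an RRset owner name of
/// `www.example.com.` (3 labels) and an RRSIG Labels field of 2, the name used
/// for signing and verification becomes the wildcard form `*.example.com.`;
/// with a Labels field of 3 the owner name is used unchanged.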
pub fn determine_name(name: &Name,
|
rrset_tbs_with_sig
|
identifier_name
|
tbs.rs
|
BS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // coding error if this panics (I think?)
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature will be valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS>
|
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_sig(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
pub fn determine_name(name: &Name,
|
{
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
|
identifier_body
|
tbs.rs
|
TBS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // coding error if this panics (I think?)
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature will be valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
|
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_sig(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
pub fn determine_name(name: &Name,
|
random_line_split
|
|
tbs.rs
|
_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // coding error if this panics (I think?)
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature will be valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_sig(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
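///
/// For example (illustrative only): with an RRset owner name of
/// `www.example.com.` (3 labels) and an RRSIG Labels field of 2, the name used
/// for signing and verification becomes the wildcard form `*.example.com.`;
/// with a Labels field of 3 the owner name is used unchanged.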
pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> {
// To calculate the name:
// let rrsig_labels = the value of the RRSIG Labels field
//
// let fqdn = RRset's fully qualified domain name in
// canonical form
//
// let fqdn_labels = Label count of the fqdn above.
let fqdn_labels = name.num_labels();
// if rrsig_labels = fqdn_labels,
// name = fqdn
if fqdn_labels == num_labels
|
{
return Ok(name.clone());
}
|
conditional_block
|
|
index.js
|
.LED_COLORS[led][1];
obj.material.transparent = (obj.material.opacity < 1) ? true : false;
});
};
Drone.prototype._addLEDs = function(group) {
var leds = [];
for (var i = 0; i < 4; i++) {
var led = new this.game.THREE.Mesh(
new this.game.THREE.CubeGeometry(this.size/20, this.size/20, this.size/20),
new this.game.THREE.MeshLambertMaterial({color:0x000000,ambient:0xffffff,emissive:0x000000})
);
led.translateX((this.size / 3) * (Math.sin(deg2Rad(i * 90) + deg2Rad(45))));
led.translateZ((this.size / 3) * (Math.cos(deg2Rad(i * 90) + deg2Rad(45))));
leds.push(led);
if (group) group.add(led);
}
return leds;
};
Drone.prototype._emitNavdata = function(seq) {
var self = this;
with (self._navdata) {
sequenceNumber = seq;
demo.batteryPercentage = Math.floor((self._batteryLevel / self.batteryCapacity) * 100);
droneState.flying = self.flying ? 1 : 0;
// todo: set this closer to actual states
demo.controlState = self.flying ? 'CTRL_FLYING' : 'CTRL_LANDED';
if (self._drone !== false) {
/*demo.rotation.frontBack = demo.rotation.pitch = demo.rotation.theta = demo.rotation.y = demo.frontBackDegrees = self._drone.avatar.mesh.rotation.x;
demo.rotation.leftRight = demo.rotation.roll = demo.rotation.phi = demo.rotation.x = demo.leftRightDegrees = self._drone.avatar.mesh.rotation.z;
demo.rotation.clockwise = demo.rotation.yaw = demo.rotation.psi = demo.rotation.z = demo.clockwiseDegrees = self._drone.avatar.mesh.rotation.y;
demo.velocity.x = demo.xVelocity = self._drone.velocity.z;
demo.velocity.y = demo.yVelocity = self._drone.velocity.x;
demo.velocity.z = demo.zVelocity = self._drone.velocity.y;*/
// todo: calculate altitude
}
}
return self._navdata;
};
Drone.prototype._handleREF = function(dt, drone, cmd) {
var self = this;
if (cmd.args[0] === 512) {
setxyz(drone.resting, false);
if (!self.flying) {
// takeoff!
drone.removeForce(self.game.gravity);
drone.velocity.y += 0.002;
self.flying = true;
tic.timeout(function() { drone.velocity.y = 0; }, 500);
}
} else {
if (self.flying) {
// land!
self.stop();
setxyz(drone.velocity, 0);
setxyz(drone.avatar.children[0].rotation, 0);
drone.subjectTo(self.game.gravity);
self.flying = false;
// TODO: land more realistically
}
}
};
Drone.prototype._handlePCMD = function(dt, drone, cmd) {
if (!this.flying) return;
setxyz(drone.velocity, 0);
// args: flags, leftRight, frontBack, upDown, clockWise
// don't know why leftRight/frontBack are totally switched, but they are!
var frontBack = cmd.args[2] || 0;
var leftRight = cmd.args[1] || 0;
var upDown = cmd.args[3] || 0;
var clockwise = cmd.args[4] || 0;
// reduce speed
var tilt = this.tilt / 100;
var verticalSpeed = this.verticalSpeed / 100;
var rot = drone.avatar.children[0];
// todo: figure auto leveling out
// when it hits 0, it doesn't level for some reason
rot.rotation.x = anim(dt, rot.rotation.x, -frontBack/2);
if (frontBack !== 0) drone.velocity.z = frontBack * tilt;
else if (!this._animating) rot.rotation.x = 0;
rot.rotation.z = anim(dt, rot.rotation.z, -leftRight/2);
if (leftRight !== 0) drone.velocity.x = -leftRight * tilt;
else if (!this._animating) rot.rotation.z = 0;
if (upDown !== 0) drone.velocity.y += upDown * verticalSpeed;
if (clockwise !== 0) drone.rotation.y += clockwise * this.yawSpeed;
// tmp fallback level out
if (frontBack === 0 && leftRight === 0 && !this._animating) {
rot.rotation.x = 0;
rot.rotation.z = 0;
}
// cap the amount of tilt
if (Math.abs(rot.rotation.z) >= 1 && !this._animating) {
rot.rotation.z = rot.rotation.z < 0 ? -1 : 1;
}
if (Math.abs(rot.rotation.x) >= 1 && !this._animating) {
rot.rotation.x = rot.rotation.x < 0 ? -1 : 1;
}
};
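// Illustrative only: the command shape this handler expects (values here are
// hypothetical); args order is [flags, leftRight, frontBack, upDown, clockwise]:
//
//   self._handlePCMD(dt, drone, { type: 'PCMD', args: [1, 0, 0.5, 0, 0] });
//
// With frontBack = 0.5 the drone pitches forward and velocity.z is set to
// 0.5 * (this.tilt / 100).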
// Handle AT*CONFIG
Drone.prototype._handleCONFIG = function(dt, drone, cmd) {
switch (cmd.args[0]) {
case 'control:flight_anim':
this._handleANIM(dt, drone, cmd);
break;
case 'leds:leds_anim':
this._handleLED(dt, drone, cmd);
break;
}
};
// Handle AT*CONFIG=1,control:flight_anim
Drone.prototype._handleANIM = function(dt, drone, cmd) {
var self = this;
if (!self.flying || this._animating) return;
// todo: tweak this closer to actual drone
var duration = Number(cmd.args[2]) * 10;
var type = this.ANIMATIONS[parseInt(cmd.args[1])];
self._animating = true;
tic.timeout(function() { self._animating = false; }, duration);
switch (type) {
case 'flipLeft': case 'flipRight':
case 'flipAhead': case 'flipBehind':
// todo: for longer durations this gets out of hand. should only happen once.
drone.velocity.y += 0.0045;
tic.timeout(function() {
var amt = (type === 'flipLeft' || type === 'flipAhead') ? deg2Rad(360) : -deg2Rad(360);
var dir = (type === 'flipLeft' || type === 'flipRight') ? 'x' : 'z';
drone.avatar.children[0].rotation[dir] = anim(dt, drone.avatar.children[0].rotation[dir], amt, duration);
}, duration / 5);
// todo: better adjust above to mimic actual drone
// where it flies up dramatically flips and comes down
tic.timeout(function() {
drone.velocity.y -= 0.002;
}, duration - (duration / 10));
break;
// todo: handle the other animations
}
};
// Handle AT*CONFIG=1,control:leds_anim
// todo: this is totally not correct!
Drone.prototype._handleLED = function(dt, drone, cmd) {
var self = this;
if (this._ledanimating) return;
var type = this.LED_ANIMATIONS[parseInt(cmd.args[1])];
var hz = Number(cmd.args[2]);
var duration = Number(cmd.args[3]) * 1000;
var on = 0;
var i = 0;
self.leds('blank');
var clearInterval = tic.interval(function() {
if (!self._ledanimating) return;
switch (type) {
case 'blinkRed':
case 'blinkGreen':
case 'blinkOrange':
var n = type === 'blinkRed' ? 1 : type === 'blinkGreen' ? 2 : 3;
on = Math.sin(TAU * hz * i) > 0 ? n : 0;
self.leds([on, on, on, on]);
break;
case 'blinkStandard':
self.leds(Math.sin(TAU * hz * i) > 0 ? 'standard' : 'blank');
break;
case 'blinkGreenRed':
self.leds(Math.sin(TAU * hz * i) > 0 ? 'green' : 'red');
break;
default:
self.leds(type);
break;
// todo: handle other leds animations
}
i += 0.01;
}, 100);
self._ledanimating = true;
tic.timeout(function() {
clearInterval();
self.leds('standard');
self._ledanimating = false;
}, duration);
};
function setxyz(item, x, y, z) {
if (arguments.length < 3) {
y = x; z = x;
}
item.x = x; item.y = y; item.z = z;
}
// animate values to produce smoother results
function
|
anim
|
identifier_name
|
|
index.js
|
});
// start up emitters
self.resume();
// emit navdata
var seq = 0;
setInterval(function() {
if (options.udpNavdataStream._initialized === true) {
options.udpNavdataStream._socket.emit('message', self._emitNavdata(seq++));
}
}, 100);
};
util.inherits(Drone, ardrone.Client);
module.exports = function(options) { return new Drone(options); };
module.exports.Drone = Drone;
// return the drone item to add to game
Drone.prototype.item = function(item) {
var self = this;
if (item) {
item.tick = self.createTick(item);
self._drone = item;
return self._drone;
}
var group = new self.game.THREE.Object3D();
var drone = new self.game.THREE.Mesh(
new self.game.THREE.CubeGeometry(self.size, self.size/6, self.size),
self.game.materials.material
);
drone.position.set(0, self.size/6, 0);
drone.rotation.y = deg2Rad(-90);
group.add(drone);
self.game.materials.load([[
'drone-side', 'drone-front',
'drone-top', 'drone-bottom',
'drone-side', 'drone-side'
]], function(textures) {
self.game.materials.paint(drone, textures[0]);
});
self._leds = self._addLEDs(group);
self.leds('standard');
self._drone = self.game.addItem({
mesh: group,
size: self.size,
velocity: {x: 0, y: 0, z: 0}
});
self._drone.tick = self.createTick(self._drone);
return self._drone;
};
// process AT* commands to control drone
Drone.prototype.createTick = function(drone) {
var self = this;
var dt = 0;
var oldTick = drone.tick || function() {};
return function(delta) {
dt += 0.01;
// drain battery - video on, flying, animating
self._batteryLevel -= (self._animating && self.flying) ? 4
: (self.flying) ? 1.75
: 0.5;
// dead battery X|
if (self._batteryLevel <= 0) { self.land(); return; }
oldTick.call(drone, delta);
var didem = [];
self._cmds.forEach(function(cmd) {
// only process the first unique
if (didem.indexOf(cmd.type + cmd.args[0]) !== -1) return;
didem.push(cmd.type + cmd.args[0]);
self['_handle' + cmd.type](dt, drone, cmd);
});
self._cmds = [];
// render the camera, follow the drone
if (self._cameraControl) {
self._cameraControl.render(
self._drone,
new self.game.THREE.Vector3(-2, 0, 0),
new self.game.THREE.Vector3(-100, 0, 0)
);
// monitor follows the player
self._monitor.position = self.game.controls.yawObject.position.clone();
self._monitor.position.z += 2;
self._monitor.position.y += 0.75;
}
};
};
// display video monitor
// todo: integrate more with ar-drone lib
// also where is the bottom camera?
Drone.prototype.viewCamera = function() {
var self = this;
if (!self._cameraControl) {
self._cameraControl = createCam(self.game);
// use the camera's png stream :D
self._pngStream = self._cameraControl;
// add the camera
var camera = self._cameraControl.camera();
self.game.scene.add(camera);
self._monitor = new self.game.THREE.Object3D();
var height = 1;
var padding = 0.01;
var video = new self.game.THREE.Mesh(
new self.game.THREE.CubeGeometry(1.77 * height, height, 0),
new self.game.THREE.MeshBasicMaterial({
map: self._cameraControl.monitor()
})
);
self._monitor.add(video);
// border
var border = new self.game.THREE.Mesh(
new self.game.THREE.CubeGeometry((1.77 * height) + padding, height + padding, padding),
new self.game.THREE.MeshBasicMaterial({color: 0x000000})
);
border.position.set(0, 0, padding);
self._monitor.add(border);
self._monitor.rotation.x = deg2Rad(60);
self.game.scene.add(self._monitor);
}
return self._monitor;
};
// turn on/off the leds
Drone.prototype.leds = function(leds) {
var self = this;
if (typeof leds === 'string') {
if (leds === 'red') leds = [1, 1, 1, 1];
else if (leds === 'green') leds = [2, 2, 2, 2];
else if (leds === 'standard') leds = [1, 1, 2, 2];
else leds = [0, 0, 0, 0];
}
leds.forEach(function(led, i) {
var obj = self._leds[i];
obj.material.color = obj.material.emissive = self.LED_COLORS[led][0];
obj.material.opacity = self.LED_COLORS[led][1];
obj.material.transparent = obj.material.opacity < 1;
});
};
Drone.prototype._addLEDs = function(group) {
var leds = [];
for (var i = 0; i < 4; i++) {
var led = new this.game.THREE.Mesh(
new this.game.THREE.CubeGeometry(this.size/20, this.size/20, this.size/20),
new this.game.THREE.MeshLambertMaterial({color:0x000000,ambient:0xffffff,emissive:0x000000})
);
led.translateX((this.size / 3) * (Math.sin(deg2Rad(i * 90) + deg2Rad(45))));
led.translateZ((this.size / 3) * (Math.cos(deg2Rad(i * 90) + deg2Rad(45))));
leds.push(led);
if (group) group.add(led);
}
return leds;
};
Drone.prototype._emitNavdata = function(seq) {
var self = this;
with (self._navdata) {
sequenceNumber = seq;
demo.batteryPercentage = Math.floor((self._batteryLevel / self.batteryCapacity) * 100);
droneState.flying = self.flying ? 1 : 0;
// todo: set this closer to actual states
demo.controlState = self.flying ? 'CTRL_FLYING' : 'CTRL_LANDED';
if (self._drone !== false) {
/*demo.rotation.frontBack = demo.rotation.pitch = demo.rotation.theta = demo.rotation.y = demo.frontBackDegrees = self._drone.avatar.mesh.rotation.x;
demo.rotation.leftRight = demo.rotation.roll = demo.rotation.phi = demo.rotation.x = demo.leftRightDegrees = self._drone.avatar.mesh.rotation.z;
demo.rotation.clockwise = demo.rotation.yaw = demo.rotation.psi = demo.rotation.z = demo.clockwiseDegrees = self._drone.avatar.mesh.rotation.y;
demo.velocity.x = demo.xVelocity = self._drone.velocity.z;
demo.velocity.y = demo.yVelocity = self._drone.velocity.x;
demo.velocity.z = demo.zVelocity = self._drone.velocity.y;*/
// todo: calculate altitude
}
}
return self._navdata;
};
Drone.prototype._handleREF = function(dt, drone, cmd) {
var self = this;
if (cmd.args[0] === 512) {
setxyz(drone.resting, false);
if (!self.flying) {
// takeoff!
drone.removeForce(self.game.gravity);
drone.velocity.y += 0.002;
self.flying = true;
tic.timeout(function() { drone.velocity.y = 0; }, 500);
}
} else {
if (self.flying) {
// land!
self.stop();
setxyz(drone.velocity, 0);
setxyz(drone.avatar.children[0].rotation, 0);
drone.subjectTo(self.game.gravity);
self.flying = false;
// TODO: land more realistically
}
}
};
Drone.prototype._handlePCMD = function(dt, drone, cmd) {
if (!this.flying) return;
setxyz(drone.velocity, 0);
// args: flags, leftRight, frontBack, upDown, clockWise
// don't know why leftRight/frontBack are totally switched, but they are!
var frontBack = cmd.args[2] || 0;
var leftRight = cmd.args[1] || 0;
var upDown = cmd.args[3] || 0;
var clockwise =
|
random_line_split
|
||
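The tick handler in createTick above processes only the first command for each (type, first argument) pair per frame. A rough Python equivalent of that dedup-while-preserving-order step is sketched below; the command objects are modeled as plain dicts here, which is an assumption made only for the illustration.
def first_unique_commands(cmds):
    # Keep only the first command for each (type, first-arg) key, in arrival order,
    # mirroring the didem bookkeeping in createTick.
    seen = set()
    unique = []
    for cmd in cmds:
        key = (cmd["type"], cmd["args"][0])
        if key in seen:
            continue
        seen.add(key)
        unique.append(cmd)
    return unique

cmds = [
    {"type": "PCMD", "args": [1, 0, 0.5]},
    {"type": "PCMD", "args": [1, 0, 0.9]},   # dropped: same type and first arg
    {"type": "REF",  "args": [512]},
]
print(first_unique_commands(cmds))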
index.js
|
led][0];
obj.material.opacity = self.LED_COLORS[led][1];
obj.material.transparent = obj.material.opacity < 1;
});
};
Drone.prototype._addLEDs = function(group) {
var leds = [];
for (var i = 0; i < 4; i++) {
var led = new this.game.THREE.Mesh(
new this.game.THREE.CubeGeometry(this.size/20, this.size/20, this.size/20),
new this.game.THREE.MeshLambertMaterial({color:0x000000,ambient:0xffffff,emissive:0x000000})
);
led.translateX((this.size / 3) * (Math.sin(deg2Rad(i * 90) + deg2Rad(45))));
led.translateZ((this.size / 3) * (Math.cos(deg2Rad(i * 90) + deg2Rad(45))));
leds.push(led);
if (group) group.add(led);
}
return leds;
};
Drone.prototype._emitNavdata = function(seq) {
var self = this;
with (self._navdata) {
sequenceNumber = seq;
demo.batteryPercentage = Math.floor((self._batteryLevel / self.batteryCapacity) * 100);
droneState.flying = self.flying ? 1 : 0;
// todo: set this closer to actual states
demo.controlState = self.flying ? 'CTRL_FLYING' : 'CTRL_LANDED';
if (self._drone !== false) {
/*demo.rotation.frontBack = demo.rotation.pitch = demo.rotation.theta = demo.rotation.y = demo.frontBackDegrees = self._drone.avatar.mesh.rotation.x;
demo.rotation.leftRight = demo.rotation.roll = demo.rotation.phi = demo.rotation.x = demo.leftRightDegrees = self._drone.avatar.mesh.rotation.z;
demo.rotation.clockwise = demo.rotation.yaw = demo.rotation.psi = demo.rotation.z = demo.clockwiseDegrees = self._drone.avatar.mesh.rotation.y;
demo.velocity.x = demo.xVelocity = self._drone.velocity.z;
demo.velocity.y = demo.yVelocity = self._drone.velocity.x;
demo.velocity.z = demo.zVelocity = self._drone.velocity.y;*/
// todo: calculate altitude
}
}
return self._navdata;
};
Drone.prototype._handleREF = function(dt, drone, cmd) {
var self = this;
if (cmd.args[0] === 512) {
setxyz(drone.resting, false);
if (!self.flying) {
// takeoff!
drone.removeForce(self.game.gravity);
drone.velocity.y += 0.002;
self.flying = true;
tic.timeout(function() { drone.velocity.y = 0; }, 500);
}
} else {
if (self.flying) {
// land!
self.stop();
setxyz(drone.velocity, 0);
setxyz(drone.avatar.children[0].rotation, 0);
drone.subjectTo(self.game.gravity);
self.flying = false;
// TODO: land more realistically
}
}
};
Drone.prototype._handlePCMD = function(dt, drone, cmd) {
if (!this.flying) return;
setxyz(drone.velocity, 0);
// args: flags, leftRight, frontBack, upDown, clockWise
// don't know why leftRight/frontBack are totally switched, but they are!
var frontBack = cmd.args[2] || 0;
var leftRight = cmd.args[1] || 0;
var upDown = cmd.args[3] || 0;
var clockwise = cmd.args[4] || 0;
// reduce speed
var tilt = this.tilt / 100;
var verticalSpeed = this.verticalSpeed / 100;
var rot = drone.avatar.children[0];
// todo: figure auto leveling out
// when it hits 0, it doesn't level for some reason
rot.rotation.x = anim(dt, rot.rotation.x, -frontBack/2);
if (frontBack !== 0) drone.velocity.z = frontBack * tilt;
else if (!this._animating) rot.rotation.x = 0;
rot.rotation.z = anim(dt, rot.rotation.z, -leftRight/2);
if (leftRight !== 0) drone.velocity.x = -leftRight * tilt;
else if (!this._animating) rot.rotation.z = 0;
if (upDown !== 0) drone.velocity.y += upDown * verticalSpeed;
if (clockwise !== 0) drone.rotation.y += clockwise * this.yawSpeed;
// tmp fallback level out
if (frontBack === 0 && leftRight === 0 && !this._animating) {
rot.rotation.x = 0;
rot.rotation.z = 0;
}
// cap the amount of tilt
if (Math.abs(rot.rotation.z) >= 1 && !this._animating) {
rot.rotation.z = rot.rotation.z < 0 ? -1 : 1;
}
if (Math.abs(rot.rotation.x) >= 1 && !this._animating) {
rot.rotation.x = rot.rotation.x < 0 ? -1 : 1;
}
};
// Handle AT*CONFIG
Drone.prototype._handleCONFIG = function(dt, drone, cmd) {
switch (cmd.args[0]) {
case 'control:flight_anim':
this._handleANIM(dt, drone, cmd);
break;
case 'leds:leds_anim':
this._handleLED(dt, drone, cmd);
break;
}
};
// Handle AT*CONFIG=1,control:flight_anim
Drone.prototype._handleANIM = function(dt, drone, cmd) {
var self = this;
if (!self.flying || this._animating) return;
// todo: tweak this closer to actual drone
var duration = Number(cmd.args[2]) * 10;
var type = this.ANIMATIONS[parseInt(cmd.args[1])];
self._animating = true;
tic.timeout(function() { self._animating = false; }, duration);
switch (type) {
case 'flipLeft': case 'flipRight':
case 'flipAhead': case 'flipBehind':
// todo: for longer durations this gets out of hand. should only happen once.
drone.velocity.y += 0.0045;
tic.timeout(function() {
var amt = (type === 'flipLeft' || type === 'flipAhead') ? deg2Rad(360) : -deg2Rad(360);
var dir = (type === 'flipLeft' || type === 'flipRight') ? 'x' : 'z';
drone.avatar.children[0].rotation[dir] = anim(dt, drone.avatar.children[0].rotation[dir], amt, duration);
}, duration / 5);
// todo: better adjust above to mimic actual drone
// where it flies up dramatically, flips, and comes down
tic.timeout(function() {
drone.velocity.y -= 0.002;
}, duration - (duration / 10));
break;
// todo: handle the other animations
}
};
// Handle AT*CONFIG=1,leds:leds_anim
// todo: this is totally not correct!
Drone.prototype._handleLED = function(dt, drone, cmd) {
var self = this;
if (this._ledanimating) return;
var type = this.LED_ANIMATIONS[parseInt(cmd.args[1])];
var hz = Number(cmd.args[2]);
var duration = Number(cmd.args[3]) * 1000;
var on = 0;
var i = 0;
self.leds('blank');
var clearInterval = tic.interval(function() {
if (!self._ledanimating) return;
switch (type) {
case 'blinkRed':
case 'blinkGreen':
case 'blinkOrange':
var n = type === 'blinkRed' ? 1 : type === 'blinkGreen' ? 2 : 3;
on = Math.sin(TAU * hz * i) > 0 ? n : 0;
self.leds([on, on, on, on]);
break;
case 'blinkStandard':
self.leds(Math.sin(TAU * hz * i) > 0 ? 'standard' : 'blank');
break;
case 'blinkGreenRed':
self.leds(Math.sin(TAU * hz * i) > 0 ? 'green' : 'red');
break;
default:
self.leds(type);
break;
// todo: handle other leds animations
}
i += 0.01;
}, 100);
self._ledanimating = true;
tic.timeout(function() {
clearInterval();
self.leds('standard');
self._ledanimating = false;
}, duration);
};
function setxyz(item, x, y, z)
|
{
if (arguments.length < 3) {
y = x; z = x;
}
item.x = x; item.y = y; item.z = z;
}
|
identifier_body
|
|
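_addLEDs above places the four LEDs on a ring of radius size / 3 at 45, 135, 225, and 315 degrees around the body. A small Python sketch of that placement math follows; size = 1 is an arbitrary choice made for the example.
import math

def deg2rad(deg):
    return deg * math.pi / 180

def led_offsets(size=1.0):
    # One (x, z) offset per LED, matching the translateX/translateZ calls in _addLEDs:
    # radius size / 3, angles 45 + 90 * i degrees.
    offsets = []
    for i in range(4):
        angle = deg2rad(i * 90) + deg2rad(45)
        offsets.append(((size / 3) * math.sin(angle), (size / 3) * math.cos(angle)))
    return offsets

for x, z in led_offsets():
    print(round(x, 3), round(z, 3))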
function.py
|
out
# =================Feature computation===============================================================
# calture_normal_feature: for a single image, compute the center-crop feature at the 256 scale (the 1+2 baseline in the paper)
# calture_pool_feature: for a single image, crop sub-images at the configured size with a fixed step length, extract their features, then max-pool
# get_feature_scala_256: compute the normal_feature for the whole database
# get_pool_feature: compute the pool_feature for the whole database
# get_cam_feature:
# ========================================================================================
def calture_normal_feature(pic, caffenet, mean_data):
transformer = imageTransformer(caffenet, mean_data)
feature = np.zeros((1, 4096))
# cv2.imshow('0',pic)
sp = pic.shape
y = sp[0]
x = sp[1]
if x < y:
y = (int)((y * 256) / x)
x = 256
else:
x = (int)((x * 256) / y)
y = 256
im_256 = cv2.resize(pic, (x, y))
# cv2.imshow('1',im_256)
# cv2.waitKey(0)
im_224 = im_256[((int)(y / 2) - 112):((int)(y / 2) + 112), ((int)(x / 2) - 112):((int)(x / 2) + 112)]
im = transformer.preprocess('data', im_224)
'''
im = np.transpose(im_224, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
feature[0] = caffenet.blobs['fc7'].data[0]
return feature
def calture_pool_feature(pic, picsize, cropsize, steplength, caffenet, mean_data, parallelnum=1, dropLabel=False):
im = np.zeros((parallelnum, 3, cropsize, cropsize))
transformer = imageTransformer(caffenet, mean_data)
feature_max = np.zeros((parallelnum, 4096))
feature_max = feature_max - 9999
# crop boxes at each scale and extract their features
step = (picsize - cropsize) // steplength
for m in range(step + 1):
for n in range(step + 1):
x = m * steplength
y = n * steplength
if x > picsize - cropsize:
x = picsize - cropsize
if y > picsize - cropsize:
y = picsize - cropsize
crop = pic[:, y:y + cropsize, x:x + cropsize, :]  ### crop is a 4-D array: n, h, w, c
if dropLabel == True:
dropInt = random.randint(1, 100)
if dropInt > 75:
continue
for i in range(parallelnum):
im[i] = transformer.preprocess('data', crop[i])
'''
im = np.transpose(crop, (0, 3, 1, 2))
im = im - mean_data
'''
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for i in range(parallelnum):
for j in range(4096):
if tmp[i][j] >= feature_max[i][j]:
feature_max[i][j] = tmp[i][j]
# tmp[i][j] = tmp[i][j]/(step+1)*(step+1)
# feature_mean[i][j] = feature_mean[i][j] + tmp[i][j]
return feature_max
def get_feature_scala_256(db, cursor, caffenet, tbname, rownum, datafloder, mean_data, featurename):
feature_all = []
for i in range(rownum):
print '============current id is :%d ==============' % (i + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID = '%d'" % (i + 1)
cursor.execute(sql)
result = cursor.fetchall()
url = datafloder + result[0][0]
im_ori = cv2.imread(url)
cur_feature = calture_normal_feature(im_ori, caffenet, mean_data)
feature_all.extend(cur_feature)
feature_all = np.asarray(feature_all, dtype='float32')
print feature_all.shape
# write the features to the database
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_all)
def get_pool_feature(db, cursor, tbname, rownum, picsize, cropsize, steplength, caffenet, datafloder, mean_data,
featurename, parallelnum=1):
feature_max = []
for i in range(int(rownum / parallelnum)):
print '============current id is :%d ==============' % (i * parallelnum + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID >= '%d' and ID <= '%d'" % (
i * parallelnum + 1, (i + 1) * parallelnum)
cursor.execute(sql)
result = cursor.fetchall()
im = np.zeros((parallelnum, picsize, picsize, 3))
for j in range(parallelnum):
url = datafloder + result[j][0]
im_ori = cv2.imread(url)
im[j, :, :, :] = cv2.resize(im_ori, (picsize, picsize))
current_max = calture_pool_feature(im, picsize, cropsize, steplength, caffenet, mean_data, parallelnum)
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
def get_cam_feature(db, cursor, tbname, file_url, caffenet, datafloder, mean_data, featurename):
transformer = imageTransformer(caffenet, mean_data)
feature_max = []
file = open(file_url, 'r')
count = 0
while True:
current_max = np.zeros((1, 4096))
current_max -= 9999
line = file.readline()
line = line.strip()
if not line:
break
count += 1
print '----------------------current ID is: {}---------------------'.format(count)
url = datafloder + line
img = cv2.imread(url)
x = float(img.shape[0])
y = float(img.shape[1])
if x < 224 or y < 224:
scale1 = x / y
scale2 = y / x
if scale1 < scale2:
img = cv2.resize(img, (int(scale2 * 224), 224))
else:
img = cv2.resize(img, (224, int(scale1 * 224)))
x = img.shape[0]
y = img.shape[1]
if x > 451 and y > 451:
steplength = 70
else:
steplength = 35
step_x = (x - 224) / steplength + 1
step_y = (y - 224) / steplength + 1
for i in range(step_x):
for j in range
|
x = i * steplength
y = j * steplength
crop = img[x:x + 224, y:y + 224, :]
im = transformer.preprocess('data', crop)
'''
im = np.transpose(crop, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for k in range(4096):
if tmp[0][k] >= current_max[0][k]:
current_max[0][k] = tmp[0][k]
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
file.close()
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
# ======================Feature extraction============================
# ==========================================================
# feature6
|
(step_y):
|
conditional_block
|
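calture_pool_feature above slides a crop window over the resized image with a fixed step length, clamps the window at the borders, and keeps the element-wise maximum of the per-crop features. Below is a simplified NumPy sketch of that loop; the dummy feature extractor stands in for the Caffe forward pass and the single-image 2-D layout is a simplification, both assumptions made only for illustration.
import numpy as np

def dummy_features(crop):
    # Stand-in for caffenet.forward(); the real code reads blobs['fc7'].
    return np.array([crop.mean(), crop.std(), crop.max(), crop.min()])

def pooled_feature(image, cropsize, steplength):
    picsize = image.shape[0]
    step = (picsize - cropsize) // steplength
    feature_max = np.full(4, -np.inf)
    for m in range(step + 1):
        for n in range(step + 1):
            # Clamp the window so it never runs past the image border.
            x = min(m * steplength, picsize - cropsize)
            y = min(n * steplength, picsize - cropsize)
            crop = image[y:y + cropsize, x:x + cropsize]
            # Element-wise running maximum over all crops.
            feature_max = np.maximum(feature_max, dummy_features(crop))
    return feature_max

print(pooled_feature(np.random.rand(256, 256), cropsize=224, steplength=16))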
function.py
|
out
# =================Feature computation===============================================================
# calture_normal_feature: for a single image, compute the center-crop feature at the 256 scale (the 1+2 baseline in the paper)
# calture_pool_feature: for a single image, crop sub-images at the configured size with a fixed step length, extract their features, then max-pool
# get_feature_scala_256: compute the normal_feature for the whole database
# get_pool_feature: compute the pool_feature for the whole database
# get_cam_feature:
# ========================================================================================
def calture_normal_feature(pic, caffenet, mean_data):
transformer = imageTransformer(caffenet, mean_data)
feature = np.zeros((1, 4096))
# cv2.imshow('0',pic)
sp = pic.shape
y = sp[0]
x = sp[1]
if x < y:
y = (int)((y * 256) / x)
x = 256
else:
x = (int)((x * 256) / y)
y = 256
im_256 = cv2.resize(pic, (x, y))
# cv2.imshow('1',im_256)
# cv2.waitKey(0)
im_224 = im_256[((int)(y / 2) - 112):((int)(y / 2) + 112), ((int)(x / 2) - 112):((int)(x / 2) + 112)]
im = transformer.preprocess('data', im_224)
'''
im = np.transpose(im_224, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
feature[0] = caffenet.blobs['fc7'].data[0]
return feature
def calture_pool_feature(pic, picsize, cropsize, steplength, caffenet, mean_data, parallelnum=1, dropLabel=False):
im = np.zeros((parallelnum, 3, cropsize, cropsize))
transformer = imageTransformer(caffenet, mean_data)
feature_max = np.zeros((parallelnum, 4096))
feature_max = feature_max - 9999
# crop boxes at each scale and extract their features
step = (picsize - cropsize) // steplength
for m in range(step + 1):
for n in range(step + 1):
x = m * steplength
y = n * steplength
if x > picsize - cropsize:
x = picsize - cropsize
if y > picsize - cropsize:
y = picsize - cropsize
crop = pic[:, y:y + cropsize, x:x + cropsize, :]  ### crop is a 4-D array: n, h, w, c
if dropLabel == True:
dropInt = random.randint(1, 100)
if dropInt > 75:
continue
for i in range(parallelnum):
im[i] = transformer.preprocess('data', crop[i])
'''
im = np.transpose(crop, (0, 3, 1, 2))
im = im - mean_data
'''
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for i in range(parallelnum):
for j in range(4096):
if tmp[i][j] >= feature_max[i][j]:
feature_max[i][j] = tmp[i][j]
# tmp[i][j] = tmp[i][j]/(step+1)*(step+1)
# feature_mean[i][j] = feature_mean[i][j] + tmp[i][j]
return feature_max
def get_feature_scala_256(db, cursor, caffenet, tbname, rownum, datafloder, mean_data, featurename):
feature_all = []
for i in range(rownum):
print '============current id is :%d ==========
|
sql = "SELECT URL FROM " + tbname + " WHERE ID = '%d'" % (i + 1)
cursor.execute(sql)
result = cursor.fetchall()
url = datafloder + result[0][0]
im_ori = cv2.imread(url)
cur_feature = calture_normal_feature(im_ori, caffenet, mean_data)
feature_all.extend(cur_feature)
feature_all = np.asarray(feature_all, dtype='float32')
print feature_all.shape
# write the features to the database
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_all)
def get_pool_feature(db, cursor, tbname, rownum, picsize, cropsize, steplength, caffenet, datafloder, mean_data,
featurename, parallelnum=1):
feature_max = []
for i in range(int(rownum / parallelnum)):
print '============current id is :%d ==============' % (i * parallelnum + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID >= '%d' and ID <= '%d'" % (
i * parallelnum + 1, (i + 1) * parallelnum)
cursor.execute(sql)
result = cursor.fetchall()
im = np.zeros((parallelnum, picsize, picsize, 3))
for j in range(parallelnum):
url = datafloder + result[j][0]
im_ori = cv2.imread(url)
im[j, :, :, :] = cv2.resize(im_ori, (picsize, picsize))
current_max = calture_pool_feature(im, picsize, cropsize, steplength, caffenet, mean_data, parallelnum)
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
def get_cam_feature(db, cursor, tbname, file_url, caffenet, datafloder, mean_data, featurename):
transformer = imageTransformer(caffenet, mean_data)
feature_max = []
file = open(file_url, 'r')
count = 0
while True:
current_max = np.zeros((1, 4096))
current_max -= 9999
line = file.readline()
line = line.strip()
if not line:
break
count += 1
print '----------------------current ID is: {}---------------------'.format(count)
url = datafloder + line
img = cv2.imread(url)
x = float(img.shape[0])
y = float(img.shape[1])
if x < 224 or y < 224:
scale1 = x / y
scale2 = y / x
if scale1 < scale2:
img = cv2.resize(img, (int(scale2 * 224), 224))
else:
img = cv2.resize(img, (224, int(scale1 * 224)))
x = img.shape[0]
y = img.shape[1]
if x > 451 and y > 451:
steplength = 70
else:
steplength = 35
step_x = (x - 224) / steplength + 1
step_y = (y - 224) / steplength + 1
for i in range(step_x):
for j in range(step_y):
x = i * steplength
y = j * steplength
crop = img[x:x + 224, y:y + 224, :]
im = transformer.preprocess('data', crop)
'''
im = np.transpose(crop, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for k in range(4096):
if tmp[0][k] >= current_max[0][k]:
current_max[0][k] = tmp[0][k]
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
file.close()
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
# ======================Feature extraction============================
# ==========================================================
# feature6
|
====' % (i + 1)
|
identifier_name
|
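calture_normal_feature above resizes the shorter image side to 256 while keeping the aspect ratio, then takes a 224x224 center crop. Here is that preprocessing step in isolation, using OpenCV the same way the original does; the network forward pass is left out, so this is only a sketch of the crop logic.
import cv2
import numpy as np

def center_crop_224(pic):
    h, w = pic.shape[:2]
    # Scale the shorter side to 256, preserving the aspect ratio.
    if w < h:
        h, w = (h * 256) // w, 256
    else:
        w, h = (w * 256) // h, 256
    resized = cv2.resize(pic, (w, h))
    # Take the central 224x224 window.
    cy, cx = h // 2, w // 2
    return resized[cy - 112:cy + 112, cx - 112:cx + 112]

print(center_crop_224(np.zeros((480, 640, 3), dtype=np.uint8)).shape)  # (224, 224, 3)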
function.py
|
# =================Feature computation===============================================================
# calture_normal_feature: for a single image, compute the center-crop feature at the 256 scale (the 1+2 baseline in the paper)
# calture_pool_feature: for a single image, crop sub-images at the configured size with a fixed step length, extract their features, then max-pool
# get_feature_scala_256: compute the normal_feature for the whole database
# get_pool_feature: compute the pool_feature for the whole database
# get_cam_feature:
# ========================================================================================
def calture_normal_feature(pic, caffenet, mean_data):
transformer = imageTransformer(caffenet, mean_data)
feature = np.zeros((1, 4096))
# cv2.imshow('0',pic)
sp = pic.shape
y = sp[0]
x = sp[1]
if x < y:
y = (int)((y * 256) / x)
x = 256
else:
x = (int)((x * 256) / y)
y = 256
im_256 = cv2.resize(pic, (x, y))
# cv2.imshow('1',im_256)
# cv2.waitKey(0)
im_224 = im_256[((int)(y / 2) - 112):((int)(y / 2) + 112), ((int)(x / 2) - 112):((int)(x / 2) + 112)]
im = transformer.preprocess('data', im_224)
'''
im = np.transpose(im_224, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
feature[0] = caffenet.blobs['fc7'].data[0]
return feature
def calture_pool_feature(pic, picsize, cropsize, steplength, caffenet, mean_data, parallelnum=1, dropLabel=False):
im = np.zeros((parallelnum, 3, cropsize, cropsize))
transformer = imageTransformer(caffenet, mean_data)
feature_max = np.zeros((parallelnum, 4096))
feature_max = feature_max - 9999
# crop boxes at each scale and extract their features
step = (picsize - cropsize) // steplength
for m in range(step + 1):
for n in range(step + 1):
x = m * steplength
y = n * steplength
if x > picsize - cropsize:
x = picsize - cropsize
if y > picsize - cropsize:
y = picsize - cropsize
crop = pic[:, y:y + cropsize, x:x + cropsize, :]  ### crop is a 4-D array: n, h, w, c
if dropLabel == True:
dropInt = random.randint(1, 100)
if dropInt > 75:
continue
for i in range(parallelnum):
im[i] = transformer.preprocess('data', crop[i])
'''
im = np.transpose(crop, (0, 3, 1, 2))
im = im - mean_data
'''
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for i in range(parallelnum):
for j in range(4096):
if tmp[i][j] >= feature_max[i][j]:
feature_max[i][j] = tmp[i][j]
# tmp[i][j] = tmp[i][j]/(step+1)*(step+1)
# feature_mean[i][j] = feature_mean[i][j] + tmp[i][j]
return feature_max
def get_feature_scala_256(db, cursor, caffenet, tbname, rownum, datafloder, mean_data, featurename):
feature_all = []
for i in range(rownum):
print '============current id is :%d ==============' % (i + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID = '%d'" % (i + 1)
cur
|
t(rownum / parallelnum)):
print '============current id is :%d ==============' % (i * parallelnum + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID >= '%d' and ID <= '%d'" % (
i * parallelnum + 1, (i + 1) * parallelnum)
cursor.execute(sql)
result = cursor.fetchall()
im = np.zeros((parallelnum, picsize, picsize, 3))
for j in range(parallelnum):
url = datafloder + result[j][0]
im_ori = cv2.imread(url)
im[j, :, :, :] = cv2.resize(im_ori, (picsize, picsize))
current_max = calture_pool_feature(im, picsize, cropsize, steplength, caffenet, mean_data, parallelnum)
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
def get_cam_feature(db, cursor, tbname, file_url, caffenet, datafloder, mean_data, featurename):
transformer = imageTransformer(caffenet, mean_data)
feature_max = []
file = open(file_url, 'r')
count = 0
while True:
current_max = np.zeros((1, 4096))
current_max -= 9999
line = file.readline()
line = line.strip()
if not line:
break
count += 1
print '----------------------current ID is: {}---------------------'.format(count)
url = datafloder + line
img = cv2.imread(url)
x = float(img.shape[0])
y = float(img.shape[1])
if x < 224 or y < 224:
scale1 = x / y
scale2 = y / x
if scale1 < scale2:
img = cv2.resize(img, (int(scale2 * 224), 224))
else:
img = cv2.resize(img, (224, int(scale1 * 224)))
x = img.shape[0]
y = img.shape[1]
if x > 451 and y > 451:
steplength = 70
else:
steplength = 35
step_x = (x - 224) / steplength + 1
step_y = (y - 224) / steplength + 1
for i in range(step_x):
for j in range(step_y):
x = i * steplength
y = j * steplength
crop = img[x:x + 224, y:y + 224, :]
im = transformer.preprocess('data', crop)
'''
im = np.transpose(crop, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for k in range(4096):
if tmp[0][k] >= current_max[0][k]:
current_max[0][k] = tmp[0][k]
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
file.close()
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
# ======================Feature extraction============================
# ==========================================================
# feature
|
sor.execute(sql)
result = cursor.fetchall()
url = datafloder + result[0][0]
im_ori = cv2.imread(url)
cur_feature = calture_normal_feature(im_ori, caffenet, mean_data)
feature_all.extend(cur_feature)
feature_all = np.asarray(feature_all, dtype='float32')
print feature_all.shape
# write the features to the database
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_all)
def get_pool_feature(db, cursor, tbname, rownum, picsize, cropsize, steplength, caffenet, datafloder, mean_data,
featurename, parallelnum=1):
feature_max = []
for i in range(in
|
identifier_body
|
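The inner double loop over the batch dimension and the 4096 fc7 dimensions in calture_pool_feature and get_cam_feature above is simply an element-wise running maximum. As a side note, the same update can be written as one vectorized call; the snippet below only demonstrates the equivalence and is not part of the original script.
import numpy as np

# Element-by-element loop, as written in the original code ...
def running_max_loop(feature_max, tmp):
    for i in range(tmp.shape[0]):
        for j in range(tmp.shape[1]):
            if tmp[i][j] >= feature_max[i][j]:
                feature_max[i][j] = tmp[i][j]
    return feature_max

# ... and the equivalent vectorized update.
def running_max_vectorized(feature_max, tmp):
    return np.maximum(feature_max, tmp)

a = np.full((2, 4), -9999.0)
b = np.random.rand(2, 4)
assert np.allclose(running_max_loop(a.copy(), b), running_max_vectorized(a, b))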
function.py
|
# =================Feature computation===============================================================
# calture_normal_feature: for a single image, compute the center-crop feature at the 256 scale (the 1+2 baseline in the paper)
# calture_pool_feature: for a single image, crop sub-images at the configured size with a fixed step length, extract their features, then max-pool
# get_feature_scala_256: compute the normal_feature for the whole database
# get_pool_feature: compute the pool_feature for the whole database
# get_cam_feature:
# ========================================================================================
def calture_normal_feature(pic, caffenet, mean_data):
transformer = imageTransformer(caffenet, mean_data)
feature = np.zeros((1, 4096))
# cv2.imshow('0',pic)
sp = pic.shape
y = sp[0]
x = sp[1]
if x < y:
y = (int)((y * 256) / x)
x = 256
else:
x = (int)((x * 256) / y)
y = 256
im_256 = cv2.resize(pic, (x, y))
# cv2.imshow('1',im_256)
# cv2.waitKey(0)
im_224 = im_256[((int)(y / 2) - 112):((int)(y / 2) + 112), ((int)(x / 2) - 112):((int)(x / 2) + 112)]
im = transformer.preprocess('data', im_224)
'''
im = np.transpose(im_224, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
feature[0] = caffenet.blobs['fc7'].data[0]
return feature
def calture_pool_feature(pic, picsize, cropsize, steplength, caffenet, mean_data, parallelnum=1, dropLabel=False):
im = np.zeros((parallelnum, 3, cropsize, cropsize))
transformer = imageTransformer(caffenet, mean_data)
feature_max = np.zeros((parallelnum, 4096))
feature_max = feature_max - 9999
# crop boxes at each scale and extract their features
step = (picsize - cropsize) // steplength
for m in range(step + 1):
for n in range(step + 1):
x = m * steplength
y = n * steplength
if x > picsize - cropsize:
x = picsize - cropsize
if y > picsize - cropsize:
y = picsize - cropsize
crop = pic[:, y:y + cropsize, x:x + cropsize, :]  ### crop is a 4-D array: n, h, w, c
if dropLabel == True:
dropInt = random.randint(1, 100)
if dropInt > 75:
continue
for i in range(parallelnum):
im[i] = transformer.preprocess('data', crop[i])
'''
im = np.transpose(crop, (0, 3, 1, 2))
im = im - mean_data
'''
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for i in range(parallelnum):
for j in range(4096):
if tmp[i][j] >= feature_max[i][j]:
feature_max[i][j] = tmp[i][j]
# tmp[i][j] = tmp[i][j]/(step+1)*(step+1)
# feature_mean[i][j] = feature_mean[i][j] + tmp[i][j]
return feature_max
def get_feature_scala_256(db, cursor, caffenet, tbname, rownum, datafloder, mean_data, featurename):
feature_all = []
for i in range(rownum):
print '============current id is :%d ==============' % (i + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID = '%d'" % (i + 1)
cursor.execute(sql)
result = cursor.fetchall()
url = datafloder + result[0][0]
im_ori = cv2.imread(url)
cur_feature = calture_normal_feature(im_ori, caffenet, mean_data)
feature_all.extend(cur_feature)
feature_all = np.asarray(feature_all, dtype='float32')
print feature_all.shape
# write the features to the database
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_all)
def get_pool_feature(db, cursor, tbname, rownum, picsize, cropsize, steplength, caffenet, datafloder, mean_data,
featurename, parallelnum=1):
feature_max = []
for i in range(int(rownum / parallelnum)):
print '============current id is :%d ==============' % (i * parallelnum + 1)
sql = "SELECT URL FROM " + tbname + " WHERE ID >= '%d' and ID <= '%d'" % (
i * parallelnum + 1, (i + 1) * parallelnum)
cursor.execute(sql)
result = cursor.fetchall()
im = np.zeros((parallelnum, picsize, picsize, 3))
for j in range(parallelnum):
url = datafloder + result[j][0]
im_ori = cv2.imread(url)
im[j, :, :, :] = cv2.resize(im_ori, (picsize, picsize))
current_max = calture_pool_feature(im, picsize, cropsize, steplength, caffenet, mean_data, parallelnum)
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
|
feature_max = []
file = open(file_url, 'r')
count = 0
while True:
current_max = np.zeros((1, 4096))
current_max -= 9999
line = file.readline()
line = line.strip()
if not line:
break
count += 1
print '----------------------current ID is: {}---------------------'.format(count)
url = datafloder + line
img = cv2.imread(url)
x = float(img.shape[0])
y = float(img.shape[1])
if x < 224 or y < 224:
scale1 = x / y
scale2 = y / x
if scale1 < scale2:
img = cv2.resize(img, (int(scale2 * 224), 224))
else:
img = cv2.resize(img, (224, int(scale1 * 224)))
x = img.shape[0]
y = img.shape[1]
if x > 451 and y > 451:
steplength = 70
else:
steplength = 35
step_x = (x - 224) / steplength + 1
step_y = (y - 224) / steplength + 1
for i in range(step_x):
for j in range(step_y):
x = i * steplength
y = j * steplength
crop = img[x:x + 224, y:y + 224, :]
im = transformer.preprocess('data', crop)
'''
im = np.transpose(crop, (2, 0, 1))
im = im - mean_data
'''
im = np.resize(im, (1, 3, 224, 224))
caffenet.blobs['data'].data[...] = im
caffenet.forward()
tmp = caffenet.blobs['fc7'].data
for k in range(4096):
if tmp[0][k] >= current_max[0][k]:
current_max[0][k] = tmp[0][k]
feature_max.extend(current_max)
feature_max = np.array(feature_max, dtype='float32')
print feature_max.shape
file.close()
write_feature_to_db(db=db, cursor=cursor, table_name=tbname, featurename=featurename, feature=feature_max)
# ======================Feature extraction============================
# ==========================================================
# feature
|
def get_cam_feature(db, cursor, tbname, file_url, caffenet, datafloder, mean_data, featurename):
transformer = imageTransformer(caffenet, mean_data)
|
random_line_split
|
vue2jsx.ts
|
{
constructor(public tagName: string = "", public parentNode: Nullable<ParsedNode> = null) { }
localVariables: string[] = [];
childNodes: ParsedNode[] = [];
startText: string = "";
endText: string = "";
startIf: boolean = false;
condition: string = "";
postProcessor: { (text: string): string } = t => t;
appendChild(tagName: string) {
var newNode = new ParsedNode(tagName, this);
this.childNodes.push(newNode);
return newNode;
}
render() {
let jsx;
if (this.startText == "<template>")
{
jsx = '[ ';
for(let i = 0; i < this.childNodes.length; i++) {
const child = this.childNodes[i];
jsx += child.render();
if (child.tagName != "#text")
jsx += ", ";
}
jsx = jsx.replace(/,(\s*)$/, '$1') + ' ]';
}
else
{
jsx = this.startText;
for(let i = 0; i < this.childNodes.length; i++) {
const child = this.childNodes[i];
jsx += child.render();
}
jsx += this.endText;
}
return this.postProcessor(jsx);
}
}
function vue2jsx(html: string) {
var startTagRegex = /^<(!?[-A-Za-z0-9_]+)((?:\s+[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>/,
endTagRegex = /^<\/([-A-Za-z0-9_]+)[^>]*>/;
var attrRegex = /\s*[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?/g;
var special: Dictionary<number> = { script: 1, style: 1 };
var index, chars, match, stack: any[] & { last?: Function } = [], last = html;
stack.last = function () {
return this[this.length - 1];
};
var currentNode = new ParsedNode();
var rootNode = currentNode;
while (html) {
chars = true;
// Make sure we're not in a script or style element
if (!stack.last() || !special[stack.last()]) {
// Comment
if (html.indexOf("<!--") == 0) {
index = html.indexOf("-->");
if (index >= 0) {
html = html.substring(index + 3);
chars = false;
}
// end tag
} else if (html.indexOf("</") == 0) {
match = html.match(endTagRegex);
if (match) {
html = html.substring(match[0].length);
currentNode.endText = match[0];
currentNode = currentNode.parentNode!;
chars = false;
}
// start tag
} else if (html.indexOf("<") == 0) {
match = html.match(startTagRegex);
if (match) {
html = html.substring(match[0].length);
currentNode = currentNode.appendChild(match[1]);
let startTagJsx = "";
let attrsMatch = match[2].match(attrRegex);
let attrsJsx = "";
if (attrsMatch) {
for (var i = 0; i < attrsMatch.length; i++) {
if (attrsMatch[i].replace(/^\s+/, '') == '')
continue;
let tagName = match[1];
let name = attrsMatch[i].replace(/=.*/, '').replace(/^\s+/, '');
let value: string | true = attrsMatch[i].replace(/^[^=]+=/, '');
if (attrsMatch[i].indexOf('=') === -1)
value = true;
let attrJsx = processAttr(tagName, name, value, currentNode);
if (attrJsx)
attrsJsx += " " + attrJsx;
}
}
startTagJsx += "<" + match[1] + attrsJsx + match[match.length - 1] + ">";
currentNode.startText = startTagJsx;
if (match[match.length - 1] == "/")
currentNode = currentNode.parentNode!;
chars = false;
}
}
if (chars) {
index = html.indexOf("<");
var text = index < 0 ? html : html.substring(0, index);
html = index < 0 ? "" : html.substring(index);
let textNode = currentNode.appendChild("#text");
textNode.startText = text.replace(/{{\s*([^}]+)\s*}}/g, "{ $1 }");
}
} else {
html = html.substring(html.indexOf("</" + stack.last() + ">"));
}
if (html == last) {
throw new Error("Parse Error at: " + html)
}
last = html;
}
return rootNode;
}
function processAttr(tagName: string, name: string, value: string | true, currentNode: ParsedNode) {
let jsxAttr = name + "=" + value;
if (value === true) {
jsxAttr = name;
}
else if (name.indexOf("v-on:") == 0) {
name = "on" + name.substr(5);
value = processJs(value.slice(1, -1).replace(/^\s+/, ''), currentNode);
let param = "()";
let condition = "";
if (name.endsWith(".enter")) {
name = name.slice(0, -6);
param = "e";
condition = "e.keyCode == 13";
}
if (value.indexOf(';') === -1)
value = `${param} => ${condition ? condition + " && " : ""}${value}`;
else if (condition)
value = `${param} => { if (${condition}) { ${value} } }`;
else
value = `${param} => { ${value} }`;
jsxAttr = name + "={ " + value + " }";
}
else if (name.indexOf("v-bind:") == 0) {
name = name.substr(7);
jsxAttr = name + "={ " + processJs(value.slice(1, -1), currentNode) + " }";
}
else if (name == "v-for") {
let [elem, elems] = value.slice(1, -1).split(' in ');
if (elem.indexOf(',') > -1 && elem.indexOf('(') == -1)
elem = "(" + elem + ")";
jsxAttr = "";
currentNode.localVariables = elem.replace(/^\(|\)$|\s+/g, '').split(',');
currentNode.postProcessor = t => `{ this.${elems}.map(${elem} => ${t}) }`;
}
else if (name == "v-if") {
jsxAttr = "";
const condition = processJs(value.slice(1, -1), currentNode);
currentNode.startIf = true;
currentNode.condition = condition;
currentNode.postProcessor = t => `{ ${condition} && ${t} }`;
}
else if (name == "v-else-if") {
jsxAttr = "";
const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text");
const prevNode = children[children.length - 2];
const condition = processJs(value.slice(1, -1), currentNode);
currentNode.condition = condition;
if (prevNode.startIf)
prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
else
prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
currentNode.postProcessor = t => ` : ${condition} ? ${t} : null }`;
}
else if (name == "v-else") {
jsxAttr = "";
const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text");
const prevNode = children[children.length - 2];
if (prevNode.startIf)
prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
else
prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
currentNode.postProcessor = t => ` : ${t} }`;
}
else if (name == "v-model") {
const oninput = tagName == 'input' || tagName == 'textarea' ? "oninput" : "onchange";
const model = processJs(value.slice(1, -1), currentNode);
jsxAttr = `value={ ${model} } ${oninput}={ e => ${model} = e.target.value }`;
}
return jsxAttr;
}
function processJs(jsCode: string, currentNode: ParsedNode)
{
let fileNode = ts.createSourceFile("test.ts", "(" + jsCode + ")", ts.ScriptTarget.ES5);
let localVariables = [];
while (currentNode.parentNode) {
currentNode = currentNode.parentNode;
localVariables = localVariables.concat(currentNode.localVariables);
}
let positions: number[] = [];
analyse(fileNode);
positions
.map(p => fixPos(--p))
.filter(p => /[a-z$_]/.test(jsCode.substr(p, 1)))
.filter(p => localVariables.indexOf(jsCode.substr(p).match(/^[a-zA-Z$_]+/)[0]) == -1)
.sort((a, b)
|
ParsedNode
|
identifier_name
|
|
vue2jsx.ts
|
jsx(html: string) {
var startTagRegex = /^<(!?[-A-Za-z0-9_]+)((?:\s+[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>/,
endTagRegex = /^<\/([-A-Za-z0-9_]+)[^>]*>/;
var attrRegex = /\s*[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?/g;
var special: Dictionary<number> = { script: 1, style: 1 };
var index, chars, match, stack: any[] & { last?: Function } = [], last = html;
stack.last = function () {
return this[this.length - 1];
};
var currentNode = new ParsedNode();
var rootNode = currentNode;
while (html) {
chars = true;
// Make sure we're not in a script or style element
if (!stack.last() || !special[stack.last()]) {
// Comment
if (html.indexOf("<!--") == 0) {
index = html.indexOf("-->");
if (index >= 0) {
html = html.substring(index + 3);
chars = false;
}
// end tag
} else if (html.indexOf("</") == 0) {
match = html.match(endTagRegex);
if (match) {
html = html.substring(match[0].length);
currentNode.endText = match[0];
currentNode = currentNode.parentNode!;
chars = false;
}
// start tag
} else if (html.indexOf("<") == 0) {
match = html.match(startTagRegex);
if (match) {
html = html.substring(match[0].length);
currentNode = currentNode.appendChild(match[1]);
let startTagJsx = "";
let attrsMatch = match[2].match(attrRegex);
let attrsJsx = "";
if (attrsMatch) {
for (var i = 0; i < attrsMatch.length; i++) {
if (attrsMatch[i].replace(/^\s+/, '') == '')
continue;
let tagName = match[1];
let name = attrsMatch[i].replace(/=.*/, '').replace(/^\s+/, '');
let value: string | true = attrsMatch[i].replace(/^[^=]+=/, '');
if (attrsMatch[i].indexOf('=') === -1)
value = true;
let attrJsx = processAttr(tagName, name, value, currentNode);
if (attrJsx)
attrsJsx += " " + attrJsx;
}
}
startTagJsx += "<" + match[1] + attrsJsx + match[match.length - 1] + ">";
currentNode.startText = startTagJsx;
if (match[match.length - 1] == "/")
currentNode = currentNode.parentNode!;
chars = false;
}
}
if (chars) {
index = html.indexOf("<");
var text = index < 0 ? html : html.substring(0, index);
html = index < 0 ? "" : html.substring(index);
let textNode = currentNode.appendChild("#text");
textNode.startText = text.replace(/{{\s*([^}]+)\s*}}/g, "{ $1 }");
}
} else {
html = html.substring(html.indexOf("</" + stack.last() + ">"));
}
if (html == last) {
throw new Error("Parse Error at: " + html)
}
last = html;
}
return rootNode;
}
function processAttr(tagName: string, name: string, value: string | true, currentNode: ParsedNode) {
let jsxAttr = name + "=" + value;
if (value === true) {
jsxAttr = name;
}
else if (name.indexOf("v-on:") == 0) {
name = "on" + name.substr(5);
value = processJs(value.slice(1, -1).replace(/^\s+/, ''), currentNode);
let param = "()";
let condition = "";
if (name.endsWith(".enter")) {
name = name.slice(0, -6);
param = "e";
condition = "e.keyCode == 13";
}
if (value.indexOf(';') === -1)
value = `${param} => ${condition ? condition + " && " : ""}${value}`;
else if (condition)
value = `${param} => { if (${condition}) { ${value} } }`;
else
value = `${param} => { ${value} }`;
jsxAttr = name + "={ " + value + " }";
}
else if (name.indexOf("v-bind:") == 0) {
name = name.substr(7);
jsxAttr = name + "={ " + processJs(value.slice(1, -1), currentNode) + " }";
}
else if (name == "v-for") {
let [elem, elems] = value.slice(1, -1).split(' in ');
if (elem.indexOf(',') > -1 && elem.indexOf('(') == -1)
elem = "(" + elem + ")";
jsxAttr = "";
currentNode.localVariables = elem.replace(/^\(|\)$|\s+/g, '').split(',');
currentNode.postProcessor = t => `{ this.${elems}.map(${elem} => ${t}) }`;
}
else if (name == "v-if") {
jsxAttr = "";
const condition = processJs(value.slice(1, -1), currentNode);
currentNode.startIf = true;
currentNode.condition = condition;
currentNode.postProcessor = t => `{ ${condition} && ${t} }`;
}
else if (name == "v-else-if") {
jsxAttr = "";
const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text");
const prevNode = children[children.length - 2];
const condition = processJs(value.slice(1, -1), currentNode);
currentNode.condition = condition;
if (prevNode.startIf)
prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
else
prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
currentNode.postProcessor = t => ` : ${condition} ? ${t} : null }`;
}
else if (name == "v-else") {
jsxAttr = "";
const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text");
const prevNode = children[children.length - 2];
if (prevNode.startIf)
prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
else
prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
currentNode.postProcessor = t => ` : ${t} }`;
}
else if (name == "v-model") {
const oninput = tagName == 'input' || tagName == 'textarea' ? "oninput" : "onchange";
const model = processJs(value.slice(1, -1), currentNode);
jsxAttr = `value={ ${model} } ${oninput}={ e => ${model} = e.target.value }`;
}
return jsxAttr;
}
function processJs(jsCode: string, currentNode: ParsedNode)
{
let fileNode = ts.createSourceFile("test.ts", "(" + jsCode + ")", ts.ScriptTarget.ES5);
let localVariables = [];
while (currentNode.parentNode) {
currentNode = currentNode.parentNode;
localVariables = localVariables.concat(currentNode.localVariables);
}
let positions: number[] = [];
analyse(fileNode);
positions
.map(p => fixPos(--p))
.filter(p => /[a-z$_]/.test(jsCode.substr(p, 1)))
.filter(p => localVariables.indexOf(jsCode.substr(p).match(/^[a-zA-Z$_]+/)[0]) == -1)
.sort((a, b) => b - a)
.forEach(p => jsCode = jsCode.substr(0, p) + "this." + jsCode.substr(p));
return jsCode;
function analyse(node: ts.Node)
{
if (node.kind == ts.SyntaxKind.ParenthesizedExpression) {
const expr = <ts.ParenthesizedExpression>node;
if (expr.expression.kind == ts.SyntaxKind.Identifier)
positions.push(expr.expression.pos);
}
if (node.kind == ts.SyntaxKind.ElementAccessExpression
|| node.kind == ts.SyntaxKind.PropertyAccessExpression) {
positions.push(node.pos);
return;
}
if (node.kind == ts.SyntaxKind.CallExpression && (<ts.CallExpression>node).expression.kind == ts.SyntaxKind.Identifier)
positions.push(node.pos);
if (node.kind == ts.SyntaxKind.BinaryExpression) {
const binExpr = <ts.BinaryExpression>node;
if (binExpr.right.kind == ts.SyntaxKind.Identifier)
positions.push(binExpr.right.pos);
if (binExpr.left.kind == ts.SyntaxKind.Identifier)
positions.push(binExpr.left.pos);
}
ts.forEachChild(node, analyse);
}
function fixPos(pos) {
while(/\s/.test(jsCode.substr(pos, 1)) && pos < jsCode.length)
|
pos++;
return pos;
}
|
random_line_split
|
|
vue2jsx.ts
|
(html: string) {
var startTagRegex = /^<(!?[-A-Za-z0-9_]+)((?:\s+[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>/,
endTagRegex = /^<\/([-A-Za-z0-9_]+)[^>]*>/;
var attrRegex = /\s*[\w\-\:\.]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?/g;
var special: Dictionary<number> = { script: 1, style: 1 };
var index, chars, match, stack: any[] & { last?: Function } = [], last = html;
stack.last = function () {
return this[this.length - 1];
};
var currentNode = new ParsedNode();
var rootNode = currentNode;
while (html) {
chars = true;
// Make sure we're not in a script or style element
if (!stack.last() || !special[stack.last()]) {
// Comment
if (html.indexOf("<!--") == 0) {
index = html.indexOf("-->");
if (index >= 0) {
html = html.substring(index + 3);
chars = false;
}
// end tag
} else if (html.indexOf("</") == 0) {
match = html.match(endTagRegex);
if (match) {
html = html.substring(match[0].length);
currentNode.endText = match[0];
currentNode = currentNode.parentNode!;
chars = false;
}
// start tag
} else if (html.indexOf("<") == 0) {
match = html.match(startTagRegex);
if (match) {
html = html.substring(match[0].length);
currentNode = currentNode.appendChild(match[1]);
let startTagJsx = "";
let attrsMatch = match[2].match(attrRegex);
let attrsJsx = "";
if (attrsMatch) {
for (var i = 0; i < attrsMatch.length; i++) {
if (attrsMatch[i].replace(/^\s+/, '') == '')
continue;
let tagName = match[1];
let name = attrsMatch[i].replace(/=.*/, '').replace(/^\s+/, '');
let value: string | true = attrsMatch[i].replace(/^[^=]+=/, '');
if (attrsMatch[i].indexOf('=') === -1)
value = true;
let attrJsx = processAttr(tagName, name, value, currentNode);
if (attrJsx)
attrsJsx += " " + attrJsx;
}
}
startTagJsx += "<" + match[1] + attrsJsx + match[match.length - 1] + ">";
currentNode.startText = startTagJsx;
if (match[match.length - 1] == "/")
currentNode = currentNode.parentNode!;
chars = false;
}
}
if (chars) {
index = html.indexOf("<");
var text = index < 0 ? html : html.substring(0, index);
html = index < 0 ? "" : html.substring(index);
let textNode = currentNode.appendChild("#text");
textNode.startText = text.replace(/{{\s*([^}]+)\s*}}/g, "{ $1 }");
}
} else {
html = html.substring(html.indexOf("</" + stack.last() + ">"));
}
if (html == last) {
throw new Error("Parse Error at: " + html)
}
last = html;
}
return rootNode;
}
function processAttr(tagName: string, name: string, value: string | true, currentNode: ParsedNode) {
let jsxAttr = name + "=" + value;
if (value === true) {
jsxAttr = name;
}
else if (name.indexOf("v-on:") == 0) {
name = "on" + name.substr(5);
value = processJs(value.slice(1, -1).replace(/^\s+/, ''), currentNode);
let param = "()";
let condition = "";
if (name.endsWith(".enter")) {
name = name.slice(0, -6);
param = "e";
condition = "e.keyCode == 13";
}
if (value.indexOf(';') === -1)
value = `${param} => ${condition ? condition + " && " : ""}${value}`;
else if (condition)
value = `${param} => { if (${condition}) { ${value} } }`;
else
value = `${param} => { ${value} }`;
jsxAttr = name + "={ " + value + " }";
}
else if (name.indexOf("v-bind:") == 0) {
name = name.substr(7);
jsxAttr = name + "={ " + processJs(value.slice(1, -1), currentNode) + " }";
}
else if (name == "v-for") {
let [elem, elems] = value.slice(1, -1).split(' in ');
if (elem.indexOf(',') > -1 && elem.indexOf('(') == -1)
elem = "(" + elem + ")";
jsxAttr = "";
currentNode.localVariables = elem.replace(/^\(|\)$|\s+/g, '').split(',');
currentNode.postProcessor = t => `{ this.${elems}.map(${elem} => ${t}) }`;
}
else if (name == "v-if") {
jsxAttr = "";
const condition = processJs(value.slice(1, -1), currentNode);
currentNode.startIf = true;
currentNode.condition = condition;
currentNode.postProcessor = t => `{ ${condition} && ${t} }`;
}
else if (name == "v-else-if") {
jsxAttr = "";
const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text");
const prevNode = children[children.length - 2];
const condition = processJs(value.slice(1, -1), currentNode);
currentNode.condition = condition;
if (prevNode.startIf)
prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
else
prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
currentNode.postProcessor = t => ` : ${condition} ? ${t} : null }`;
}
else if (name == "v-else") {
jsxAttr = "";
const children = currentNode.parentNode.childNodes.filter(n => n.tagName != "#text");
const prevNode = children[children.length - 2];
if (prevNode.startIf)
prevNode.postProcessor = t => `{ ${prevNode.condition} ? ${t}`;
else
prevNode.postProcessor = t => ` : ${prevNode.condition} ? ${t}`;
currentNode.postProcessor = t => ` : ${t} }`;
}
else if (name == "v-model") {
const oninput = tagName == 'input' || tagName == 'textarea' ? "oninput" : "onchange";
const model = processJs(value.slice(1, -1), currentNode);
jsxAttr = `value={ ${model} } ${oninput}={ e => ${model} = e.target.value }`;
}
return jsxAttr;
}
function processJs(jsCode: string, currentNode: ParsedNode)
{
let fileNode = ts.createSourceFile("test.ts", "(" + jsCode + ")", ts.ScriptTarget.ES5);
let localVariables = [];
while (currentNode.parentNode) {
currentNode = currentNode.parentNode;
localVariables = localVariables.concat(currentNode.localVariables);
}
let positions: number[] = [];
analyse(fileNode);
positions
.map(p => fixPos(--p))
.filter(p => /[a-z$_]/.test(jsCode.substr(p, 1)))
.filter(p => localVariables.indexOf(jsCode.substr(p).match(/^[a-zA-Z$_]+/)[0]) == -1)
.sort((a, b) => b - a)
.forEach(p => jsCode = jsCode.substr(0, p) + "this." + jsCode.substr(p));
return jsCode;
function analyse(node: ts.Node)
{
if (node.kind == ts.SyntaxKind.ParenthesizedExpression) {
const expr = <ts.ParenthesizedExpression>node;
if (expr.expression.kind == ts.SyntaxKind.Identifier)
positions.push(expr.expression.pos);
}
if (node.kind == ts.SyntaxKind.ElementAccessExpression
|| node.kind == ts.SyntaxKind.PropertyAccessExpression) {
positions.push(node.pos);
return;
}
if (node.kind == ts.SyntaxKind.CallExpression && (<ts.CallExpression>node).expression.kind == ts.SyntaxKind.Identifier)
positions.push(node.pos);
if (node.kind == ts.SyntaxKind.BinaryExpression) {
const binExpr = <ts.BinaryExpression>node;
if (binExpr.right.kind == ts.SyntaxKind.Identifier)
positions.push(binExpr.right.pos);
if (binExpr.left.kind == ts.SyntaxKind.Identifier)
positions.push(binExpr.left.pos);
}
ts.forEachChild(node, analyse);
}
function fixPos(pos)
|
{
while(/\s/.test(jsCode.substr(pos, 1)) && pos < jsCode.length)
pos++;
return pos;
}
|
identifier_body
|
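processJs above records the source positions of free identifiers and then splices the string "this." in front of each one, iterating positions from the end so that earlier offsets stay valid. A language-neutral sketch of that splicing trick in Python; the position list here is hand-picked for the example rather than produced by a parser.
def prefix_with_this(js_code, positions):
    # Insert "this." before each recorded identifier position.
    # Iterating positions in descending order means an insertion never
    # shifts the offsets that are still to be processed.
    for p in sorted(positions, reverse=True):
        js_code = js_code[:p] + "this." + js_code[p:]
    return js_code

# 'items' starts at index 0 and 'selected' at index 18 in this hand-picked string.
print(prefix_with_this("items.filter(x => selected == x.id)", [0, 18]))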
|
cme_stats.py
|
IENCE_KEYWORD = 0, # used the CMR parameters
KEYWORD = 1, # used a free text search inside CMR
BOTH = 2 # Merge the results from science keyword and plain text search
# format a comma separated list into a semi-colon separated list
def format_lot(lot):
lot_str = str(lot)
lot_str = re.sub(r'[\[\]\(\)]', '', lot_str)
lot_str = re.sub(r', (\d+)', '(\\1)', lot_str)
lot_str = re.sub(r',', ';', lot_str)
return lot_str
# Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed),
# and false positives (extraneous)
def correct_missed_extraneous(ground_truths, predictions):
ground_truths = set(ground_truths)
correct = predictions & ground_truths
missed = ground_truths - predictions
extraneous = predictions - ground_truths
return correct, missed, extraneous
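# For example (hypothetical dataset names, shown only to illustrate the set arithmetic above):
#   correct_missed_extraneous(['ML2O3', 'MOD08'], {'MOD08', 'AIRS3STD'})
#   -> correct = {'MOD08'}, missed = {'ML2O3'}, extraneous = {'AIRS3STD'}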
# csv is a string which will be written to a csv file at the end
# running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end
def dump_data(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False):
# extract the platform/ins couples and models from the features
summary_stats = features['summary_stats']
couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True)
models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True)
title = re.sub(',', '', title)
# write key, title, platform/ins couples, and models to csv string
csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},'
# add a column with the manually reviewed datasets if the paper was manually reviewed
if manually_reviewed:
manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed'])
csv += f'{manual_ground_truths}'
# get TOP-N CMR results from pairs
cmr_results = set()
for inner_key, inner_value in features['cmr_results']['pairs'].items():
# the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches.
# get the predicted datasets from the appropriate search
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
datasets = inner_value['keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.BOTH:
# merge the two lists together, alternating order
l1 = inner_value['science_keyword_search']['dataset']
l2 = inner_value['keyword_search']['dataset']
i, j, datasets_temp = 0, 0, []
while i < len(l1) and j < len(l2):
datasets_temp.append(l1[i])
datasets_temp.append(l2[j])
i += 1
j += 1
if i < len(l1):
datasets_temp += l1[i:]
elif j < len(l2):
datasets_temp += l2[j:]
# remove duplicates
seen = set()
datasets = []
for i in range(len(datasets_temp)):
if datasets_temp[i] in seen:
continue
seen.add(datasets_temp[i])
datasets.append(datasets_temp[i])
if len(datasets) >= 1:
for predic in datasets[:n]:
cmr_results.add(predic)
# cmr queries based on the single instruments and not just the couples
if include_singles:
for inner_key, inner_value in features['cmr_results']['singles'].items():
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
single_datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
single_datasets = inner_value['keyword_search']['dataset']
else:
single_datasets = None
if single_datasets:
for predic in single_datasets[:n]:
if predic not in cmr_results:
cmr_results.add(predic)
    # create a semicolon-delimited string with the predicted datasets from CMR and add it to the csv string
cmr_list = ';'.join(list(cmr_results))
csv += f',{cmr_list}'
# If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were
# correct, missed, and extraneous.
if manually_reviewed:
correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results)
running_cme_stats['correct_count'] += len(correct)
running_cme_stats['missed_count'] += len(missed)
running_cme_stats['extraneous_count'] += len(extraneous)
        # keep counts of how often each dataset was correct (i.e., at the end we'll have something like: we predicted
        # ML2O3 correctly 54 times)
for corr in correct:
running_cme_stats['correct_dict'][corr] += 1
for miss in missed:
running_cme_stats['missed_dict'][miss] += 1
for extra in extraneous:
running_cme_stats['extraneous_dict'][extra] += 1
csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}'
return csv + "\n"
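The CMRSearchType.BOTH branch above interleaves the two ranked result lists and then drops duplicates while keeping the first occurrence. A standalone sketch of the same idea (interleave_unique is an illustrative helper, not part of this script; it assumes neither list contains None):

from itertools import zip_longest

def interleave_unique(l1, l2):
    # Alternate elements from both lists, then keep only the first occurrence of each.
    merged = [x for pair in zip_longest(l1, l2) for x in pair if x is not None]
    return list(dict.fromkeys(merged))

# interleave_unique(['A', 'B'], ['B', 'C']) -> ['A', 'B', 'C']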
if __name__ == '__main__':
# User Parameters
features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features
    key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicable
    n = 1 # range of top-n results to search, i.e., n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9
max_n = 9
    cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use CMR parameters in the search or use free text. See enum definition
include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches
# Declare the name of the output file
output_title = 'giovanni_' # change this
include_singles_string = 'with_singles_' if include_singles else ''
sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/'
base_location = 'stats_and_csv/giovanni/' + sub_folder # change this
with open(features_location, encoding='utf-8') as f:
features = json.load(f)
with open(key_title_ground_truth_location, encoding='utf-8') as f:
key_title_ground_truth = json.load(f)
correct, missed, extraneous = [], [], []
# make a folder if one doesn't exist
if not os.path.exists(base_location):
os.makedirs(base_location)
# run the top-n results for all values of n
while n <= max_n:
filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}'
added_pdfs = set()
running_cme_stats = {
"correct_count": 0,
"missed_count": 0,
"extraneous_count": 0,
"correct_dict": defaultdict(int),
"missed_dict": defaultdict(int),
"extraneous_dict": defaultdict(int)
}
|
for parent_key, value in key_title_ground_truth.items():
pdf_key = value['pdf']
added_pdfs.add(pdf_key)
if pdf_key in features:
# update both csv file and json file
csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats,
n=n, dataset_search_type=cmr_search_type)
# loop through the papers that were not manually reviewed
for key, value in features.items():
if key not in added_pdfs:
# update only csv file
csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type)
# sort the individual counts of number of times that a dataset was correct, missed, or extraneous
running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True))
# DON'T overwrite an existing file. Exit out in this case
if os.path.exists(filename
|
csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n"
# iterate through the manually reviewed papers. Add data into csv and json files via dump_data method
|
random_line_split
|
cme_stats.py
|
_KEYWORD = 0, # used the CMR parameters
KEYWORD = 1, # used a free text search inside CMR
BOTH = 2 # Merge the results from science keyword and plain text search
# format a comma separated list into a semi-colon separated list
def format_lot(lot):
lot_str = str(lot)
lot_str = re.sub(r'[\[\]\(\)]', '', lot_str)
lot_str = re.sub(r', (\d+)', '(\\1)', lot_str)
lot_str = re.sub(r',', ';', lot_str)
return lot_str
# Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed),
# and false positives (extraneous)
def correct_missed_extraneous(ground_truths, predictions):
ground_truths = set(ground_truths)
correct = predictions & ground_truths
missed = ground_truths - predictions
extraneous = predictions - ground_truths
return correct, missed, extraneous
# csv is a string which will be written to a csv file at the end
# running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end
def
|
(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False):
# extract the platform/ins couples and models from the features
summary_stats = features['summary_stats']
couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True)
models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True)
title = re.sub(',', '', title)
# write key, title, platform/ins couples, and models to csv string
csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},'
# add a column with the manually reviewed datasets if the paper was manually reviewed
if manually_reviewed:
manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed'])
csv += f'{manual_ground_truths}'
# get TOP-N CMR results from pairs
cmr_results = set()
for inner_key, inner_value in features['cmr_results']['pairs'].items():
# the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches.
# get the predicted datasets from the appropriate search
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
datasets = inner_value['keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.BOTH:
# merge the two lists together, alternating order
l1 = inner_value['science_keyword_search']['dataset']
l2 = inner_value['keyword_search']['dataset']
i, j, datasets_temp = 0, 0, []
while i < len(l1) and j < len(l2):
datasets_temp.append(l1[i])
datasets_temp.append(l2[j])
i += 1
j += 1
if i < len(l1):
datasets_temp += l1[i:]
elif j < len(l2):
datasets_temp += l2[j:]
# remove duplicates
seen = set()
datasets = []
for i in range(len(datasets_temp)):
if datasets_temp[i] in seen:
continue
seen.add(datasets_temp[i])
datasets.append(datasets_temp[i])
if len(datasets) >= 1:
for predic in datasets[:n]:
cmr_results.add(predic)
# cmr queries based on the single instruments and not just the couples
if include_singles:
for inner_key, inner_value in features['cmr_results']['singles'].items():
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
single_datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
single_datasets = inner_value['keyword_search']['dataset']
else:
single_datasets = None
if single_datasets:
for predic in single_datasets[:n]:
if predic not in cmr_results:
cmr_results.add(predic)
    # create a semicolon-delimited string with the predicted datasets from CMR and add it to the csv string
cmr_list = ';'.join(list(cmr_results))
csv += f',{cmr_list}'
# If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were
# correct, missed, and extraneous.
if manually_reviewed:
correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results)
running_cme_stats['correct_count'] += len(correct)
running_cme_stats['missed_count'] += len(missed)
running_cme_stats['extraneous_count'] += len(extraneous)
        # keep counts of how often each dataset was correct (i.e., at the end we'll have something like: we predicted
        # ML2O3 correctly 54 times)
for corr in correct:
running_cme_stats['correct_dict'][corr] += 1
for miss in missed:
running_cme_stats['missed_dict'][miss] += 1
for extra in extraneous:
running_cme_stats['extraneous_dict'][extra] += 1
csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}'
return csv + "\n"
if __name__ == '__main__':
# User Parameters
features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features
    key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicable
    n = 1 # range of top-n results to search, i.e., n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9
max_n = 9
    cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use CMR parameters in the search or use free text. See enum definition
include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches
# Declare the name of the output file
output_title = 'giovanni_' # change this
include_singles_string = 'with_singles_' if include_singles else ''
sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/'
base_location = 'stats_and_csv/giovanni/' + sub_folder # change this
with open(features_location, encoding='utf-8') as f:
features = json.load(f)
with open(key_title_ground_truth_location, encoding='utf-8') as f:
key_title_ground_truth = json.load(f)
correct, missed, extraneous = [], [], []
# make a folder if one doesn't exist
if not os.path.exists(base_location):
os.makedirs(base_location)
# run the top-n results for all values of n
while n <= max_n:
filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}'
added_pdfs = set()
running_cme_stats = {
"correct_count": 0,
"missed_count": 0,
"extraneous_count": 0,
"correct_dict": defaultdict(int),
"missed_dict": defaultdict(int),
"extraneous_dict": defaultdict(int)
}
csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n"
# iterate through the manually reviewed papers. Add data into csv and json files via dump_data method
for parent_key, value in key_title_ground_truth.items():
pdf_key = value['pdf']
added_pdfs.add(pdf_key)
if pdf_key in features:
# update both csv file and json file
csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats,
n=n, dataset_search_type=cmr_search_type)
# loop through the papers that were not manually reviewed
for key, value in features.items():
if key not in added_pdfs:
# update only csv file
csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type)
# sort the individual counts of number of times that a dataset was correct, missed, or extraneous
running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True))
# DON'T overwrite an existing file. Exit out in this case
if os.path.exists
|
dump_data
|
identifier_name
|
cme_stats.py
|
_KEYWORD = 0, # used the CMR parameters
KEYWORD = 1, # used a free text search inside CMR
BOTH = 2 # Merge the results from science keyword and plain text search
# format a comma separated list into a semi-colon separated list
def format_lot(lot):
lot_str = str(lot)
lot_str = re.sub(r'[\[\]\(\)]', '', lot_str)
lot_str = re.sub(r', (\d+)', '(\\1)', lot_str)
lot_str = re.sub(r',', ';', lot_str)
return lot_str
# Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed),
# and false positives (extraneous)
def correct_missed_extraneous(ground_truths, predictions):
ground_truths = set(ground_truths)
correct = predictions & ground_truths
missed = ground_truths - predictions
extraneous = predictions - ground_truths
return correct, missed, extraneous
# csv is a string which will be written to a csv file at the end
# running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end
def dump_data(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False):
# extract the platform/ins couples and models from the features
|
datasets = inner_value['keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.BOTH:
# merge the two lists together, alternating order
l1 = inner_value['science_keyword_search']['dataset']
l2 = inner_value['keyword_search']['dataset']
i, j, datasets_temp = 0, 0, []
while i < len(l1) and j < len(l2):
datasets_temp.append(l1[i])
datasets_temp.append(l2[j])
i += 1
j += 1
if i < len(l1):
datasets_temp += l1[i:]
elif j < len(l2):
datasets_temp += l2[j:]
# remove duplicates
seen = set()
datasets = []
for i in range(len(datasets_temp)):
if datasets_temp[i] in seen:
continue
seen.add(datasets_temp[i])
datasets.append(datasets_temp[i])
if len(datasets) >= 1:
for predic in datasets[:n]:
cmr_results.add(predic)
# cmr queries based on the single instruments and not just the couples
if include_singles:
for inner_key, inner_value in features['cmr_results']['singles'].items():
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
single_datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
single_datasets = inner_value['keyword_search']['dataset']
else:
single_datasets = None
if single_datasets:
for predic in single_datasets[:n]:
if predic not in cmr_results:
cmr_results.add(predic)
    # create a semicolon-delimited string with the predicted datasets from CMR and add it to the csv string
cmr_list = ';'.join(list(cmr_results))
csv += f',{cmr_list}'
# If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were
# correct, missed, and extraneous.
if manually_reviewed:
correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results)
running_cme_stats['correct_count'] += len(correct)
running_cme_stats['missed_count'] += len(missed)
running_cme_stats['extraneous_count'] += len(extraneous)
        # keep counts of how often each dataset was correct (i.e., at the end we'll have something like: we predicted
        # ML2O3 correctly 54 times)
for corr in correct:
running_cme_stats['correct_dict'][corr] += 1
for miss in missed:
running_cme_stats['missed_dict'][miss] += 1
for extra in extraneous:
running_cme_stats['extraneous_dict'][extra] += 1
csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}'
return csv + "\n"
if __name__ == '__main__':
# User Parameters
features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features
    key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicable
    n = 1 # range of top-n results to search, i.e., n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9
max_n = 9
    cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use CMR parameters in the search or use free text. See enum definition
include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches
# Declare the name of the output file
output_title = 'giovanni_' # change this
include_singles_string = 'with_singles_' if include_singles else ''
sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/'
base_location = 'stats_and_csv/giovanni/' + sub_folder # change this
with open(features_location, encoding='utf-8') as f:
features = json.load(f)
with open(key_title_ground_truth_location, encoding='utf-8') as f:
key_title_ground_truth = json.load(f)
correct, missed, extraneous = [], [], []
# make a folder if one doesn't exist
if not os.path.exists(base_location):
os.makedirs(base_location)
# run the top-n results for all values of n
while n <= max_n:
filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}'
added_pdfs = set()
running_cme_stats = {
"correct_count": 0,
"missed_count": 0,
"extraneous_count": 0,
"correct_dict": defaultdict(int),
"missed_dict": defaultdict(int),
"extraneous_dict": defaultdict(int)
}
csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n"
# iterate through the manually reviewed papers. Add data into csv and json files via dump_data method
for parent_key, value in key_title_ground_truth.items():
pdf_key = value['pdf']
added_pdfs.add(pdf_key)
if pdf_key in features:
# update both csv file and json file
csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats,
n=n, dataset_search_type=cmr_search_type)
# loop through the papers that were not manually reviewed
for key, value in features.items():
if key not in added_pdfs:
# update only csv file
csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type)
# sort the individual counts of number of times that a dataset was correct, missed, or extraneous
running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True))
# DON'T overwrite an existing file. Exit out in this case
if os.path.exists(filename
|
summary_stats = features['summary_stats']
couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True)
models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True)
title = re.sub(',', '', title)
# write key, title, platform/ins couples, and models to csv string
csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},'
# add a column with the manually reviewed datasets if the paper was manually reviewed
if manually_reviewed:
manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed'])
csv += f'{manual_ground_truths}'
# get TOP-N CMR results from pairs
cmr_results = set()
for inner_key, inner_value in features['cmr_results']['pairs'].items():
# the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches.
# get the predicted datasets from the appropriate search
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
|
identifier_body
|
cme_stats.py
|
_KEYWORD = 0, # used the CMR parameters
KEYWORD = 1, # used a free text search inside CMR
BOTH = 2 # Merge the results from science keyword and plain text search
# format a comma separated list into a semi-colon separated list
def format_lot(lot):
lot_str = str(lot)
lot_str = re.sub(r'[\[\]\(\)]', '', lot_str)
lot_str = re.sub(r', (\d+)', '(\\1)', lot_str)
lot_str = re.sub(r',', ';', lot_str)
return lot_str
# Given the true datasets and the predicted datasets, determine the true positives (correct), false negatives (missed),
# and false positives (extraneous)
def correct_missed_extraneous(ground_truths, predictions):
ground_truths = set(ground_truths)
correct = predictions & ground_truths
missed = ground_truths - predictions
extraneous = predictions - ground_truths
return correct, missed, extraneous
# csv is a string which will be written to a csv file at the end
# running_cme_stats is a dictionary which gets modified in place and will be written to a json file at the end
def dump_data(key, features, csv, manually_reviewed=None, title='', running_cme_stats=None, n=1, dataset_search_type=None, include_singles=False):
# extract the platform/ins couples and models from the features
summary_stats = features['summary_stats']
couples = sorted(list(summary_stats['valid_couples'].items()), key=lambda x: x[1], reverse=True)
models = sorted(list(summary_stats['models'].items()), key=lambda x: x[1], reverse=True)
title = re.sub(',', '', title)
# write key, title, platform/ins couples, and models to csv string
csv += f'{key},{title},{format_lot(couples)}, {format_lot(models)},'
# add a column with the manually reviewed datasets if the paper was manually reviewed
if manually_reviewed:
manual_ground_truths = ';'.join(manually_reviewed['manually_reviewed'])
csv += f'{manual_ground_truths}'
# get TOP-N CMR results from pairs
cmr_results = set()
for inner_key, inner_value in features['cmr_results']['pairs'].items():
# the features dict contains both science keyword (using cmr parameters) and keyword (free text) searches.
# get the predicted datasets from the appropriate search
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
datasets = inner_value['keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.BOTH:
# merge the two lists together, alternating order
l1 = inner_value['science_keyword_search']['dataset']
l2 = inner_value['keyword_search']['dataset']
i, j, datasets_temp = 0, 0, []
while i < len(l1) and j < len(l2):
datasets_temp.append(l1[i])
datasets_temp.append(l2[j])
i += 1
j += 1
if i < len(l1):
datasets_temp += l1[i:]
elif j < len(l2):
datasets_temp += l2[j:]
# remove duplicates
seen = set()
datasets = []
for i in range(len(datasets_temp)):
if datasets_temp[i] in seen:
continue
seen.add(datasets_temp[i])
datasets.append(datasets_temp[i])
if len(datasets) >= 1:
for predic in datasets[:n]:
cmr_results.add(predic)
# cmr queries based on the single instruments and not just the couples
if include_singles:
for inner_key, inner_value in features['cmr_results']['singles'].items():
if dataset_search_type == CMRSearchType.SCIENCE_KEYWORD:
single_datasets = inner_value['science_keyword_search']['dataset']
elif dataset_search_type == CMRSearchType.KEYWORD:
single_datasets = inner_value['keyword_search']['dataset']
else:
single_datasets = None
if single_datasets:
|
    # create a semicolon-delimited string with the predicted datasets from CMR and add it to the csv string
cmr_list = ';'.join(list(cmr_results))
csv += f',{cmr_list}'
# If the paper was manually reviewed update the dictionary containing overall stats about how many datasets were
# correct, missed, and extraneous.
if manually_reviewed:
correct, missed, extraneous = correct_missed_extraneous(manually_reviewed['manually_reviewed'], cmr_results)
running_cme_stats['correct_count'] += len(correct)
running_cme_stats['missed_count'] += len(missed)
running_cme_stats['extraneous_count'] += len(extraneous)
        # keep counts of how often each dataset was correct (i.e., at the end we'll have something like: we predicted
        # ML2O3 correctly 54 times)
for corr in correct:
running_cme_stats['correct_dict'][corr] += 1
for miss in missed:
running_cme_stats['missed_dict'][miss] += 1
for extra in extraneous:
running_cme_stats['extraneous_dict'][extra] += 1
csv += f',,,{len(correct)}, {len(missed)}, {len(extraneous)}'
return csv + "\n"
if __name__ == '__main__':
# User Parameters
features_location = 'cmr_results/giovanni/giovanni_papers_features.json' # the extracted features
    key_title_ground_truth_location = 'cmr_results/giovanni/giovanni_papers_key_title_ground_truth.json' # includes the ground truth if applicable
    n = 1 # range of top-n results to search, i.e., n=1, max_n=9 means analyze results for top-1, top-2, top-3, ..., top-9
max_n = 9
    cmr_search_type = CMRSearchType.SCIENCE_KEYWORD # use CMR parameters in the search or use free text. See enum definition
include_singles = False # include results from NoPlatform/Instrument science keyword CMR searches
# Declare the name of the output file
output_title = 'giovanni_' # change this
include_singles_string = 'with_singles_' if include_singles else ''
sub_folder = f'{output_title}{include_singles_string}{cmr_search_type.name.lower()}/'
base_location = 'stats_and_csv/giovanni/' + sub_folder # change this
with open(features_location, encoding='utf-8') as f:
features = json.load(f)
with open(key_title_ground_truth_location, encoding='utf-8') as f:
key_title_ground_truth = json.load(f)
correct, missed, extraneous = [], [], []
# make a folder if one doesn't exist
if not os.path.exists(base_location):
os.makedirs(base_location)
# run the top-n results for all values of n
while n <= max_n:
filename = base_location + f'{output_title}top_{n}_{cmr_search_type.name.lower()}'
added_pdfs = set()
running_cme_stats = {
"correct_count": 0,
"missed_count": 0,
"extraneous_count": 0,
"correct_dict": defaultdict(int),
"missed_dict": defaultdict(int),
"extraneous_dict": defaultdict(int)
}
csv = "paper, title, mission/instruments, models, manually reviewed, CMR datasets,,,correct, missed, extraneous\n"
# iterate through the manually reviewed papers. Add data into csv and json files via dump_data method
for parent_key, value in key_title_ground_truth.items():
pdf_key = value['pdf']
added_pdfs.add(pdf_key)
if pdf_key in features:
# update both csv file and json file
csv = dump_data(pdf_key, features[pdf_key], csv, manually_reviewed=value, title=value['title'], running_cme_stats=running_cme_stats,
n=n, dataset_search_type=cmr_search_type)
# loop through the papers that were not manually reviewed
for key, value in features.items():
if key not in added_pdfs:
# update only csv file
csv = dump_data(key, value, csv, dataset_search_type=cmr_search_type)
# sort the individual counts of number of times that a dataset was correct, missed, or extraneous
running_cme_stats['correct_dict'] = dict(sorted(running_cme_stats['correct_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['missed_dict'] = dict(sorted(running_cme_stats['missed_dict'].items(), key=lambda x: x[1], reverse=True))
running_cme_stats['extraneous_dict'] = dict(sorted(running_cme_stats['extraneous_dict'].items(), key=lambda x: x[1], reverse=True))
# DON'T overwrite an existing file. Exit out in this case
if os.path.exists
|
for predic in single_datasets[:n]:
if predic not in cmr_results:
cmr_results.add(predic)
|
conditional_block
|
mod.rs
|
result.push(salt);
Ok(result)
}
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde_json::to_writer(file, &values)?;
Ok(())
}
}
/// Convenience type for the serialization and deserialization of Account Keys.
#[derive(Serialize, Deserialize)]
struct KeyList(Vec<AccountKey>);
impl KeyList {
fn load<R: io::Read>(reader: R) -> Result<Self, Error> {
serde_json::from_reader(reader).map_err(Into::into)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use assert_matches::assert_matches;
/// Loads the set of saved Account Keys from storage and verifies that it's equal to the
/// provided `expected_keys`.
#[track_caller]
pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>(
path: P,
expected_keys: Vec<AccountKey>,
) {
let read_keys = AccountKeyList::load_from_path(path).expect("can read from file");
assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys);
}
#[test]
fn model_id_from_u32() {
let normal_id = 0x1234;
let id = ModelId::try_from(normal_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x12, 0x34]);
let zero_id = 0;
let id = ModelId::try_from(zero_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x00, 0x00]);
let max_id = 0xffffff;
let id = ModelId::try_from(max_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0xff, 0xff, 0xff]);
}
#[test]
fn invalid_model_id_conversion_is_error() {
let invalid_id = 0x1ffabcd;
assert_matches!(ModelId::try_from(invalid_id), Err(_));
}
#[test]
fn empty_account_key_list_service_data() {
let empty = AccountKeyList::with_capacity_and_keys(1, vec![]);
let service_data = empty.service_data().expect("can build service data");
let expected = [0x00];
assert_eq!(service_data, expected);
}
#[test]
fn oversized_service_data_is_error() {
// Building an AccountKeyList of 11 elements will result in an oversized service data.
// In the future, this test will be obsolete as the AccountKeyList will be bounded in its
// construction.
let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect();
let oversized = AccountKeyList::with_capacity_and_keys(15, keys);
let result = oversized.service_data();
assert_matches!(result, Err(Error::InternalError(_)));
}
#[test]
fn account_key_list_service_data() {
let example_key = AccountKey::new([1; 16]);
let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]);
let salt = 0x14;
// Because the service data is generated with a random salt value, we test the internal
// method with a controlled salt value so that the test is deterministic.
let service_data = keys.service_data_internal(salt).expect("can build service_data");
let expected = [
0x40, // Length = 4, Show UI indication
0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list
0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14)
];
assert_eq!(service_data, expected);
}
/// Tests AES-128 encryption & decryption using an Account Key as the Secret Key.
/// The contents of this test case are pulled from the GFPS specification.
/// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption
#[test]
fn aes_128_encryption_roundtrip() {
let message = [
0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA,
0x97, 0xEA,
];
let account_key = AccountKey::new([
0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3,
0x32, 0x1D,
]);
let encrypted = account_key.shared_secret().encrypt(&message);
let expected = [
0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0,
0x9E, 0x9C,
];
assert_eq!(encrypted, expected);
let decrypted = account_key.shared_secret().decrypt(&encrypted);
assert_eq!(decrypted, message);
}
#[test]
fn account_key_lru_eviction()
|
{
let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]);
let max: u8 = MAX_ACCOUNT_KEYS as u8;
for i in 1..max + 1 {
let key = AccountKey::new([i; 16]);
list.save(key.clone());
assert_eq!(list.keys().len(), i as usize);
assert!(list.keys.contains_key(&key));
}
// Adding a new key results in the eviction of the LRU key.
assert_eq!(list.keys().len(), max as usize);
let new_key = AccountKey::new([max + 1; 16]);
list.save(new_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&new_key));
// LRU Key is no longer stored.
let first_key = AccountKey::new([1; 16]);
assert!(!list.keys.contains_key(&first_key));
|
identifier_body
|
|
mod.rs
|
/// A long-lived key that allows the Provider to be recognized as belonging to a certain user
/// account.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AccountKey(SharedSecret);
impl AccountKey {
pub fn new(bytes: [u8; 16]) -> Self {
Self(SharedSecret::new(bytes))
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0.as_bytes()
}
pub fn shared_secret(&self) -> &SharedSecret {
&self.0
}
}
impl From<&SharedSecret> for AccountKey {
fn from(src: &SharedSecret) -> AccountKey {
AccountKey(src.clone())
}
}
/// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys
/// will be evicted in an LRU manner as described in the GFPS specification.
/// This limit is chosen as the minimum required by any implementation and provides ample space
/// in the LE advertisement packet.
/// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList
/// for more details.
const MAX_ACCOUNT_KEYS: usize = 5;
/// Manages the set of saved Account Keys.
///
/// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the
/// `AccountKeyList` will evict the least recently used Account Key.
///
/// Account Keys are written to isolated persistent storage and are maintained across reboots. The
/// set of saved keys will only be erased on device factory resets.
/// To avoid writing to persistent storage too often, only new Account Keys are written to storage.
/// Writes for existing keys will result in cache "hits" (e.g. LRU ordering is updated) but will not be
/// updated in the backing storage file.
pub struct AccountKeyList {
/// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value
/// as we only care about maintaining the keys.
keys: LruCache<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
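For reference, the first service-data byte packs the bloom-filter length into the high nibble and the type flag (0b0000 = show UI) into the low nibble, followed by the filter bytes, the fixed 0x11 salt descriptor, and the salt itself. A small Python sketch of that layout, using the values from the account_key_list_service_data test shown earlier in this file:

filter_bytes = [0x04, 0x33, 0x00, 0x88]      # bloom filter over the account keys
salt = 0x14
first_byte = (len(filter_bytes) << 4) | 0x0  # length = 4, type 0b0000 (show UI)
payload = [first_byte] + filter_bytes + [0x11, salt]
# payload == [0x40, 0x04, 0x33, 0x00, 0x88, 0x11, 0x14]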
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde
|
}
|
random_line_split
|
|
mod.rs
|
Ok(Self(src))
}
}
impl From<ModelId> for [u8; 3] {
fn from(src: ModelId) -> [u8; 3] {
let mut bytes = [0; 3];
bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]);
bytes
}
}
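The conversion above keeps the three low-order bytes of the big-endian u32 (a model ID fits in 24 bits). The same arithmetic, sketched in Python for quick reference and matching the model_id_from_u32 test elsewhere in this file:

list((0x1234).to_bytes(4, 'big')[1:])    # -> [0x00, 0x12, 0x34]
list((0xffffff).to_bytes(4, 'big')[1:])  # -> [0xff, 0xff, 0xff]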
/// A key used during the Fast Pair Pairing Procedure.
/// This key is a temporary value that lives for the lifetime of a procedure.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SharedSecret([u8; 16]);
impl SharedSecret {
pub fn new(bytes: [u8; 16]) -> Self {
Self(bytes)
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0
}
/// Decrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the decrypted payload.
pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.decrypt_block(&mut block);
block.into()
}
/// Encrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the encrypted payload.
pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.encrypt_block(&mut block);
block.into()
}
}
/// A long-lived key that allows the Provider to be recognized as belonging to a certain user
/// account.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AccountKey(SharedSecret);
impl AccountKey {
pub fn new(bytes: [u8; 16]) -> Self {
Self(SharedSecret::new(bytes))
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0.as_bytes()
}
pub fn shared_secret(&self) -> &SharedSecret {
&self.0
}
}
impl From<&SharedSecret> for AccountKey {
fn from(src: &SharedSecret) -> AccountKey {
AccountKey(src.clone())
}
}
/// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys
/// will be evicted in an LRU manner as described in the GFPS specification.
/// This limit is chosen as the minimum required by any implementation and provides ample space
/// in the LE advertisement packet.
/// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList
/// for more details.
const MAX_ACCOUNT_KEYS: usize = 5;
/// Manages the set of saved Account Keys.
///
/// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the
/// `AccountKeyList` will evict the least recently used Account Key.
///
/// Account Keys are written to isolated persistent storage and are maintained across reboots. The
/// set of saved keys will only be erased on device factory resets.
/// To avoid writing to persistent storage too often, only new Account Keys are written to storage.
/// Writes for existing keys will result in cache "hits" (e.g. LRU ordering is updated) but will not be
/// updated in the backing storage file.
pub struct AccountKeyList {
/// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value
/// as we only care about maintaining the keys.
keys: LruCache<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: Lru
|
{
return Err(Error::InvalidModelId(src));
}
|
conditional_block
|
|
mod.rs
|
<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde_json::to_writer(file, &values)?;
Ok(())
}
}
/// Convenience type for the serialization and deserialization of Account Keys.
#[derive(Serialize, Deserialize)]
struct KeyList(Vec<AccountKey>);
impl KeyList {
fn load<R: io::Read>(reader: R) -> Result<Self, Error> {
serde_json::from_reader(reader).map_err(Into::into)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use assert_matches::assert_matches;
/// Loads the set of saved Account Keys from storage and verifies that it's equal to the
/// provided `expected_keys`.
#[track_caller]
pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>(
path: P,
expected_keys: Vec<AccountKey>,
) {
let read_keys = AccountKeyList::load_from_path(path).expect("can read from file");
assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys);
}
#[test]
fn model_id_from_u32() {
let normal_id = 0x1234;
let id = ModelId::try_from(normal_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x12, 0x34]);
let zero_id = 0;
let id = ModelId::try_from(zero_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x00, 0x00]);
let max_id = 0xffffff;
let id = ModelId::try_from(max_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0xff, 0xff, 0xff]);
}
#[test]
fn
|
invalid_model_id_conversion_is_error
|
identifier_name
|
|
utils.rs
|
color format")?;
Ok(())
}
/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
let font_family_property = system_fonts::FontPropertyBuilder::new()
.family(font_family)
.build();
let (loaded_font, _) =
if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
(loaded_font, index)
} else {
eprintln!("Family not found, falling back to first Monospace font");
let mut font_monospace_property =
system_fonts::FontPropertyBuilder::new().monospace().build();
let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
let (loaded_font, index) =
system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
(loaded_font, index)
};
loaded_font
}
/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
(
f64::from(color_str.r) / 255.0,
f64::from(color_str.g) / 255.0,
f64::from(color_str.b) / 255.0,
f64::from(color_str.a),
)
}
/// Parse app arguments.
pub fn parse_args() -> AppConfig {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.global_setting(AppSettings::ColoredHelp)
.arg(
Arg::with_name("font")
.short("f")
.long("font")
.takes_value(true)
.validator(is_truetype_font)
.default_value("Mono:72")
.help("Use a specific TrueType font with this format: family:size"))
.arg(
Arg::with_name("hint_chars")
.short("c")
.long("chars")
.takes_value(true)
.default_value("sadfjklewcmpgh")
.help("Define a set of possbile values to use as hint characters"))
.arg(
Arg::with_name("margin")
.short("m")
.long("margin")
.takes_value(true)
.default_value("0.2")
.help("Add an additional margin around the text box (value is a factor of the box size)"))
.arg(
Arg::with_name("text_color")
.long("textcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("#dddddd")
.display_order(49)
.help("Text color (CSS notation)"))
.arg(
Arg::with_name("text_color_alt")
.long("textcoloralt")
.takes_value(true)
.validator(is_valid_color)
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else
|
;
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
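// A minimal sketch (added for illustration, not part of the original file) of the hint
// search above: with the two-character alphabet "ab" and max_count = 3, one character is
// not enough (2^1 < 3), so two-character hints are generated and anything already present
// in current_hints is skipped.
#[cfg(test)]
mod get_next_hint_sketch {
    use super::*;

    #[test]
    fn avoids_hints_that_are_already_taken() {
        let taken = vec![String::from("aa")];
        let taken_refs: Vec<&String> = taken.iter().collect();
        let next = get_next_hint(taken_refs, "ab", 3);
        assert_eq!(next.len(), 2);
        assert_ne!(next, "aa");
    }
}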
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts
|
{
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
}
|
conditional_block
|
utils.rs
|
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
};
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts_with(current_hints) {
// Paint already selected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color_alt.0,
app_config.text_color_alt.1,
app_config.text_color_alt.2,
app_config.text_color_alt.3,
);
for c in current_hints.chars() {
rw.cairo_context.show_text(&c.to_string());
}
}
// Paint unselected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color.0,
app_config.text_color.1,
app_config.text_color.2,
app_config.text_color.3,
);
let re = Regex::new(&format!("^{}", current_hints)).unwrap();
for c in re.replace(text, "").chars() {
rw.cairo_context.show_text(&c.to_string());
}
rw.cairo_context.get_target().flush();
}
/// Try to grab the keyboard until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global keyboard input without it failing
/// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying
/// until we eventually succeed.
pub fn snatch_keyboard(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_keyboard_cookie = xcb::xproto::grab_keyboard(
&conn,
true,
screen.root(),
xcb::CURRENT_TIME,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
);
let grab_keyboard_reply = grab_keyboard_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
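// A minimal sketch (added for illustration, not part of the original file) of the same
// retry-until-timeout pattern used by snatch_keyboard and snatch_mouse, with the X grab
// call replaced by a hypothetical fallible closure so it can run without an X server.
#[cfg(test)]
mod retry_pattern_sketch {
    use std::time::{Duration, Instant};

    fn retry_until<F: FnMut() -> bool>(mut attempt: F, timeout: Duration) -> Result<(), String> {
        let now = Instant::now();
        loop {
            if now.elapsed() > timeout {
                return Err(format!("gave up after {:?}", now.elapsed()));
            }
            if attempt() {
                return Ok(());
            }
            std::thread::sleep(Duration::from_millis(1));
        }
    }

    #[test]
    fn succeeds_once_the_grab_becomes_available() {
        let mut tries = 0;
        let result = retry_until(
            || {
                tries += 1;
                tries >= 3 // pretend the grab succeeds on the third attempt
            },
            Duration::from_secs(1),
        );
        assert!(result.is_ok());
        assert_eq!(tries, 3);
    }
}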
/// Try to grab the mouse until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global mouse input without it failing sometimes
/// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we
/// eventually succeed.
pub fn snatch_mouse(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_pointer_cookie = xcb::xproto::grab_pointer(
&conn,
true,
screen.root(),
xcb::EVENT_MASK_BUTTON_PRESS as u16,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
xcb::NONE,
xcb::NONE,
xcb::CURRENT_TIME,
);
let grab_pointer_reply = grab_pointer_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Sort list of `DesktopWindow`s by position.
///
/// This sorts by column first and row second.
pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> {
dws.sort_by_key(|w| w.pos.0);
dws.sort_by_key(|w| w.pos.1);
dws
}
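// A minimal sketch (added for illustration, not part of the original file) of the two-pass
// stable sort used by sort_by_pos, with plain (x, y) tuples standing in for window
// positions: because sort_by_key is stable, the key of the second pass becomes the primary
// ordering and the key of the first pass only breaks ties.
#[cfg(test)]
mod sort_by_pos_sketch {
    #[test]
    fn second_sort_key_wins() {
        let mut pos = vec![(2, 1), (1, 2), (1, 1), (2, 2)];
        pos.sort_by_key(|p| p.0);
        pos.sort_by_key(|p| p.1);
        assert_eq!(pos, vec![(1, 1), (2, 1), (1, 2), (2, 2)]);
    }
}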
/// Returns true if `r1` and `r2` overlap.
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool {
let left_corner_inside = r1.0 < r2.0 + r2.2;
let right_corner_inside = r1.0 + r1.2 > r2.0;
let top_corner_inside = r1.1 < r2.1 + r2.3;
let bottom_corner_inside = r1.1 + r1.3 > r2.1;
left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside
}
/// Finds overlaps and returns a list of those rects in the format (x, y, w, h).
pub fn find_overlaps(
rws: Vec<&RenderWindow>,
rect: (i32, i32, i32, i32),
) -> Vec<(i32, i32, i32, i32)> {
let mut overlaps = vec![];
for rw in rws {
if intersects(rw.rect, rect) {
overlaps.push(rw.rect);
}
}
overlaps
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_intersects() {
assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64)));
}
#[test]
fn
|
test_no_intersect
|
identifier_name
|
|
utils.rs
|
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
};
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts_with(current_hints) {
// Paint already selected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color_alt.0,
app_config.text_color_alt.1,
app_config.text_color_alt.2,
app_config.text_color_alt.3,
);
for c in current_hints.chars() {
rw.cairo_context.show_text(&c.to_string());
}
}
// Paint unselected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color.0,
app_config.text_color.1,
app_config.text_color.2,
app_config.text_color.3,
);
let re = Regex::new(&format!("^{}", current_hints)).unwrap();
for c in re.replace(text, "").chars() {
rw.cairo_context.show_text(&c.to_string());
}
rw.cairo_context.get_target().flush();
}
/// Try to grab the keyboard until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global keyboard input without it failing
/// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying
/// until we eventually succeed.
pub fn snatch_keyboard(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_keyboard_cookie = xcb::xproto::grab_keyboard(
&conn,
true,
screen.root(),
xcb::CURRENT_TIME,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
);
let grab_keyboard_reply = grab_keyboard_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Try to grab the mouse until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global mouse input without it failing sometimes
/// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we
/// eventually succeed.
pub fn snatch_mouse(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String>
|
{
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_pointer_cookie = xcb::xproto::grab_pointer(
&conn,
true,
screen.root(),
xcb::EVENT_MASK_BUTTON_PRESS as u16,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
xcb::NONE,
xcb::NONE,
xcb::CURRENT_TIME,
);
|
identifier_body
|
|
utils.rs
|
"bottom" => Ok(VerticalAlign::Bottom),
_ => Err(()),
}
}
}
/// Checks whether the provided fontconfig font `f` is valid.
fn is_truetype_font(f: String) -> Result<(), String> {
let v: Vec<_> = f.split(':').collect();
let (family, size) = (v.get(0), v.get(1));
if family.is_none() || size.is_none() {
return Err("From font format".to_string());
}
if let Err(e) = size.unwrap().parse::<f32>() {
return Err(e.description().to_string());
}
Ok(())
}
/// Validate a color.
fn is_valid_color(c: String) -> Result<(), String> {
c.parse::<CssColor>().map_err(|_| "Invalid color format")?;
Ok(())
}
/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
let font_family_property = system_fonts::FontPropertyBuilder::new()
.family(font_family)
.build();
let (loaded_font, _) =
if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
(loaded_font, index)
} else {
eprintln!("Family not found, falling back to first Monospace font");
let mut font_monospace_property =
system_fonts::FontPropertyBuilder::new().monospace().build();
let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
let (loaded_font, index) =
system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
(loaded_font, index)
};
loaded_font
}
/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
(
f64::from(color_str.r) / 255.0,
f64::from(color_str.g) / 255.0,
f64::from(color_str.b) / 255.0,
f64::from(color_str.a),
)
}
/// Parse app arguments.
pub fn parse_args() -> AppConfig {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.global_setting(AppSettings::ColoredHelp)
.arg(
Arg::with_name("font")
.short("f")
.long("font")
.takes_value(true)
.validator(is_truetype_font)
.default_value("Mono:72")
.help("Use a specific TrueType font with this format: family:size"))
.arg(
Arg::with_name("hint_chars")
.short("c")
.long("chars")
.takes_value(true)
.default_value("sadfjklewcmpgh")
.help("Define a set of possbile values to use as hint characters"))
.arg(
Arg::with_name("margin")
.short("m")
.long("margin")
.takes_value(true)
.default_value("0.2")
.help("Add an additional margin around the text box (value is a factor of the box size)"))
.arg(
Arg::with_name("text_color")
.long("textcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("#dddddd")
.display_order(49)
.help("Text color (CSS notation)"))
.arg(
Arg::with_name("text_color_alt")
.long("textcoloralt")
.takes_value(true)
.validator(is_valid_color)
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
};
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are
|
"top" => Ok(VerticalAlign::Top),
"center" => Ok(VerticalAlign::Center),
|
random_line_split
|
|
mysql_interactive_worker.rs
|
08, 0x00, 0x00, 0x00])
}
fn default_auth_plugin(&self) -> &str {
"mysql_native_password"
}
fn auth_plugin_for_username(&self, _user: &[u8]) -> &str {
"mysql_native_password"
}
fn salt(&self) -> [u8; 20] {
self.salt
}
fn authenticate(
&self,
auth_plugin: &str,
username: &[u8],
salt: &[u8],
auth_data: &[u8],
) -> bool {
let username = String::from_utf8_lossy(username);
let info = CertifiedInfo::create(&username, auth_data, &self.client_addr);
let authenticate = self.base.authenticate(auth_plugin, salt, info);
futures::executor::block_on(async move {
match authenticate.await {
Ok(res) => res,
Err(failure) => {
log::error!(
"MySQL handler authenticate failed, \
user_name: {}, \
client_address: {}, \
failure_cause: {}",
username,
self.client_addr,
failure
);
false
}
}
})
}
fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_prepare(query, writer)
}
fn on_execute(
&mut self,
id: u32,
param: ParamParser,
writer: QueryResultWriter<W>,
) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_execute(id, param, writer)
}
fn on_close(&mut self, id: u32) {
self.base.do_close(id);
}
fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
let mut writer = DFQueryResultWriter::create(writer);
match InteractiveWorkerBase::<W>::build_runtime() {
Ok(runtime) => {
let instant = Instant::now();
let blocks = runtime.block_on(self.base.do_query(query));
let mut write_result = writer.write(blocks);
if let Err(cause) = write_result {
let suffix = format!("(while in query {})", query);
write_result = Err(cause.add_message_back(suffix));
}
histogram!(
super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION,
instant.elapsed()
);
write_result
}
Err(error) => writer.write(Err(error)),
}
}
fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
DFInitResultWriter::create(writer).write(self.base.do_init(database_name))
}
}
impl<W: std::io::Write> InteractiveWorkerBase<W> {
async fn authenticate(
&self,
auth_plugin: &str,
salt: &[u8],
info: CertifiedInfo,
) -> Result<bool> {
let user_name = &info.user_name;
let address = &info.user_client_address;
let user_manager = self.session.get_user_manager();
let user_info = user_manager.get_user(user_name).await?;
let input = &info.user_password;
let saved = &user_info.password;
let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?;
user_manager
.auth_user(CertifiedInfo::create(user_name, encode_password, address))
.await
}
fn encoding_password(
auth_plugin: &str,
salt: &[u8],
input: &[u8],
user_password: &[u8],
) -> Result<Vec<u8>> {
match auth_plugin {
"mysql_native_password" if input.is_empty() => Ok(vec![]),
"mysql_native_password" => {
// SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) )
let mut m = sha1::Sha1::new();
m.update(salt);
m.update(user_password);
let result = m.digest().bytes();
if input.len() != result.len() {
return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed"));
}
let mut s = Vec::with_capacity(result.len());
for i in 0..result.len() {
s.push(input[i] ^ result[i]);
}
Ok(s)
}
_ => Ok(input.to_vec()),
}
}
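    // A minimal sketch (added for illustration, not part of the original file) of the
    // mysql_native_password exchange that encoding_password reverses, assuming the stored
    // credential is SHA1(SHA1(password)) as in MySQL's native scheme: the client sends
    // SHA1(password) XOR SHA1(salt ++ SHA1(SHA1(password))), so XOR-ing the scramble with
    // SHA1(salt ++ stored) recovers SHA1(password). Kept as an associated helper because
    // free test functions cannot live inside an impl block.
    #[cfg(test)]
    #[allow(dead_code)]
    fn encoding_password_sketch() {
        let sha1_of = |parts: &[&[u8]]| -> [u8; 20] {
            let mut m = sha1::Sha1::new();
            for part in parts {
                m.update(part);
            }
            m.digest().bytes()
        };
        let password: &[u8] = b"secret";
        let salt = [0x5au8; 20];
        let sha1_pwd = sha1_of(&[password]);
        let stored = sha1_of(&[&sha1_pwd[..]]); // what the server keeps: SHA1(SHA1(password))
        let mask = sha1_of(&[&salt[..], &stored[..]]);
        let scramble: Vec<u8> = sha1_pwd.iter().zip(mask.iter()).map(|(a, b)| a ^ b).collect();
        let decoded = Self::encoding_password("mysql_native_password", &salt, &scramble, &stored)
            .expect("scramble length matches the SHA1 digest length");
        assert_eq!(decoded, sha1_pwd.to_vec());
    }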
fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Prepare is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_execute(
&mut self,
_: u32,
_: ParamParser<'_>,
writer: QueryResultWriter<'_, W>,
) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Execute is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_close(&mut self, _: u32) {}
async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> {
log::debug!("{}", query);
let context = self.session.create_context().await?;
context.attach_query_str(query);
let query_parser = PlanParser::create(context.clone());
let (plan, hints) = query_parser.build_with_hint_from_sql(query);
match hints
.iter()
.find(|v| v.error_code.is_some())
.and_then(|x| x.error_code)
{
None => Self::exec_query(plan, &context).await,
Some(hint_error_code) => match Self::exec_query(plan, &context).await {
Ok(_) => Err(ErrorCode::UnexpectedError(format!(
"Expected server error code: {} but got: Ok.",
hint_error_code
))),
Err(error_code) => {
if hint_error_code == error_code.code() {
Ok((vec![DataBlock::empty()], String::from("")))
} else {
let actual_code = error_code.code();
Err(error_code.add_message(format!(
"Expected server error code: {} but got: {}.",
hint_error_code, actual_code
)))
}
}
},
}
}
async fn exec_query(
plan: Result<PlanNode>,
context: &DatabendQueryContextRef,
) -> Result<(Vec<DataBlock>, String)> {
let instant = Instant::now();
let interpreter = InterpreterFactory::get(context.clone(), plan?)?;
let data_stream = interpreter.execute().await?;
histogram!(
super::mysql_metrics::METRIC_INTERPRETER_USEDTIME,
instant.elapsed()
);
let collector = data_stream.collect::<Result<Vec<DataBlock>>>();
let query_result = collector.await;
query_result.map(|data| (data, Self::extra_info(context, instant)))
}
fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String {
let progress = context.get_progress_value();
let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64;
format!(
"Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.",
progress.read_rows,
convert_byte_size(progress.read_bytes as f64),
seconds,
convert_number_size((progress.read_rows as f64) / (seconds as f64)),
convert_byte_size((progress.read_bytes as f64) / (seconds as f64)),
)
}
fn do_init(&mut self, database_name: &str) -> Result<()> {
let init_query = format!("USE {};", database_name);
let do_query = self.do_query(&init_query);
match Self::build_runtime() {
Err(error_code) => Err(error_code),
Ok(runtime) => match runtime.block_on(do_query) {
Ok(_) => Ok(()),
Err(error_code) => Err(error_code),
},
}
|
}
|
random_line_split
|
|
mysql_interactive_worker.rs
|
_srv::ParamParser;
use msql_srv::QueryResultWriter;
use msql_srv::StatementMetaWriter;
use rand::RngCore;
use tokio_stream::StreamExt;
use crate::interpreters::InterpreterFactory;
use crate::servers::mysql::writers::DFInitResultWriter;
use crate::servers::mysql::writers::DFQueryResultWriter;
use crate::sessions::DatabendQueryContextRef;
use crate::sessions::SessionRef;
use crate::sql::PlanParser;
use crate::users::CertifiedInfo;
struct InteractiveWorkerBase<W: std::io::Write> {
session: SessionRef,
generic_hold: PhantomData<W>,
}
pub struct InteractiveWorker<W: std::io::Write> {
session: SessionRef,
base: InteractiveWorkerBase<W>,
version: String,
salt: [u8; 20],
client_addr: String,
}
impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> {
type Error = ErrorCode;
fn version(&self) -> &str {
self.version.as_str()
}
fn connect_id(&self) -> u32 {
u32::from_le_bytes([0x08, 0x00, 0x00, 0x00])
}
fn
|
(&self) -> &str {
"mysql_native_password"
}
fn auth_plugin_for_username(&self, _user: &[u8]) -> &str {
"mysql_native_password"
}
fn salt(&self) -> [u8; 20] {
self.salt
}
fn authenticate(
&self,
auth_plugin: &str,
username: &[u8],
salt: &[u8],
auth_data: &[u8],
) -> bool {
let username = String::from_utf8_lossy(username);
let info = CertifiedInfo::create(&username, auth_data, &self.client_addr);
let authenticate = self.base.authenticate(auth_plugin, salt, info);
futures::executor::block_on(async move {
match authenticate.await {
Ok(res) => res,
Err(failure) => {
log::error!(
"MySQL handler authenticate failed, \
user_name: {}, \
client_address: {}, \
failure_cause: {}",
username,
self.client_addr,
failure
);
false
}
}
})
}
fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_prepare(query, writer)
}
fn on_execute(
&mut self,
id: u32,
param: ParamParser,
writer: QueryResultWriter<W>,
) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_execute(id, param, writer)
}
fn on_close(&mut self, id: u32) {
self.base.do_close(id);
}
fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
let mut writer = DFQueryResultWriter::create(writer);
match InteractiveWorkerBase::<W>::build_runtime() {
Ok(runtime) => {
let instant = Instant::now();
let blocks = runtime.block_on(self.base.do_query(query));
let mut write_result = writer.write(blocks);
if let Err(cause) = write_result {
let suffix = format!("(while in query {})", query);
write_result = Err(cause.add_message_back(suffix));
}
histogram!(
super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION,
instant.elapsed()
);
write_result
}
Err(error) => writer.write(Err(error)),
}
}
fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
DFInitResultWriter::create(writer).write(self.base.do_init(database_name))
}
}
impl<W: std::io::Write> InteractiveWorkerBase<W> {
async fn authenticate(
&self,
auth_plugin: &str,
salt: &[u8],
info: CertifiedInfo,
) -> Result<bool> {
let user_name = &info.user_name;
let address = &info.user_client_address;
let user_manager = self.session.get_user_manager();
let user_info = user_manager.get_user(user_name).await?;
let input = &info.user_password;
let saved = &user_info.password;
let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?;
user_manager
.auth_user(CertifiedInfo::create(user_name, encode_password, address))
.await
}
fn encoding_password(
auth_plugin: &str,
salt: &[u8],
input: &[u8],
user_password: &[u8],
) -> Result<Vec<u8>> {
match auth_plugin {
"mysql_native_password" if input.is_empty() => Ok(vec![]),
"mysql_native_password" => {
// SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) )
let mut m = sha1::Sha1::new();
m.update(salt);
m.update(user_password);
let result = m.digest().bytes();
if input.len() != result.len() {
return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed"));
}
let mut s = Vec::with_capacity(result.len());
for i in 0..result.len() {
s.push(input[i] ^ result[i]);
}
Ok(s)
}
_ => Ok(input.to_vec()),
}
}
fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Prepare is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_execute(
&mut self,
_: u32,
_: ParamParser<'_>,
writer: QueryResultWriter<'_, W>,
) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Execute is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_close(&mut self, _: u32) {}
async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> {
log::debug!("{}", query);
let context = self.session.create_context().await?;
context.attach_query_str(query);
let query_parser = PlanParser::create(context.clone());
let (plan, hints) = query_parser.build_with_hint_from_sql(query);
match hints
.iter()
.find(|v| v.error_code.is_some())
.and_then(|x| x.error_code)
{
None => Self::exec_query(plan, &context).await,
Some(hint_error_code) => match Self::exec_query(plan, &context).await {
Ok(_) => Err(ErrorCode::UnexpectedError(format!(
"Expected server error code: {} but got: Ok.",
hint_error_code
))),
Err(error_code) => {
if hint_error_code == error_code.code() {
Ok((vec![DataBlock::empty()], String::from("")))
} else {
let actual_code = error_code.code();
Err(error_code.add_message(format!(
"Expected server error code: {} but got: {}.",
hint_error_code, actual_code
)))
}
}
},
}
}
async fn exec_query(
plan: Result<PlanNode>,
context: &DatabendQueryContextRef,
) -> Result<(Vec<DataBlock>, String)> {
let instant = Instant::now();
let interpreter = InterpreterFactory::get(context.clone(), plan?)?;
let data_stream = interpreter.execute().await?;
histogram!(
super::mysql_metrics::METRIC_INTERPRETER_USEDTIME,
instant.elapsed()
);
let collector = data_stream.collect::<Result<Vec<DataBlock>>>();
let query_result = collector.await;
query
|
default_auth_plugin
|
identifier_name
|
mysql_interactive_worker.rs
|
::ParamParser;
use msql_srv::QueryResultWriter;
use msql_srv::StatementMetaWriter;
use rand::RngCore;
use tokio_stream::StreamExt;
use crate::interpreters::InterpreterFactory;
use crate::servers::mysql::writers::DFInitResultWriter;
use crate::servers::mysql::writers::DFQueryResultWriter;
use crate::sessions::DatabendQueryContextRef;
use crate::sessions::SessionRef;
use crate::sql::PlanParser;
use crate::users::CertifiedInfo;
struct InteractiveWorkerBase<W: std::io::Write> {
session: SessionRef,
generic_hold: PhantomData<W>,
}
pub struct InteractiveWorker<W: std::io::Write> {
session: SessionRef,
base: InteractiveWorkerBase<W>,
version: String,
salt: [u8; 20],
client_addr: String,
}
impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> {
type Error = ErrorCode;
fn version(&self) -> &str {
self.version.as_str()
}
fn connect_id(&self) -> u32 {
u32::from_le_bytes([0x08, 0x00, 0x00, 0x00])
}
fn default_auth_plugin(&self) -> &str {
"mysql_native_password"
}
fn auth_plugin_for_username(&self, _user: &[u8]) -> &str {
"mysql_native_password"
}
fn salt(&self) -> [u8; 20] {
self.salt
}
fn authenticate(
&self,
auth_plugin: &str,
username: &[u8],
salt: &[u8],
auth_data: &[u8],
) -> bool {
let username = String::from_utf8_lossy(username);
let info = CertifiedInfo::create(&username, auth_data, &self.client_addr);
let authenticate = self.base.authenticate(auth_plugin, salt, info);
futures::executor::block_on(async move {
match authenticate.await {
Ok(res) => res,
Err(failure) => {
log::error!(
"MySQL handler authenticate failed, \
user_name: {}, \
client_address: {}, \
failure_cause: {}",
username,
self.client_addr,
failure
);
false
}
}
})
}
fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_prepare(query, writer)
}
fn on_execute(
&mut self,
id: u32,
param: ParamParser,
writer: QueryResultWriter<W>,
) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_execute(id, param, writer)
}
fn on_close(&mut self, id: u32) {
self.base.do_close(id);
}
fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
let mut writer = DFQueryResultWriter::create(writer);
match InteractiveWorkerBase::<W>::build_runtime() {
Ok(runtime) => {
let instant = Instant::now();
let blocks = runtime.block_on(self.base.do_query(query));
let mut write_result = writer.write(blocks);
if let Err(cause) = write_result {
let suffix = format!("(while in query {})", query);
write_result = Err(cause.add_message_back(suffix));
}
histogram!(
super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION,
instant.elapsed()
);
write_result
}
Err(error) => writer.write(Err(error)),
}
}
fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
DFInitResultWriter::create(writer).write(self.base.do_init(database_name))
}
}
impl<W: std::io::Write> InteractiveWorkerBase<W> {
async fn authenticate(
&self,
auth_plugin: &str,
salt: &[u8],
info: CertifiedInfo,
) -> Result<bool>
|
fn encoding_password(
auth_plugin: &str,
salt: &[u8],
input: &[u8],
user_password: &[u8],
) -> Result<Vec<u8>> {
match auth_plugin {
"mysql_native_password" if input.is_empty() => Ok(vec![]),
"mysql_native_password" => {
// SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) )
let mut m = sha1::Sha1::new();
m.update(salt);
m.update(user_password);
let result = m.digest().bytes();
if input.len() != result.len() {
return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed"));
}
let mut s = Vec::with_capacity(result.len());
for i in 0..result.len() {
s.push(input[i] ^ result[i]);
}
Ok(s)
}
_ => Ok(input.to_vec()),
}
}
fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Prepare is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_execute(
&mut self,
_: u32,
_: ParamParser<'_>,
writer: QueryResultWriter<'_, W>,
) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Execute is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_close(&mut self, _: u32) {}
async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> {
log::debug!("{}", query);
let context = self.session.create_context().await?;
context.attach_query_str(query);
let query_parser = PlanParser::create(context.clone());
let (plan, hints) = query_parser.build_with_hint_from_sql(query);
match hints
.iter()
.find(|v| v.error_code.is_some())
.and_then(|x| x.error_code)
{
None => Self::exec_query(plan, &context).await,
Some(hint_error_code) => match Self::exec_query(plan, &context).await {
Ok(_) => Err(ErrorCode::UnexpectedError(format!(
"Expected server error code: {} but got: Ok.",
hint_error_code
))),
Err(error_code) => {
if hint_error_code == error_code.code() {
Ok((vec![DataBlock::empty()], String::from("")))
} else {
let actual_code = error_code.code();
Err(error_code.add_message(format!(
"Expected server error code: {} but got: {}.",
hint_error_code, actual_code
)))
}
}
},
}
}
async fn exec_query(
plan: Result<PlanNode>,
context: &DatabendQueryContextRef,
) -> Result<(Vec<DataBlock>, String)> {
let instant = Instant::now();
let interpreter = InterpreterFactory::get(context.clone(), plan?)?;
let data_stream = interpreter.execute().await?;
histogram!(
super::mysql_metrics::METRIC_INTERPRETER_USEDTIME,
instant.elapsed()
);
let collector = data_stream.collect::<Result<Vec<DataBlock>>>();
let query_result = collector.await;
query
|
{
let user_name = &info.user_name;
let address = &info.user_client_address;
let user_manager = self.session.get_user_manager();
let user_info = user_manager.get_user(user_name).await?;
let input = &info.user_password;
let saved = &user_info.password;
let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?;
user_manager
.auth_user(CertifiedInfo::create(user_name, encode_password, address))
.await
}
|
identifier_body
|
lib.rs
|
_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64> {
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
}
/// get positions on the complemented sequence in the cigar record (modifies the positions in place)
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
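// A minimal sketch (added for illustration, not part of the original file) of the index
// arithmetic used by positions_on_complimented_sequence_in_place for range endpoints:
// with offset 0 plus a final reverse, the half-open range [2, 5) on a length-10 sequence
// maps to [5, 8) on the reverse strand. This only exercises the arithmetic; constructing a
// real bam::Record is out of scope here.
#[cfg(test)]
mod reverse_strand_arithmetic_sketch {
    #[test]
    fn mirrors_a_half_open_range() {
        let seq_len: i64 = 10;
        // forward-strand range [2, 5) covers bases 2, 3, and 4
        let mut endpoints = vec![2i64, 5];
        for p in endpoints.iter_mut() {
            *p = seq_len - *p; // offset 0 because these are range endpoints
        }
        endpoints.reverse();
        // reverse-strand range [5, 8) covers the same bases read from the other end
        assert_eq!(endpoints, vec![5, 8]);
    }
}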
#[inline(always)]
pub fn
|
<T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// returned index i satisfies
/// left
/// a\[i-1\] < v <= a\[i\]
/// right
/// a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest and should only be called from there.
/// The exception is test code, where calling this function directly makes it easier to test.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the closest position for every position
let mut starting_block = 0;
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
let mut current_block = 0;
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= &q_st && cur_pos < &q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < &q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= &q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = current_block;
}
current_block += 1;
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
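// A minimal sketch (added for illustration, not part of the original file) of the
// closest-position liftover above, using two hypothetical aligned blocks: query [0, 10)
// maps onto reference [100, 110) and query [20, 30) onto reference [200, 210). Positions
// inside a block lift exactly, while positions falling between blocks snap to the nearest
// block edge.
#[cfg(test)]
mod liftover_closest_sketch {
    use super::*;

    #[test]
    fn lifts_exact_hits_and_snaps_gaps_to_block_edges() {
        let aligned_block_pairs = vec![([0i64, 10], [100i64, 110]), ([20, 30], [200, 210])];
        let positions = vec![5i64, 12, 25];
        let lifted = liftover_closest(&positions, &aligned_block_pairs);
        assert_eq!(lifted, vec![Some(105), Some(110), Some(205)]);
    }
}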
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest range but hopefully better
pub fn lift_query_range(
record: &
|
is_sorted
|
identifier_name
|
lib.rs
|
_with_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64> {
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
}
/// get positions on the complemented sequence in the cigar record (modifies the positions in place)
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
#[inline(always)]
pub fn is_sorted<T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// returned index i satisfies
/// left
/// a\[i-1\] < v <= a\[i\]
/// right
/// a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest and should only be called from there.
/// The exception is test code, where calling this function directly makes it easier to test.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
|
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
let mut current_block = 0;
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= &q_st && cur_pos < &q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < &q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= &q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = current_block;
}
current_block += 1;
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest reference range for each query (start, end) pair via the closest liftover
pub fn lift_query_range(
record: &
|
);
// find the closest position for every position
let mut starting_block = 0;
|
random_line_split
|
lib.rs
|
fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest and should only be called from there.
/// The exception is test cases, where it is easier to test this function directly.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the closest position for every position
let mut starting_block = 0;
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
let mut current_block = 0;
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= &q_st && cur_pos < &q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < &q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= &q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = current_block;
}
current_block += 1;
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
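// Editor's sketch (not part of the original crate): the core of the closest
// liftover above on a toy list of ([query_start, query_end], [ref_start, ref_end])
// blocks. For clarity it drops the sorted-input optimization (starting_block) and
// scans every block for every position; all names below are ours.
#[cfg(test)]
mod closest_liftover_sketch {
    fn closest(blocks: &[([i64; 2], [i64; 2])], positions: &[i64]) -> Vec<Option<i64>> {
        positions
            .iter()
            .map(|&p| {
                let mut best: Option<(i64, i64)> = None; // (distance, ref_pos)
                for &([q_st, q_en], [r_st, r_en]) in blocks {
                    let candidate = if p >= q_st && p < q_en {
                        (0, r_st + (p - q_st)) // inside the block: exact offset
                    } else if p < q_st {
                        (q_st - p, r_st) // before the block: snap to its start
                    } else {
                        (p - q_en, r_en) // past the block: snap to its end
                    };
                    if best.map_or(true, |(d, _)| candidate.0 < d) {
                        best = Some(candidate);
                    }
                }
                best.map(|(_, r)| r)
            })
            .collect()
    }

    #[test]
    fn exact_inside_blocks_and_nearest_edge_in_gaps() {
        let blocks = vec![([0, 5], [100, 105]), ([10, 15], [200, 205])];
        // 2 and 12 lift exactly; 7 sits in the gap and snaps to the nearer edge (105)
        assert_eq!(closest(&blocks, &[2, 7, 12]), vec![Some(102), Some(105), Some(202)]);
    }
}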
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest reference range for each query (start, end) pair via the closest liftover
pub fn lift_query_range(
record: &bam::Record,
starts: &[i64],
ends: &[i64],
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
// get the aligned block pairs
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
lift_range(&aligned_block_pairs, starts, ends, false)
}
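// Editor's sketch (not part of the original crate): the shape of the triples
// produced by lift_range above. A pair whose lifted start equals its lifted end
// collapses to (None, None, None); otherwise the third element is the lifted length.
#[cfg(test)]
mod lifted_range_shape_sketch {
    fn triples(
        starts: &[Option<i64>],
        ends: &[Option<i64>],
    ) -> Vec<(Option<i64>, Option<i64>, Option<i64>)> {
        starts
            .iter()
            .copied()
            .zip(ends.iter().copied())
            .map(|(s, e)| match (s, e) {
                (Some(s), Some(e)) if s != e => (Some(s), Some(e), Some(e - s)),
                _ => (None, None, None),
            })
            .collect()
    }

    #[test]
    fn collapsed_and_kept_ranges() {
        let got = triples(&[Some(10), Some(7), None], &[Some(20), Some(7), Some(3)]);
        assert_eq!(
            got,
            vec![(Some(10), Some(20), Some(10)), (None, None, None), (None, None, None)]
        );
    }
}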
//
// EXACT LIFTOVER FUNCTIONS
//
/// liftover positions using the cigar string
fn liftover_exact(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
positions: &[i64],
lift_reference_to_query: bool,
) -> Vec<Option<i64>> {
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the shared positions in the reference
let mut return_positions = vec![];
let mut cur_idx = 0;
// ends are not inclusive, I checked.
for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs {
let (st, en) = if !lift_reference_to_query {
(q_st, q_en)
} else {
(r_st, r_en)
};
// check bounds
if cur_idx == positions.len() {
break;
}
let mut cur_pos = positions[cur_idx];
// need to go to the next block
while cur_pos < *en {
if cur_pos >= *st {
let dist_from_start = cur_pos - st;
let rtn_pos = if !lift_reference_to_query {
r_st + dist_from_start
} else {
q_st + dist_from_start
};
return_positions.push(Some(rtn_pos));
} else {
return_positions.push(None);
}
// reset current position
cur_idx += 1;
if cur_idx == positions.len() {
break;
}
cur_pos = positions[cur_idx];
}
}
// add values for things that won't lift at the end
while positions.len() > return_positions.len() {
return_positions.push(None);
}
assert_eq!(positions.len(), return_positions.len());
return_positions
}
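// Editor's sketch (not part of the original crate): unlike the closest liftover,
// the exact liftover above only reports positions that fall inside an aligned
// block; anything in an unaligned gap yields None. Names below are ours.
#[cfg(test)]
mod exact_liftover_sketch {
    fn exact(blocks: &[([i64; 2], [i64; 2])], positions: &[i64]) -> Vec<Option<i64>> {
        positions
            .iter()
            .map(|&p| {
                blocks.iter().find_map(|&([q_st, q_en], [r_st, _r_en])| {
                    if p >= q_st && p < q_en {
                        Some(r_st + (p - q_st))
                    } else {
                        None
                    }
                })
            })
            .collect()
    }

    #[test]
    fn gap_positions_do_not_lift() {
        let blocks = vec![([0, 5], [100, 105]), ([10, 15], [200, 205])];
        assert_eq!(exact(&blocks, &[2, 7, 12]), vec![Some(102), None, Some(202)]);
    }
}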
pub fn lift_reference_positions_exact(
record: &bam::Record,
query_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
query_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, query_positions, false)
}
}
pub fn lift_query_positions_exact(
record: &bam::Record,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped()
|
{
reference_positions.iter().map(|_x| None).collect()
}
|
conditional_block
|
|
lib.rs
|
_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complimented sequence in the cigar record
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64>
|
/// get positions on the complimented sequence in the cigar record
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
#[inline(always)]
pub fn is_sorted<T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// the returned index i satisfies
/// left:  a\[i-1\] < v <= a\[i\]
/// right: a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest and should only be called from there.
/// The exception is test cases, where it is easier to test this function directly.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the closest position for every position
let mut starting_block = 0;
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
let mut current_block = 0;
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= &q_st && cur_pos < &q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < &q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= &q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = current_block;
}
current_block += 1;
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest reference range for each query (start, end) pair via the closest liftover
pub fn lift_query_range(
record:
|
{
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
}
|
identifier_body
|
chown.rs
|
reference";
}
static ARG_OWNER: &str = "owner";
static ARG_FILES: &str = "files";
const FTS_COMFOLLOW: u8 = 1;
const FTS_PHYSICAL: u8 = 1 << 1;
const FTS_LOGICAL: u8 = 1 << 2;
fn get_usage() -> String {
format!(
"{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...",
executable!()
)
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let args = args
.collect_str(InvalidEncodingHandling::Ignore)
.accept_any();
let usage = get_usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
/* First arg is the owner/group */
let owner = matches.value_of(ARG_OWNER).unwrap();
/* Then the list of files */
let files: Vec<String> = matches
.values_of(ARG_FILES)
.map(|v| v.map(ToString::to_string).collect())
.unwrap_or_default();
let preserve_root = matches.is_present(options::preserve_root::PRESERVE);
let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) {
1
} else {
0
};
let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) {
FTS_COMFOLLOW | FTS_PHYSICAL
} else if matches.is_present(options::traverse::EVERY) {
FTS_LOGICAL
} else {
FTS_PHYSICAL
};
let recursive = matches.is_present(options::RECURSIVE);
if recursive {
if bit_flag == FTS_PHYSICAL {
if derefer == 1 {
return Err(USimpleError::new(1, "-R --dereference requires -H or -L"));
}
derefer = 0;
}
} else {
bit_flag = FTS_PHYSICAL;
}
let verbosity = if matches.is_present(options::verbosity::CHANGES) {
Verbosity::Changes
} else if matches.is_present(options::verbosity::SILENT)
|| matches.is_present(options::verbosity::QUIET)
{
Verbosity::Silent
} else if matches.is_present(options::verbosity::VERBOSE) {
Verbosity::Verbose
} else {
Verbosity::Normal
};
let filter = if let Some(spec) = matches.value_of(options::FROM) {
match parse_spec(spec)? {
(Some(uid), None) => IfFrom::User(uid),
(None, Some(gid)) => IfFrom::Group(gid),
(Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid),
(None, None) => IfFrom::All,
}
} else {
IfFrom::All
};
let dest_uid: Option<u32>;
let dest_gid: Option<u32>;
if let Some(file) = matches.value_of(options::REFERENCE) {
let meta = fs::metadata(&file)
.map_err_context(|| format!("failed to get attributes of '{}'", file))?;
dest_gid = Some(meta.gid());
dest_uid = Some(meta.uid());
} else {
let (u, g) = parse_spec(owner)?;
dest_uid = u;
dest_gid = g;
}
let executor = Chowner {
bit_flag,
dest_uid,
dest_gid,
verbosity,
recursive,
dereference: derefer != 0,
filter,
preserve_root,
files,
};
executor.exec()
}
pub fn uu_app() -> App<'static, 'static> {
App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(options::verbosity::CHANGES)
.short("c")
.long(options::verbosity::CHANGES)
.help("like verbose but report only when a change is made"),
)
.arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help(
"affect the referent of each symbolic link (this is the default), rather than the symbolic link itself",
))
.arg(
Arg::with_name(options::dereference::NO_DEREFERENCE)
.short("h")
.long(options::dereference::NO_DEREFERENCE)
.help(
"affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)",
),
)
.arg(
Arg::with_name(options::FROM)
.long(options::FROM)
.help(
"change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute",
)
.value_name("CURRENT_OWNER:CURRENT_GROUP"),
)
.arg(
Arg::with_name(options::preserve_root::PRESERVE)
.long(options::preserve_root::PRESERVE)
.help("fail to operate recursively on '/'"),
)
.arg(
Arg::with_name(options::preserve_root::NO_PRESERVE)
.long(options::preserve_root::NO_PRESERVE)
.help("do not treat '/' specially (the default)"),
)
.arg(
Arg::with_name(options::verbosity::QUIET)
.long(options::verbosity::QUIET)
.help("suppress most error messages"),
)
.arg(
Arg::with_name(options::RECURSIVE)
.short("R")
.long(options::RECURSIVE)
.help("operate on files and directories recursively"),
)
.arg(
Arg::with_name(options::REFERENCE)
.long(options::REFERENCE)
.help("use RFILE's owner and group rather than specifying OWNER:GROUP values")
.value_name("RFILE")
.min_values(1),
)
.arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT))
.arg(
Arg::with_name(options::traverse::TRAVERSE)
.short(options::traverse::TRAVERSE)
.help("if a command line argument is a symbolic link to a directory, traverse it")
.overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::EVERY)
.short(options::traverse::EVERY)
.help("traverse every symbolic link to a directory encountered")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::NO_TRAVERSE)
.short(options::traverse::NO_TRAVERSE)
.help("do not traverse any symbolic links (default)")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]),
)
.arg(
Arg::with_name(options::verbosity::VERBOSE)
.long(options::verbosity::VERBOSE)
.help("output a diagnostic for every file processed"),
)
.arg(
Arg::with_name(ARG_OWNER)
.multiple(false)
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
fn
|
(spec: &str) -> UResult<(Option<u32>, Option<u32>)> {
let args = spec.split_terminator(':').collect::<Vec<_>>();
let usr_only = args.len() == 1 && !args[0].is_empty();
let grp_only = args.len() == 2 && args[0].is_empty();
let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
let uid = if usr_only || usr_grp {
Some(
Passwd::locate(args[0])
.map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))?
.uid(),
)
} else {
None
};
let gid = if grp_only || usr_grp {
Some(
Group::locate(args[1])
.map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))?
.gid(),
)
} else {
None
};
Ok((uid, gid))
}
enum IfFrom {
All,
User(u32),
Group(u32),
UserGroup(u32, u32),
}
struct Chowner {
dest_uid: Option<u32>,
dest_gid: Option<u32>,
bit_flag: u8,
verbosity: Verbosity,
filter: IfFrom,
files: Vec<String>,
recursive: bool,
preserve_root: bool,
dereference: bool,
}
macro_rules! unwrap {
($m:expr, $e:ident, $err:block) => {
match $m {
Ok(meta) => meta,
Err($
|
parse_spec
|
identifier_name
|
chown.rs
|
reference";
}
static ARG_OWNER: &str = "owner";
static ARG_FILES: &str = "files";
const FTS_COMFOLLOW: u8 = 1;
const FTS_PHYSICAL: u8 = 1 << 1;
const FTS_LOGICAL: u8 = 1 << 2;
fn get_usage() -> String {
format!(
"{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...",
executable!()
)
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let args = args
.collect_str(InvalidEncodingHandling::Ignore)
.accept_any();
let usage = get_usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
/* First arg is the owner/group */
let owner = matches.value_of(ARG_OWNER).unwrap();
/* Then the list of files */
let files: Vec<String> = matches
.values_of(ARG_FILES)
.map(|v| v.map(ToString::to_string).collect())
.unwrap_or_default();
let preserve_root = matches.is_present(options::preserve_root::PRESERVE);
let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) {
1
} else {
0
};
let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) {
FTS_COMFOLLOW | FTS_PHYSICAL
} else if matches.is_present(options::traverse::EVERY) {
FTS_LOGICAL
} else {
FTS_PHYSICAL
};
let recursive = matches.is_present(options::RECURSIVE);
if recursive {
if bit_flag == FTS_PHYSICAL {
if derefer == 1 {
return Err(USimpleError::new(1, "-R --dereference requires -H or -L"));
}
derefer = 0;
}
} else {
bit_flag = FTS_PHYSICAL;
}
let verbosity = if matches.is_present(options::verbosity::CHANGES) {
Verbosity::Changes
} else if matches.is_present(options::verbosity::SILENT)
|| matches.is_present(options::verbosity::QUIET)
{
Verbosity::Silent
} else if matches.is_present(options::verbosity::VERBOSE) {
Verbosity::Verbose
} else {
Verbosity::Normal
};
let filter = if let Some(spec) = matches.value_of(options::FROM) {
match parse_spec(spec)? {
(Some(uid), None) => IfFrom::User(uid),
(None, Some(gid)) => IfFrom::Group(gid),
(Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid),
(None, None) => IfFrom::All,
}
} else {
IfFrom::All
};
let dest_uid: Option<u32>;
let dest_gid: Option<u32>;
if let Some(file) = matches.value_of(options::REFERENCE) {
let meta = fs::metadata(&file)
.map_err_context(|| format!("failed to get attributes of '{}'", file))?;
dest_gid = Some(meta.gid());
dest_uid = Some(meta.uid());
} else {
let (u, g) = parse_spec(owner)?;
dest_uid = u;
dest_gid = g;
}
let executor = Chowner {
bit_flag,
dest_uid,
dest_gid,
verbosity,
recursive,
dereference: derefer != 0,
filter,
preserve_root,
files,
};
executor.exec()
}
pub fn uu_app() -> App<'static, 'static> {
App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(options::verbosity::CHANGES)
.short("c")
.long(options::verbosity::CHANGES)
.help("like verbose but report only when a change is made"),
)
.arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help(
"affect the referent of each symbolic link (this is the default), rather than the symbolic link itself",
))
.arg(
Arg::with_name(options::dereference::NO_DEREFERENCE)
.short("h")
.long(options::dereference::NO_DEREFERENCE)
.help(
"affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)",
),
)
.arg(
Arg::with_name(options::FROM)
.long(options::FROM)
.help(
"change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute",
)
.value_name("CURRENT_OWNER:CURRENT_GROUP"),
)
.arg(
Arg::with_name(options::preserve_root::PRESERVE)
.long(options::preserve_root::PRESERVE)
.help("fail to operate recursively on '/'"),
)
.arg(
Arg::with_name(options::preserve_root::NO_PRESERVE)
.long(options::preserve_root::NO_PRESERVE)
.help("do not treat '/' specially (the default)"),
)
.arg(
Arg::with_name(options::verbosity::QUIET)
.long(options::verbosity::QUIET)
.help("suppress most error messages"),
)
.arg(
Arg::with_name(options::RECURSIVE)
.short("R")
.long(options::RECURSIVE)
.help("operate on files and directories recursively"),
)
.arg(
Arg::with_name(options::REFERENCE)
.long(options::REFERENCE)
.help("use RFILE's owner and group rather than specifying OWNER:GROUP values")
.value_name("RFILE")
.min_values(1),
)
.arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT))
.arg(
Arg::with_name(options::traverse::TRAVERSE)
.short(options::traverse::TRAVERSE)
.help("if a command line argument is a symbolic link to a directory, traverse it")
.overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]),
)
|
)
.arg(
Arg::with_name(options::traverse::NO_TRAVERSE)
.short(options::traverse::NO_TRAVERSE)
.help("do not traverse any symbolic links (default)")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]),
)
.arg(
Arg::with_name(options::verbosity::VERBOSE)
.long(options::verbosity::VERBOSE)
.help("output a diagnostic for every file processed"),
)
.arg(
Arg::with_name(ARG_OWNER)
.multiple(false)
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
fn parse_spec(spec: &str) -> UResult<(Option<u32>, Option<u32>)> {
let args = spec.split_terminator(':').collect::<Vec<_>>();
let usr_only = args.len() == 1 && !args[0].is_empty();
let grp_only = args.len() == 2 && args[0].is_empty();
let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
let uid = if usr_only || usr_grp {
Some(
Passwd::locate(args[0])
.map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))?
.uid(),
)
} else {
None
};
let gid = if grp_only || usr_grp {
Some(
Group::locate(args[1])
.map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))?
.gid(),
)
} else {
None
};
Ok((uid, gid))
}
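// Editor's sketch (not part of uutils): the three shapes the OWNER[:GROUP] spec can
// take after split_terminator(':'), mirroring the usr_only / grp_only / usr_grp
// flags in parse_spec above but without the Passwd/Group lookups. Names are ours.
#[cfg(test)]
mod owner_group_spec_sketch {
    fn shape(spec: &str) -> (bool, bool, bool) {
        let args: Vec<&str> = spec.split_terminator(':').collect();
        let usr_only = args.len() == 1 && !args[0].is_empty();
        let grp_only = args.len() == 2 && args[0].is_empty();
        let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
        (usr_only, grp_only, usr_grp)
    }

    #[test]
    fn spec_shapes() {
        assert_eq!(shape("alice"), (true, false, false)); // user only
        assert_eq!(shape(":staff"), (false, true, false)); // group only
        assert_eq!(shape("alice:staff"), (false, false, true)); // both
        // a trailing ':' is dropped by split_terminator, so this parses as user only
        assert_eq!(shape("alice:"), (true, false, false));
    }
}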
enum IfFrom {
All,
User(u32),
Group(u32),
UserGroup(u32, u32),
}
struct Chowner {
dest_uid: Option<u32>,
dest_gid: Option<u32>,
bit_flag: u8,
verbosity: Verbosity,
filter: IfFrom,
files: Vec<String>,
recursive: bool,
preserve_root: bool,
dereference: bool,
}
macro_rules! unwrap {
($m:expr, $e:ident, $err:block) => {
match $m {
Ok(meta) => meta,
Err($e
|
.arg(
Arg::with_name(options::traverse::EVERY)
.short(options::traverse::EVERY)
.help("traverse every symbolic link to a directory encountered")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]),
|
random_line_split
|
chown.rs
|
.uid());
} else {
let (u, g) = parse_spec(owner)?;
dest_uid = u;
dest_gid = g;
}
let executor = Chowner {
bit_flag,
dest_uid,
dest_gid,
verbosity,
recursive,
dereference: derefer != 0,
filter,
preserve_root,
files,
};
executor.exec()
}
pub fn uu_app() -> App<'static, 'static> {
App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(options::verbosity::CHANGES)
.short("c")
.long(options::verbosity::CHANGES)
.help("like verbose but report only when a change is made"),
)
.arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help(
"affect the referent of each symbolic link (this is the default), rather than the symbolic link itself",
))
.arg(
Arg::with_name(options::dereference::NO_DEREFERENCE)
.short("h")
.long(options::dereference::NO_DEREFERENCE)
.help(
"affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)",
),
)
.arg(
Arg::with_name(options::FROM)
.long(options::FROM)
.help(
"change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute",
)
.value_name("CURRENT_OWNER:CURRENT_GROUP"),
)
.arg(
Arg::with_name(options::preserve_root::PRESERVE)
.long(options::preserve_root::PRESERVE)
.help("fail to operate recursively on '/'"),
)
.arg(
Arg::with_name(options::preserve_root::NO_PRESERVE)
.long(options::preserve_root::NO_PRESERVE)
.help("do not treat '/' specially (the default)"),
)
.arg(
Arg::with_name(options::verbosity::QUIET)
.long(options::verbosity::QUIET)
.help("suppress most error messages"),
)
.arg(
Arg::with_name(options::RECURSIVE)
.short("R")
.long(options::RECURSIVE)
.help("operate on files and directories recursively"),
)
.arg(
Arg::with_name(options::REFERENCE)
.long(options::REFERENCE)
.help("use RFILE's owner and group rather than specifying OWNER:GROUP values")
.value_name("RFILE")
.min_values(1),
)
.arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT))
.arg(
Arg::with_name(options::traverse::TRAVERSE)
.short(options::traverse::TRAVERSE)
.help("if a command line argument is a symbolic link to a directory, traverse it")
.overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::EVERY)
.short(options::traverse::EVERY)
.help("traverse every symbolic link to a directory encountered")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::NO_TRAVERSE)
.short(options::traverse::NO_TRAVERSE)
.help("do not traverse any symbolic links (default)")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]),
)
.arg(
Arg::with_name(options::verbosity::VERBOSE)
.long(options::verbosity::VERBOSE)
.help("output a diagnostic for every file processed"),
)
.arg(
Arg::with_name(ARG_OWNER)
.multiple(false)
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
fn parse_spec(spec: &str) -> UResult<(Option<u32>, Option<u32>)> {
let args = spec.split_terminator(':').collect::<Vec<_>>();
let usr_only = args.len() == 1 && !args[0].is_empty();
let grp_only = args.len() == 2 && args[0].is_empty();
let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
let uid = if usr_only || usr_grp {
Some(
Passwd::locate(args[0])
.map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))?
.uid(),
)
} else {
None
};
let gid = if grp_only || usr_grp {
Some(
Group::locate(args[1])
.map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))?
.gid(),
)
} else {
None
};
Ok((uid, gid))
}
enum IfFrom {
All,
User(u32),
Group(u32),
UserGroup(u32, u32),
}
struct Chowner {
dest_uid: Option<u32>,
dest_gid: Option<u32>,
bit_flag: u8,
verbosity: Verbosity,
filter: IfFrom,
files: Vec<String>,
recursive: bool,
preserve_root: bool,
dereference: bool,
}
macro_rules! unwrap {
($m:expr, $e:ident, $err:block) => {
match $m {
Ok(meta) => meta,
Err($e) => $err,
}
};
}
impl Chowner {
fn exec(&self) -> UResult<()> {
let mut ret = 0;
for f in &self.files {
ret |= self.traverse(f);
}
if ret != 0 {
return Err(ret.into());
}
Ok(())
}
fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 {
let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL;
let path = root.as_ref();
let meta = match self.obtain_meta(path, follow_arg) {
Some(m) => m,
_ => return 1,
};
// Prohibit only if:
// (--preserve-root and -R present) &&
// (
// (argument is not symlink && resolved to be '/') ||
// (argument is symlink && should follow argument && resolved to be '/')
// )
if self.recursive && self.preserve_root {
let may_exist = if follow_arg {
path.canonicalize().ok()
} else {
let real = resolve_relative_path(path);
if real.is_dir() {
Some(real.canonicalize().expect("failed to get real path"))
} else {
Some(real.into_owned())
}
};
if let Some(p) = may_exist {
if p.parent().is_none() {
show_error!("it is dangerous to operate recursively on '/'");
show_error!("use --no-preserve-root to override this failsafe");
return 1;
}
}
}
let ret = if self.matched(meta.uid(), meta.gid()) {
match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow_arg,
self.verbosity.clone(),
) {
Ok(n) => {
if !n.is_empty() {
show_error!("{}", n);
}
0
}
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
} else {
0
};
if !self.recursive {
ret
} else {
ret | self.dive_into(&root)
}
}
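// Editor's sketch (not part of uutils): the preserve-root check in traverse above
// relies on Path::parent() returning None for a filesystem root, which is how a
// canonicalized argument is recognized as '/'.
#[cfg(test)]
mod preserve_root_sketch {
    use std::path::Path;

    #[test]
    fn only_root_has_no_parent() {
        assert!(Path::new("/").parent().is_none());
        assert_eq!(Path::new("/tmp").parent(), Some(Path::new("/")));
    }
}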
fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 {
let mut ret = 0;
let root = root.as_ref();
let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0;
for entry in WalkDir::new(root).follow_links(follow).min_depth(1) {
let entry = unwrap!(entry, e, {
ret = 1;
show_error!("{}", e);
continue;
});
let path = entry.path();
let meta = match self.obtain_meta(path, follow) {
Some(m) => m,
_ => {
ret = 1;
continue;
}
};
if !self.matched(meta.uid(), meta.gid()) {
continue;
}
ret = match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow,
self.verbosity.clone(),
) {
Ok(n) =>
|
{
if !n.is_empty() {
show_error!("{}", n);
}
0
}
|
conditional_block
|
|
bls12_377_scalar.rs
|
= -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
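// Editor's sketch (not part of the original crate): the Montgomery invariant
// M(a, b) = a * b * R^{-1} mod N behind montgomery_multiply above, shown on a toy
// single-word example with N = 97 and R = 2^32. Multiplying two values in
// Montgomery form and reducing keeps the product in Montgomery form; reducing once
// more (multiplying by 1, as in to_canonical) converts back. All names are ours.
#[cfg(test)]
mod montgomery_sketch {
    const N: u32 = 97; // any odd modulus works for the sketch

    // N' = -N^{-1} mod 2^32, via Newton iteration for the inverse mod 2^k.
    fn n_prime() -> u32 {
        let mut inv: u32 = 1;
        for _ in 0..5 {
            inv = inv.wrapping_mul(2u32.wrapping_sub(N.wrapping_mul(inv)));
        }
        inv.wrapping_neg()
    }

    // REDC(t) = t * R^{-1} mod N for t < N * R, with R = 2^32.
    fn redc(t: u64) -> u32 {
        let m = (t as u32).wrapping_mul(n_prime()) as u64;
        let u = (t + m * N as u64) >> 32;
        if u >= N as u64 { (u - N as u64) as u32 } else { u as u32 }
    }

    // canonical -> Montgomery form: x * R mod N (computed directly for the toy size)
    fn to_mont(x: u32) -> u32 {
        ((x as u64 * (1u64 << 32)) % N as u64) as u32
    }

    #[test]
    fn product_of_montgomery_forms_reduces_to_the_product() {
        let (a, b) = (5u32, 7u32);
        let prod_mont = redc(to_mont(a) as u64 * to_mont(b) as u64); // a*b in Montgomery form
        assert_eq!(redc(prod_mont as u64), (a * b) % N); // back to canonical: 35
    }
}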
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
}
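// Editor's sketch (not part of the original crate): the underflow trick used by Sub
// above, on a toy single-limb modulus. When a < b, the difference a - b mod N is
// computed as a + (N - b), so the result stays in [0, N) without a signed intermediate.
#[cfg(test)]
mod modular_sub_sketch {
    const N: u64 = 97;

    fn sub_mod(a: u64, b: u64) -> u64 {
        if a < b { a + (N - b) } else { a - b }
    }

    #[test]
    fn wraps_instead_of_borrowing() {
        assert_eq!(sub_mod(5, 7), 95); // 5 - 7 ≡ -2 ≡ 95 (mod 97)
        assert_eq!(sub_mod(7, 5), 2);
    }
}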
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
}
}
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else {
|
}
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64
|
Self { limbs: sub(Self::ORDER, self.limbs) }
}
|
conditional_block
|
bls12_377_scalar.rs
|
94542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64>) -> Self {
Self::from_canonical(v[..].try_into().unwrap())
}
fn from_canonical_u64(n: u64) -> Self {
Self::from_canonical([n, 0, 0, 0])
}
fn is_valid_canonical_u64(v: &[u64]) -> bool {
v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less
}
fn multiplicative_inverse_assuming_nonzero(&self) -> Self {
// Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R.
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER);
Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) }
}
fn rand() -> Self {
Self {
limbs: rand_range(Self::ORDER),
}
}
fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
Self {
limbs: rand_range_from_rng(Self::ORDER, rng),
}
}
}
impl Ord for Bls12377Scalar {
fn cmp(&self, other: &Self) -> Ordering {
self.cmp_helper(other)
}
}
impl PartialOrd for Bls12377Scalar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Display for Bls12377Scalar {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
field_to_biguint(*self).fmt(f)
}
}
#[cfg(test)]
mod tests {
use crate::{Bls12377Scalar, Field};
use crate::conversions::u64_slice_to_biguint;
use crate::test_arithmetic;
#[test]
fn bls12scalar_to_and_from_canonical() {
let a = [1, 2, 3, 4];
let a_biguint = u64_slice_to_biguint(&a);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R);
let a_bls12scalar = Bls12377Scalar::from_canonical(a);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs),
&a_biguint * &r_biguint % &order_biguint);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint);
}
#[test]
fn mul_bls12_scalar() {
let a = [1, 2, 3, 4];
let b = [3, 4, 5, 6];
let a_biguint = u64_slice_to_biguint(&a);
let b_biguint = u64_slice_to_biguint(&b);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let a_blsbase = Bls12377Scalar::from_canonical(a);
let b_blsbase = Bls12377Scalar::from_canonical(b);
assert_eq!(
u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()),
a_biguint * b_biguint % order_biguint);
}
#[test]
fn test_bls12_rand() {
let random_element = Bls12377Scalar::rand();
for i in 0..4 {
assert_ne!(random_element.limbs[i], 0x0);
}
}
#[test]
fn exp() {
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9));
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27));
}
#[test]
fn negation() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO);
}
}
#[test]
fn multiplicative_inverse() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
let i_inv_blsscalar = i_blsscalar.multiplicative_inverse();
if i == 0 {
assert!(i_inv_blsscalar.is_none());
} else {
assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE);
}
}
}
#[test]
fn batch_multiplicative_inverse() {
let mut x = Vec::new();
for i in 1..25 {
x.push(Bls12377Scalar::from_canonical_u64(i));
}
let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x);
assert_eq!(x.len(), x_inv.len());
for (x_i, x_i_inv) in x.into_iter().zip(x_inv) {
assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE);
}
}
#[test]
fn n
|
um_bits(
|
identifier_name
|
|
bls12_377_scalar.rs
|
= -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
}
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
|
}
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else {
Self { limbs: sub(Self::ORDER, self.limbs) }
}
}
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64>)
|
}
|
random_line_split
|
bls12_377_scalar.rs
|
= -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
|
}
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
}
}
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else {
Self { limbs: sub(Self::ORDER, self.limbs) }
}
}
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64
|
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
|
identifier_body
|
poisson_kriging.py
|
functions
def set_params(self, model,
joined_datasets, population_series, centroids_dataset,
id_col, val_col, pop_col,
lags_number, lag_step_size,
min_no_of_observations, search_radius):
self.model = model
self.joined_datasets = joined_datasets
self.total_population_per_unit = population_series
self.centroids_of_areal_data = centroids_dataset
self.id_col = id_col
self.val_col = val_col
self.pop_col = pop_col
self.lags = lags_number
self.step = lag_step_size
self.min_no_of_observations = min_no_of_observations
self.max_search_radius = search_radius
print('Parameters have been set')
def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid,
weighted=False, verbose=False):
"""
Function prepares data from unknown locations for Poisson Kriging.
:param unknown_areal_data_row: PKData row with areal and population data,
:param unknown_areal_data_centroid: centroid of the unknown area as [x, y, ..., id],
:param weighted: distances weighted by population (True) or not (False),
:param verbose: if True then the method reports the successful operation.
:return prediction: prepared dataset which contains:
[[x, y, value, known_area_id, distance_to_unknown_position], [...]],
"""
areal_id = unknown_areal_data_centroid[0][-1]
cx_cy = unknown_areal_data_centroid[0][:2]
r = np.array([cx_cy])
known_centroids = self.centroids_of_areal_data
kc = known_centroids[:, :2]
# Build set for Poisson Kriging
if weighted:
weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row,
areal_id)
s = []
for wd in weighted_distances:
for k in known_centroids:
if wd[1] in k:
s.append(wd[0])
break
else:
pass
s = np.array(s).T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn]
else:
distances_array = np.zeros(kc.shape)
for i in range(0, r.shape[1]):
distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2
s = distances_array.sum(axis=1)
s = np.sqrt(s)
s = s.T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn]
# remove nans
kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)]
# sort by distance
kriging_data = kriging_data[kriging_data[:, -1].argsort()]
# set output by distance params
# search radius
max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius)
output_data = kriging_data[:max_search_pos]
# check number of observations
if len(output_data) < self.min_no_of_observations:
output_data = kriging_data[:self.min_no_of_observations]
# TODO: info to the app logs
# print('Dataset has been set based on the minimum number of observations')
# set final dataset
self.prepared_data = output_data
if verbose:
print('Predictions data prepared')
def normalize_weights(self, weights, estimated_value, kriging_type):
"""
Algorithm for weight normalization to remove negative weights of points that are
clustering. Derived from Deutsch, C.V., Correcting for negative weights in
ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996.
:param: weights - weights matrix calculated with the "normal" kriging procedure,
:param: estimated_value - value estimated for a given, unknown point,
:param: kriging_type - 'ord' for ordinary kriging or 'sim' for simple kriging.
:return: weight_matrix - normalized weight matrix where negative weights are removed and the
matrix is scaled to give a sum of all elements equal to 1.
"""
if kriging_type == 'ord':
weight_matrix = weights[:-1].copy()
output_matrix = weights[:-1].copy()
elif kriging_type == 'sim':
weight_matrix = weights.copy()
output_matrix = weights.copy()
else:
print('You did not choose any kriging type. Chosen type: <sim> - simple kriging.')
weight_matrix = weights.copy()
output_matrix = weights.copy()
###### Calculate average covariance between the location being ######
###### estimated and the locations with negative weights ######
locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0
locs = locs[:, 0]
# Calculate covariance between those points and unknown point
if len(locs) >= 1:
c = []
mu = 0
for i in locs:
_c = estimated_value * self.prepared_data[i, 2]
mu = mu + estimated_value + self.prepared_data[i, 2]
c.append(_c)
output_matrix[i, 0] = 0
mu = mu / len(c)
cov = np.sum(c) / len(c) - mu * mu
###### Calculate absolute magnitude of the negative weights #####
w = weight_matrix[weight_matrix < 0]
w = w.T
magnitude = np.sum(np.abs(w)) / len(w)
###### Test weights greater than 0 and check if they need to be ######
###### set to 0 ######
###### if weight > 0, and the covariance between the unknown point and the known
###### point is less than the average covariance between the location
###### being estimated and the locations with negative weights, and the
###### weight is less than the absolute magnitude of the negative
###### weights, then set the weight to zero ######
positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0
positive_locs = positive_locs[:, 0]
for j in positive_locs:
cov_est = (estimated_value * self.prepared_data[j, 2]) / 2
mu = (estimated_value + self.prepared_data[j, 2]) / 2
cov_est = cov_est - mu * mu
if cov_est < cov:
if weight_matrix[j, 0] < magnitude:
output_matrix[j, 0] = 0
###### Normalize weight matrix to get a sum of all elements equal to 1 ######
output_matrix = output_matrix / np.sum(output_matrix)
return output_matrix
else:
return weights
# Data processing private class methods
def _calculate_weighted_distances(self, unknown_area, unknown_area_id):
"""Function calculates weighted distances between unknown area and known areas"""
dist_dict = self._prepare_distances_dict(unknown_area)
base_area = dist_dict[unknown_area_id]
base_area_list = base_area['coordinates']
other_keys = list(dist_dict.keys())
weighted_distances = []
for k in other_keys:
other_area_list = dist_dict[k]['coordinates']
dist = calculate_block_to_block_distance(base_area_list,
other_area_list)
weighted_distances.append([dist, k])
return weighted_distances
def _prepare_distances_dict(self, unknown_area):
"""Function prepares dict with distances for weighted distance calculation
between areas"""
new_d = self.joined_datasets.copy()
new_d = new_d.append(unknown_area, ignore_index=True)
try:
new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x)
new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y)
except TypeError:
new_d['px'] = new_d['geometry'].apply(lambda v: v.x)
new_d['py'] = new_d['geometry'].apply(lambda v: v.y)
new_dict = (new_d.groupby(self.id_col)
.apply(lambda v:
{'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))})
.to_dict())
return new_dict
@staticmethod
def _get_list_from_dict(d, l):
|
def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas):
w = np.linalg.solve(predicted_array, k_array)
zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0])
if np.any(w < 0):
# Normalize weights
normalized_w = self.normalize_weights(w, zhat, 'ord')
zhat = (np.matrix(vals_of_neigh_areas * normalized_w)[0, 0])
sigmasq = (w.T *
|
"""Function creates list of lists from dict of dicts in the order
given by the list with key names"""
new_list = []
for val in l:
subdict = d[val]
inner_list = []
for subval in l:
inner_list.append(subdict[subval])
new_list.append(inner_list)
return np.array(new_list)
|
identifier_body
|
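Editor's note: _get_list_from_dict above builds an ordered square matrix out of a dict of dicts, following the key order supplied by the caller. A minimal standalone Python sketch of that idea, using made-up toy data (names and values are illustrative only, not taken from the source):

import numpy as np

def get_list_from_dict(d, keys):
    # Build a square matrix whose rows and columns follow the order given in `keys`.
    return np.array([[d[row][col] for col in keys] for row in keys])

# Hypothetical pairwise-distance dict between three areas.
d = {
    'A': {'A': 0.0, 'B': 1.5, 'C': 2.0},
    'B': {'A': 1.5, 'B': 0.0, 'C': 1.0},
    'C': {'A': 2.0, 'B': 1.0, 'C': 0.0},
}
print(get_list_from_dict(d, ['A', 'B', 'C']))
# [[0.  1.5 2. ]
#  [1.5 0.  1. ]
#  [2.  1.  0. ]]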
poisson_kriging.py
|
preparation functions
def set_params(self, model,
joined_datasets, population_series, centroids_dataset,
id_col, val_col, pop_col,
lags_number, lag_step_size,
min_no_of_observations, search_radius):
self.model = model
self.joined_datasets = joined_datasets
self.total_population_per_unit = population_series
self.centroids_of_areal_data = centroids_dataset
self.id_col = id_col
self.val_col = val_col
self.pop_col = pop_col
self.lags = lags_number
self.step = lag_step_size
self.min_no_of_observations = min_no_of_observations
self.max_search_radius = search_radius
print('Parameters have been set')
def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid,
weighted=False, verbose=False):
"""
Function prepares data from unknown locations for Poisson Kriging.
        :param unknown_areal_data_row: PKData object (row) with areal and population data,
        :param unknown_areal_data_centroid: centroid record of the unknown area in the form [[x, y, ..., id]],
        :param weighted: distances weighted by population (True) or not (False),
        :param verbose: if True then the method informs about the successful operation.
        :return: None - the prepared dataset is stored in self.prepared_data and contains:
            [[x, y, value, known_area_id, distance_to_unknown_position], [...]].
"""
areal_id = unknown_areal_data_centroid[0][-1]
cx_cy = unknown_areal_data_centroid[0][:2]
r = np.array([cx_cy])
known_centroids = self.centroids_of_areal_data
kc = known_centroids[:, :2]
# Build set for Poisson Kriging
if weighted:
weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row,
areal_id)
s = []
for wd in weighted_distances:
for k in known_centroids:
if wd[1] in k:
s.append(wd[0])
break
else:
pass
s = np.array(s).T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn]
else:
distances_array = np.zeros(kc.shape)
for i in range(0, r.shape[1]):
distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2
s = distances_array.sum(axis=1)
s = np.sqrt(s)
s = s.T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn]
# remove nans
kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)]
# sort by distance
kriging_data = kriging_data[kriging_data[:, -1].argsort()]
# set output by distance params
# search radius
max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius)
output_data = kriging_data[:max_search_pos]
# check number of observations
if len(output_data) < self.min_no_of_observations:
output_data = kriging_data[:self.min_no_of_observations]
# TODO: info to the app logs
# print('Dataset has been set based on the minimum number of observations')
# set final dataset
self.prepared_data = output_data
if verbose:
print('Predictions data prepared')
def normalize_weights(self, weights, estimated_value, kriging_type):
"""
Algorithm for weight normalization to remove negative weights of the points which are
clustering. Derived from Deutsch, C.V., Correcting for negative weights in
ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996.
        :param: weights - weights matrix calculated with the "normal" kriging procedure,
        :param: estimated_value - value estimated for a given, unknown point,
        :param: kriging_type - 'ord' (ordinary kriging, the last element of weights is dropped) or 'sim' (simple kriging).
        :return: weight_matrix - normalized weight matrix where negative weights are removed and the
            matrix is scaled so that the sum of all its elements equals 1.
"""
if kriging_type == 'ord':
weight_matrix = weights[:-1].copy()
output_matrix = weights[:-1].copy()
elif kriging_type == 'sim':
weight_matrix = weights.copy()
output_matrix = weights.copy()
else:
            print('Kriging type not recognized. Defaulting to <sim> - simple kriging.')
weight_matrix = weights.copy()
output_matrix = weights.copy()
###### Calculate average covariance between the location being ######
###### estimated and the locations with negative weights ######
locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0
locs = locs[:, 0]
# Calculate covariance between those points and unknown point
if len(locs) >= 1:
c = []
mu = 0
for i in locs:
_c = estimated_value * self.prepared_data[i, 2]
mu = mu + estimated_value + self.prepared_data[i, 2]
c.append(_c)
output_matrix[i, 0] = 0
mu = mu / len(c)
cov = np.sum(c) / len(c) - mu * mu
###### Calculate absolute magnitude of the negative weights #####
w = weight_matrix[weight_matrix < 0]
w = w.T
magnitude = np.sum(np.abs(w)) / len(w)
            ###### Test weights greater than 0 and check if they need to be ######
            ###### rescaled to 0:                                            ######
            ###### if a weight is > 0, the covariance between the unknown point and
            ###### the known point is less than the average covariance between the
            ###### location being estimated and the locations with negative weights,
            ###### and the weight is less than the absolute magnitude of the negative
            ###### weights, then set that weight to zero ######
positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0
positive_locs = positive_locs[:, 0]
for j in positive_locs:
cov_est = (estimated_value * self.prepared_data[j, 2]) / 2
mu = (estimated_value + self.prepared_data[j, 2]) / 2
cov_est = cov_est - mu * mu
if cov_est < cov:
if weight_matrix[j, 0] < magnitude:
output_matrix[j, 0] = 0
###### Normalize weight matrix to get a sum of all elements equal to 1 ######
output_matrix = output_matrix / np.sum(output_matrix)
return output_matrix
else:
return weights
# Data processing private class methods
def _calculate_weighted_distances(self, unknown_area, unknown_area_id):
"""Function calculates weighted distances between unknown area and known areas"""
dist_dict = self._prepare_distances_dict(unknown_area)
base_area = dist_dict[unknown_area_id]
base_area_list = base_area['coordinates']
other_keys = list(dist_dict.keys())
weighted_distances = []
for k in other_keys:
other_area_list = dist_dict[k]['coordinates']
dist = calculate_block_to_block_distance(base_area_list,
other_area_list)
weighted_distances.append([dist, k])
return weighted_distances
def
|
(self, unknown_area):
"""Function prepares dict with distances for weighted distance calculation
between areas"""
new_d = self.joined_datasets.copy()
new_d = new_d.append(unknown_area, ignore_index=True)
try:
new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x)
new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y)
except TypeError:
new_d['px'] = new_d['geometry'].apply(lambda v: v.x)
new_d['py'] = new_d['geometry'].apply(lambda v: v.y)
new_dict = (new_d.groupby(self.id_col)
.apply(lambda v:
{'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))})
.to_dict())
return new_dict
@staticmethod
def _get_list_from_dict(d, l):
"""Function creates list of lists from dict of dicts in the order
given by the list with key names"""
new_list = []
for val in l:
subdict = d[val]
inner_list = []
for subval in l:
inner_list.append(subdict[subval])
new_list.append(inner_list)
return np.array(new_list)
def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas):
w = np.linalg.solve(predicted_array, k_array)
zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0])
if np.any(w < 0):
# Normalize weights
normalized_w = self.normalize_weights(w, zhat, 'ord')
zhat = (np.matrix(vals_of_neigh_areas * normalized_w)[0, 0])
sigmasq = (w.T *
|
_prepare_distances_dict
|
identifier_name
|
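Editor's note: normalize_weights above follows the Deutsch (1996) correction for negative ordinary-kriging weights: zero the negative weights, compute the average covariance between the unknown location and the negatively weighted points plus the average magnitude of the negative weights, zero the small low-covariance positive weights, then rescale to a unit sum. A simplified, self-contained Python sketch of those steps on assumed toy values (the covariance vector is supplied directly here rather than derived as in the method above):

import numpy as np

def correct_negative_weights(weights, cov_to_unknown):
    # weights        - 1D array of kriging weights (may contain negatives)
    # cov_to_unknown - 1D array, covariance of each known point with the unknown point
    w = weights.copy()
    neg = w < 0
    if not neg.any():
        return w
    # Average covariance between the unknown location and the negatively weighted points.
    avg_neg_cov = cov_to_unknown[neg].mean()
    # Average absolute magnitude of the negative weights.
    avg_neg_mag = np.abs(w[neg]).mean()
    # Zero the negative weights.
    w[neg] = 0.0
    # Zero small positive weights whose covariance with the unknown point is low.
    small_and_far = (weights > 0) & (cov_to_unknown < avg_neg_cov) & (weights < avg_neg_mag)
    w[small_and_far] = 0.0
    # Rescale so the remaining weights sum to 1.
    return w / w.sum()

# Hypothetical example.
w = np.array([0.55, 0.40, 0.20, -0.10, -0.05])
c = np.array([0.90, 0.70, 0.20, 0.30, 0.25])
print(correct_negative_weights(w, c))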
poisson_kriging.py
|
preparation functions
def set_params(self, model,
joined_datasets, population_series, centroids_dataset,
id_col, val_col, pop_col,
lags_number, lag_step_size,
min_no_of_observations, search_radius):
self.model = model
self.joined_datasets = joined_datasets
self.total_population_per_unit = population_series
self.centroids_of_areal_data = centroids_dataset
self.id_col = id_col
self.val_col = val_col
self.pop_col = pop_col
self.lags = lags_number
self.step = lag_step_size
self.min_no_of_observations = min_no_of_observations
self.max_search_radius = search_radius
print('Parameters have been set')
def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid,
weighted=False, verbose=False):
"""
Function prepares data from unknown locations for Poisson Kriging.
        :param unknown_areal_data_row: PKData object (row) with areal and population data,
        :param unknown_areal_data_centroid: centroid record of the unknown area in the form [[x, y, ..., id]],
        :param weighted: distances weighted by population (True) or not (False),
        :param verbose: if True then the method informs about the successful operation.
        :return: None - the prepared dataset is stored in self.prepared_data and contains:
            [[x, y, value, known_area_id, distance_to_unknown_position], [...]].
"""
areal_id = unknown_areal_data_centroid[0][-1]
cx_cy = unknown_areal_data_centroid[0][:2]
r = np.array([cx_cy])
known_centroids = self.centroids_of_areal_data
kc = known_centroids[:, :2]
# Build set for Poisson Kriging
|
s = []
for wd in weighted_distances:
for k in known_centroids:
if wd[1] in k:
s.append(wd[0])
break
else:
pass
s = np.array(s).T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn]
else:
distances_array = np.zeros(kc.shape)
for i in range(0, r.shape[1]):
distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2
s = distances_array.sum(axis=1)
s = np.sqrt(s)
s = s.T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn]
# remove nans
kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)]
# sort by distance
kriging_data = kriging_data[kriging_data[:, -1].argsort()]
# set output by distance params
# search radius
max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius)
output_data = kriging_data[:max_search_pos]
# check number of observations
if len(output_data) < self.min_no_of_observations:
output_data = kriging_data[:self.min_no_of_observations]
# TODO: info to the app logs
# print('Dataset has been set based on the minimum number of observations')
# set final dataset
self.prepared_data = output_data
if verbose:
print('Predictions data prepared')
def normalize_weights(self, weights, estimated_value, kriging_type):
"""
Algorithm for weight normalization to remove negative weights of the points which are
clustering. Derived from Deutsch, C.V., Correcting for negative weights in
ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996.
        :param: weights - weights matrix calculated with the "normal" kriging procedure,
        :param: estimated_value - value estimated for a given, unknown point,
        :param: kriging_type - 'ord' (ordinary kriging, the last element of weights is dropped) or 'sim' (simple kriging).
        :return: weight_matrix - normalized weight matrix where negative weights are removed and the
            matrix is scaled so that the sum of all its elements equals 1.
"""
if kriging_type == 'ord':
weight_matrix = weights[:-1].copy()
output_matrix = weights[:-1].copy()
elif kriging_type == 'sim':
weight_matrix = weights.copy()
output_matrix = weights.copy()
else:
            print('Kriging type not recognized. Defaulting to <sim> - simple kriging.')
weight_matrix = weights.copy()
output_matrix = weights.copy()
###### Calculate average covariance between the location being ######
###### estimated and the locations with negative weights ######
locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0
locs = locs[:, 0]
# Calculate covariance between those points and unknown point
if len(locs) >= 1:
c = []
mu = 0
for i in locs:
_c = estimated_value * self.prepared_data[i, 2]
mu = mu + estimated_value + self.prepared_data[i, 2]
c.append(_c)
output_matrix[i, 0] = 0
mu = mu / len(c)
cov = np.sum(c) / len(c) - mu * mu
###### Calculate absolute magnitude of the negative weights #####
w = weight_matrix[weight_matrix < 0]
w = w.T
magnitude = np.sum(np.abs(w)) / len(w)
            ###### Test weights greater than 0 and check if they need to be ######
            ###### rescaled to 0:                                            ######
            ###### if a weight is > 0, the covariance between the unknown point and
            ###### the known point is less than the average covariance between the
            ###### location being estimated and the locations with negative weights,
            ###### and the weight is less than the absolute magnitude of the negative
            ###### weights, then set that weight to zero ######
positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0
positive_locs = positive_locs[:, 0]
for j in positive_locs:
cov_est = (estimated_value * self.prepared_data[j, 2]) / 2
mu = (estimated_value + self.prepared_data[j, 2]) / 2
cov_est = cov_est - mu * mu
if cov_est < cov:
if weight_matrix[j, 0] < magnitude:
output_matrix[j, 0] = 0
###### Normalize weight matrix to get a sum of all elements equal to 1 ######
output_matrix = output_matrix / np.sum(output_matrix)
return output_matrix
else:
return weights
# Data processing private class methods
def _calculate_weighted_distances(self, unknown_area, unknown_area_id):
"""Function calculates weighted distances between unknown area and known areas"""
dist_dict = self._prepare_distances_dict(unknown_area)
base_area = dist_dict[unknown_area_id]
base_area_list = base_area['coordinates']
other_keys = list(dist_dict.keys())
weighted_distances = []
for k in other_keys:
other_area_list = dist_dict[k]['coordinates']
dist = calculate_block_to_block_distance(base_area_list,
other_area_list)
weighted_distances.append([dist, k])
return weighted_distances
def _prepare_distances_dict(self, unknown_area):
"""Function prepares dict with distances for weighted distance calculation
between areas"""
new_d = self.joined_datasets.copy()
new_d = new_d.append(unknown_area, ignore_index=True)
try:
new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x)
new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y)
except TypeError:
new_d['px'] = new_d['geometry'].apply(lambda v: v.x)
new_d['py'] = new_d['geometry'].apply(lambda v: v.y)
new_dict = (new_d.groupby(self.id_col)
.apply(lambda v:
{'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))})
.to_dict())
return new_dict
@staticmethod
def _get_list_from_dict(d, l):
"""Function creates list of lists from dict of dicts in the order
given by the list with key names"""
new_list = []
for val in l:
subdict = d[val]
inner_list = []
for subval in l:
inner_list.append(subdict[subval])
new_list.append(inner_list)
return np.array(new_list)
def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas):
w = np.linalg.solve(predicted_array, k_array)
zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0])
if np.any(w < 0):
# Normalize weights
normalized_w = self.normalize_weights(w, zhat, 'ord')
zhat = (np.matrix(vals_of_neigh_areas * normalized_w)[0, 0])
sigmasq = (w.T *
|
if weighted:
weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row,
areal_id)
|
random_line_split
|
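Editor's note: the non-weighted branch of prepare_prediction_data above computes Euclidean distances from every known centroid to the unknown point, sorts by distance, trims by the search radius, and falls back to a minimum number of observations. A compact Python sketch of that selection logic under assumed toy inputs (the radius test is written as a direct mask here instead of the argmax trick used in the code):

import numpy as np

def select_neighbours(known_centroids, unknown_xy, max_radius, min_obs):
    # known_centroids: array of rows [x, y, value, id]; returns rows sorted by distance,
    # limited to max_radius but never fewer than min_obs rows.
    diffs = known_centroids[:, :2] - np.asarray(unknown_xy)
    dists = np.sqrt((diffs ** 2).sum(axis=1))
    data = np.c_[known_centroids, dists]
    data = data[data[:, -1].argsort()]          # sort by distance to the unknown point
    within = data[data[:, -1] <= max_radius]    # keep points inside the search radius
    if len(within) < min_obs:                   # fall back to the closest min_obs points
        within = data[:min_obs]
    return within

centroids = np.array([[0., 0., 5., 1.],
                      [1., 1., 7., 2.],
                      [5., 5., 9., 3.]])
print(select_neighbours(centroids, (0.5, 0.5), max_radius=2.0, min_obs=1))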
poisson_kriging.py
|
self.min_no_of_observations = min_no_of_observations
self.max_search_radius = search_radius
print('Parameters have been set')
def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid,
weighted=False, verbose=False):
"""
Function prepares data from unknown locations for Poisson Kriging.
        :param unknown_areal_data_row: PKData object (row) with areal and population data,
        :param unknown_areal_data_centroid: centroid record of the unknown area in the form [[x, y, ..., id]],
        :param weighted: distances weighted by population (True) or not (False),
        :param verbose: if True then the method informs about the successful operation.
        :return: None - the prepared dataset is stored in self.prepared_data and contains:
            [[x, y, value, known_area_id, distance_to_unknown_position], [...]].
"""
areal_id = unknown_areal_data_centroid[0][-1]
cx_cy = unknown_areal_data_centroid[0][:2]
r = np.array([cx_cy])
known_centroids = self.centroids_of_areal_data
kc = known_centroids[:, :2]
# Build set for Poisson Kriging
if weighted:
weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row,
areal_id)
s = []
for wd in weighted_distances:
for k in known_centroids:
if wd[1] in k:
s.append(wd[0])
break
else:
pass
s = np.array(s).T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn]
else:
distances_array = np.zeros(kc.shape)
for i in range(0, r.shape[1]):
distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2
s = distances_array.sum(axis=1)
s = np.sqrt(s)
s = s.T
kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn]
# remove nans
kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)]
# sort by distance
kriging_data = kriging_data[kriging_data[:, -1].argsort()]
# set output by distance params
# search radius
max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius)
output_data = kriging_data[:max_search_pos]
# check number of observations
if len(output_data) < self.min_no_of_observations:
output_data = kriging_data[:self.min_no_of_observations]
# TODO: info to the app logs
# print('Dataset has been set based on the minimum number of observations')
# set final dataset
self.prepared_data = output_data
if verbose:
print('Predictions data prepared')
def normalize_weights(self, weights, estimated_value, kriging_type):
"""
Algorithm for weight normalization to remove negative weights of the points which are
clustering. Derived from Deutsch, C.V., Correcting for negative weights in
ordinary kriging, Computers & Geosciences, Vol. 22, No. 7, pp. 765-773, 1996.
        :param: weights - weights matrix calculated with the "normal" kriging procedure,
        :param: estimated_value - value estimated for a given, unknown point,
        :param: kriging_type - 'ord' (ordinary kriging, the last element of weights is dropped) or 'sim' (simple kriging).
        :return: weight_matrix - normalized weight matrix where negative weights are removed and the
            matrix is scaled so that the sum of all its elements equals 1.
"""
if kriging_type == 'ord':
weight_matrix = weights[:-1].copy()
output_matrix = weights[:-1].copy()
elif kriging_type == 'sim':
weight_matrix = weights.copy()
output_matrix = weights.copy()
else:
            print('Kriging type not recognized. Defaulting to <sim> - simple kriging.')
weight_matrix = weights.copy()
output_matrix = weights.copy()
###### Calculate average covariance between the location being ######
###### estimated and the locations with negative weights ######
locs = np.argwhere(weight_matrix < 0) # Check where weights are below 0.0
locs = locs[:, 0]
# Calculate covariance between those points and unknown point
if len(locs) >= 1:
c = []
mu = 0
for i in locs:
_c = estimated_value * self.prepared_data[i, 2]
mu = mu + estimated_value + self.prepared_data[i, 2]
c.append(_c)
output_matrix[i, 0] = 0
mu = mu / len(c)
cov = np.sum(c) / len(c) - mu * mu
###### Calculate absolute magnitude of the negative weights #####
w = weight_matrix[weight_matrix < 0]
w = w.T
magnitude = np.sum(np.abs(w)) / len(w)
            ###### Test weights greater than 0 and check if they need to be ######
            ###### rescaled to 0:                                            ######
            ###### if a weight is > 0, the covariance between the unknown point and
            ###### the known point is less than the average covariance between the
            ###### location being estimated and the locations with negative weights,
            ###### and the weight is less than the absolute magnitude of the negative
            ###### weights, then set that weight to zero ######
positive_locs = np.argwhere(weight_matrix > 0) # Check where weights are greater than 0.0
positive_locs = positive_locs[:, 0]
for j in positive_locs:
cov_est = (estimated_value * self.prepared_data[j, 2]) / 2
mu = (estimated_value + self.prepared_data[j, 2]) / 2
cov_est = cov_est - mu * mu
if cov_est < cov:
if weight_matrix[j, 0] < magnitude:
output_matrix[j, 0] = 0
###### Normalize weight matrix to get a sum of all elements equal to 1 ######
output_matrix = output_matrix / np.sum(output_matrix)
return output_matrix
else:
return weights
# Data processing private class methods
def _calculate_weighted_distances(self, unknown_area, unknown_area_id):
"""Function calculates weighted distances between unknown area and known areas"""
dist_dict = self._prepare_distances_dict(unknown_area)
base_area = dist_dict[unknown_area_id]
base_area_list = base_area['coordinates']
other_keys = list(dist_dict.keys())
weighted_distances = []
for k in other_keys:
other_area_list = dist_dict[k]['coordinates']
dist = calculate_block_to_block_distance(base_area_list,
other_area_list)
weighted_distances.append([dist, k])
return weighted_distances
def _prepare_distances_dict(self, unknown_area):
"""Function prepares dict with distances for weighted distance calculation
between areas"""
new_d = self.joined_datasets.copy()
new_d = new_d.append(unknown_area, ignore_index=True)
try:
new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x)
new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y)
except TypeError:
new_d['px'] = new_d['geometry'].apply(lambda v: v.x)
new_d['py'] = new_d['geometry'].apply(lambda v: v.y)
new_dict = (new_d.groupby(self.id_col)
.apply(lambda v:
{'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))})
.to_dict())
return new_dict
@staticmethod
def _get_list_from_dict(d, l):
"""Function creates list of lists from dict of dicts in the order
given by the list with key names"""
new_list = []
for val in l:
subdict = d[val]
inner_list = []
for subval in l:
inner_list.append(subdict[subval])
new_list.append(inner_list)
return np.array(new_list)
def _predict_value(self, predicted_array, k_array, vals_of_neigh_areas):
w = np.linalg.solve(predicted_array, k_array)
zhat = (np.matrix(vals_of_neigh_areas * w[:-1])[0, 0])
if np.any(w < 0):
# Normalize weights
normalized_w = self.normalize_weights(w, zhat, 'ord')
zhat = (np.matrix(vals_of_neigh_areas * normalized_w)[0, 0])
sigmasq = (w.T * k_array)[0, 0]
if sigmasq < 0:
print(sigmasq)
sigma = 0
else:
sigma = np.sqrt(sigmasq)
return zhat, sigma, w[-1][0], normalized_w, self.unknown_area_id
else:
|
sigmasq = (w.T * k_array)[0, 0]
if sigmasq < 0:
sigma = 0
else:
sigma = np.sqrt(sigmasq)
return zhat, sigma, w[-1][0], w, self.unknown_area_id
|
conditional_block
|
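Editor's note: _predict_value above solves the kriging system for the weight vector, forms the estimate from the neighbour values, and derives a variance term from w.T * k. A minimal NumPy sketch of that step with a hypothetical two-point system (matrix entries are illustrative only; the last row and column stand in for the ordinary-kriging constraint):

import numpy as np

# Hypothetical ordinary-kriging system: two known points plus the unbiasedness constraint.
A = np.array([[1.0, 0.4, 1.0],
              [0.4, 1.0, 1.0],
              [1.0, 1.0, 0.0]])
k = np.array([[0.7], [0.5], [1.0]])
values = np.array([10.0, 14.0])          # observed values at the two known points

w = np.linalg.solve(A, k)                # weights plus the Lagrange multiplier (last element)
zhat = (values @ w[:-1]).item()          # estimate uses only the point weights
sigmasq = (w.T @ k).item()               # variance term, mirroring the code above
sigma = np.sqrt(sigmasq) if sigmasq >= 0 else 0.0
print(zhat, sigma)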
|
stream.go
|
: discoveryRequest,
xdsServer: s,
done: make(chan struct{}),
}
}
for {
select {
case <-ctx.Done():
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return nil
case <-quit:
log.Debug().Msgf("gRPC stream with Envoy on Pod with UID=%s closed!", proxy.GetPodUID())
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return nil
case discoveryRequest, ok := <-requests:
if !ok {
log.Error().Msgf("Envoy with xDS Certificate SerialNumber=%s on Pod with UID=%s closed gRPC!", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return errGrpcClosed
}
			// This function call runs the xDS proto state machine given a DiscoveryRequest as input.
			// Its output is the decision whether or not to reply to this request.
if !respondToRequest(proxy, &discoveryRequest) {
continue
}
typeURL := envoy.TypeURI(discoveryRequest.TypeUrl)
var typesRequest []envoy.TypeURI
if typeURL == envoy.TypeWildcard {
typesRequest = envoy.XDSResponseOrder
} else {
typesRequest = []envoy.TypeURI{typeURL}
}
<-s.workqueues.AddJob(newJob(typesRequest, &discoveryRequest))
case <-broadcastUpdate:
log.Info().Msgf("Broadcast wake for Proxy SerialNumber=%s UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Queue a full configuration update
<-s.workqueues.AddJob(newJob(envoy.XDSResponseOrder, nil))
case certUpdateMsg := <-certAnnouncement:
cert := certUpdateMsg.(events.PubSubMessage).NewObj.(certificate.Certificater)
if isCNforProxy(proxy, cert.GetCommonName()) {
// The CN whose corresponding certificate was updated (rotated) by the certificate provider is associated
// with this proxy, so update the secrets corresponding to this certificate via SDS.
log.Debug().Msgf("Certificate has been updated for proxy with SerialNumber=%s, UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Empty DiscoveryRequest should create the SDS specific request
// Prepare to queue the SDS proxy response job on the worker pool
<-s.workqueues.AddJob(newJob([]envoy.TypeURI{envoy.TypeSDS}, nil))
}
}
}
}
// respondToRequest assesses whether a given DiscoveryRequest from a given proxy should be answered
// with an xDS DiscoveryResponse.
func respondToRequest(proxy *envoy.Proxy, discoveryRequest *xds_discovery.DiscoveryRequest) bool {
var err error
var requestVersion uint64
var requestNonce string
var lastVersion uint64
var lastNonce string
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Request %s [nonce=%s; version=%s; resources=%v] last sent [nonce=%s; version=%d]",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl,
discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo, discoveryRequest.ResourceNames,
proxy.GetLastSentNonce(envoy.TypeURI(discoveryRequest.TypeUrl)), proxy.GetLastSentVersion(envoy.TypeURI(discoveryRequest.TypeUrl)))
if discoveryRequest.ErrorDetail != nil {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: [NACK] err: \"%s\" for nonce %s, last version applied on request %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.ErrorDetail, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo)
return false
}
typeURL, ok := envoy.ValidURI[discoveryRequest.TypeUrl]
if !ok {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: Unknown/Unsupported URI: %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl)
return false
}
// It is possible for Envoy to return an empty VersionInfo.
// When that's the case - start with 0
if discoveryRequest.VersionInfo != "" {
if requestVersion, err = strconv.ParseUint(discoveryRequest.VersionInfo, 10, 64); err != nil {
// It is probable that Envoy responded with a VersionInfo we did not understand
log.Error().Err(err).Msgf("Proxy SerialNumber=%s PodUID=%s: Error parsing DiscoveryRequest with TypeURL=%s VersionInfo=%s (%v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.VersionInfo, err)
return false
}
}
// Set last version applied
proxy.SetLastAppliedVersion(typeURL, requestVersion)
requestNonce = discoveryRequest.ResponseNonce
// Handle first request on stream, should always reply to empty nonce
if requestNonce == "" {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Empty nonce for %s, should be first message on stream (req resources: %v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.ResourceNames)
return true
}
// The version of the config received along with the DiscoveryRequest (ackVersion)
// is what the Envoy proxy may be acknowledging. It is acknowledging
// and not requesting when the ackVersion is <= what we last sent.
// It is possible however for a proxy to have a version that is higher
// than what we last sent. (Perhaps the control plane restarted.)
// In that case we want to make sure that we send new responses with
// VersionInfo incremented starting with the version which the proxy last had.
lastVersion = proxy.GetLastSentVersion(typeURL)
if requestVersion > lastVersion {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Higher version on request %s, req ver: %d - last ver: %d. Updating to match latest.",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, lastVersion)
proxy.SetLastSentVersion(typeURL, requestVersion)
return true
}
// Compare Nonces
// As per protocol, we can ignore any request on the TypeURL stream that has not caught up with last sent nonce, if the
// nonce is non-empty.
lastNonce = proxy.GetLastSentNonce(typeURL)
if requestNonce != lastNonce {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Ignoring request for %s non-latest nonce (request: %s, current: %s)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestNonce, lastNonce)
return false
}
// ----
	// At this point there is no error and the nonces match, so this is guaranteed to be an ACK of the last sent version.
// What's left is to check if the resources listed are the same. If they are not, we must respond
// with the new resources requested.
//
// In case of LDS and CDS, "Envoy will always use wildcard mode for Listener and Cluster resources".
// The following logic is not needed (though correct) for LDS and CDS as request resources are also empty in ACK case.
//
// This part of the code was inspired by Istio's `shouldRespond` handling of request resource difference
// https://github.com/istio/istio/blob/da6178604559bdf2c707a57f452d16bee0de90c8/pilot/pkg/xds/ads.go#L347
// ----
resourcesLastSent := proxy.GetLastResourcesSent(typeURL)
resourcesRequested := getRequestedResourceNamesSet(discoveryRequest)
	// If what we last sent is a superset of the resources the request subscribes to,
	// it's an ACK and nothing needs to be done.
	// Otherwise, Envoy might be asking for additional resources on top of what was sent last time.
	// Difference returns the elements of <requested> that are not among the elements of <last sent>.
requestedResourcesDifference := resourcesRequested.Difference(resourcesLastSent)
if requestedResourcesDifference.Cardinality() != 0 {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: request difference in v:%d - requested: %v lastSent: %v (diff: %v), triggering update",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), requestVersion, resourcesRequested, resourcesLastSent, requestedResourcesDifference)
return true
}
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: ACK received for %s, version: %d nonce: %s resources ACKd: %v",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, requestNonce, resourcesRequested)
return false
}
// Helper to turn the resource names on a discovery request to a Set for later efficient intersection
func getRequestedResourceNamesSet(discoveryRequest *xds_discovery.DiscoveryRequest) mapset.Set {
resourcesRequested := mapset.NewSet()
for idx := range discoveryRequest.ResourceNames
|
{
resourcesRequested.Add(discoveryRequest.ResourceNames[idx])
}
|
conditional_block
|
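Editor's note: the ACK check above hinges on a set difference: if the request names resources that were not part of the last response, the server must respond. The same logic in a tiny Python sketch with made-up resource names:

# Resources included in the last response vs. resources named in the new request.
last_sent = {"cluster-a", "cluster-b", "cluster-c"}
requested = {"cluster-a", "cluster-b", "cluster-d"}

# Elements of <requested> that are not part of <last sent>:
difference = requested - last_sent
if difference:
    print("respond: proxy subscribed to new resources", difference)
else:
    print("pure ACK: nothing to do")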
|
stream.go
|
gRPC stream with Envoy on Pod with UID=%s closed!", proxy.GetPodUID())
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return nil
case discoveryRequest, ok := <-requests:
if !ok {
log.Error().Msgf("Envoy with xDS Certificate SerialNumber=%s on Pod with UID=%s closed gRPC!", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return errGrpcClosed
}
			// This function call runs the xDS proto state machine given a DiscoveryRequest as input.
			// Its output is the decision whether or not to reply to this request.
if !respondToRequest(proxy, &discoveryRequest) {
continue
}
typeURL := envoy.TypeURI(discoveryRequest.TypeUrl)
var typesRequest []envoy.TypeURI
if typeURL == envoy.TypeWildcard {
typesRequest = envoy.XDSResponseOrder
} else {
typesRequest = []envoy.TypeURI{typeURL}
}
<-s.workqueues.AddJob(newJob(typesRequest, &discoveryRequest))
case <-broadcastUpdate:
log.Info().Msgf("Broadcast wake for Proxy SerialNumber=%s UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Queue a full configuration update
<-s.workqueues.AddJob(newJob(envoy.XDSResponseOrder, nil))
case certUpdateMsg := <-certAnnouncement:
cert := certUpdateMsg.(events.PubSubMessage).NewObj.(certificate.Certificater)
if isCNforProxy(proxy, cert.GetCommonName()) {
// The CN whose corresponding certificate was updated (rotated) by the certificate provider is associated
// with this proxy, so update the secrets corresponding to this certificate via SDS.
log.Debug().Msgf("Certificate has been updated for proxy with SerialNumber=%s, UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Empty DiscoveryRequest should create the SDS specific request
// Prepare to queue the SDS proxy response job on the worker pool
<-s.workqueues.AddJob(newJob([]envoy.TypeURI{envoy.TypeSDS}, nil))
}
}
}
}
// respondToRequest assesses whether a given DiscoveryRequest from a given proxy should be answered
// with an xDS DiscoveryResponse.
func respondToRequest(proxy *envoy.Proxy, discoveryRequest *xds_discovery.DiscoveryRequest) bool {
var err error
var requestVersion uint64
var requestNonce string
var lastVersion uint64
var lastNonce string
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Request %s [nonce=%s; version=%s; resources=%v] last sent [nonce=%s; version=%d]",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl,
discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo, discoveryRequest.ResourceNames,
proxy.GetLastSentNonce(envoy.TypeURI(discoveryRequest.TypeUrl)), proxy.GetLastSentVersion(envoy.TypeURI(discoveryRequest.TypeUrl)))
if discoveryRequest.ErrorDetail != nil {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: [NACK] err: \"%s\" for nonce %s, last version applied on request %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.ErrorDetail, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo)
return false
}
typeURL, ok := envoy.ValidURI[discoveryRequest.TypeUrl]
if !ok {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: Unknown/Unsupported URI: %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl)
return false
}
// It is possible for Envoy to return an empty VersionInfo.
// When that's the case - start with 0
if discoveryRequest.VersionInfo != "" {
if requestVersion, err = strconv.ParseUint(discoveryRequest.VersionInfo, 10, 64); err != nil {
// It is probable that Envoy responded with a VersionInfo we did not understand
log.Error().Err(err).Msgf("Proxy SerialNumber=%s PodUID=%s: Error parsing DiscoveryRequest with TypeURL=%s VersionInfo=%s (%v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.VersionInfo, err)
return false
}
}
// Set last version applied
proxy.SetLastAppliedVersion(typeURL, requestVersion)
requestNonce = discoveryRequest.ResponseNonce
// Handle first request on stream, should always reply to empty nonce
if requestNonce == "" {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Empty nonce for %s, should be first message on stream (req resources: %v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.ResourceNames)
return true
}
// The version of the config received along with the DiscoveryRequest (ackVersion)
// is what the Envoy proxy may be acknowledging. It is acknowledging
// and not requesting when the ackVersion is <= what we last sent.
// It is possible however for a proxy to have a version that is higher
// than what we last sent. (Perhaps the control plane restarted.)
// In that case we want to make sure that we send new responses with
// VersionInfo incremented starting with the version which the proxy last had.
lastVersion = proxy.GetLastSentVersion(typeURL)
if requestVersion > lastVersion {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Higher version on request %s, req ver: %d - last ver: %d. Updating to match latest.",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, lastVersion)
proxy.SetLastSentVersion(typeURL, requestVersion)
return true
}
// Compare Nonces
// As per protocol, we can ignore any request on the TypeURL stream that has not caught up with last sent nonce, if the
// nonce is non-empty.
lastNonce = proxy.GetLastSentNonce(typeURL)
if requestNonce != lastNonce {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Ignoring request for %s non-latest nonce (request: %s, current: %s)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestNonce, lastNonce)
return false
}
// ----
	// At this point there is no error and the nonces match, so this is guaranteed to be an ACK of the last sent version.
// What's left is to check if the resources listed are the same. If they are not, we must respond
// with the new resources requested.
//
// In case of LDS and CDS, "Envoy will always use wildcard mode for Listener and Cluster resources".
// The following logic is not needed (though correct) for LDS and CDS as request resources are also empty in ACK case.
//
// This part of the code was inspired by Istio's `shouldRespond` handling of request resource difference
// https://github.com/istio/istio/blob/da6178604559bdf2c707a57f452d16bee0de90c8/pilot/pkg/xds/ads.go#L347
// ----
resourcesLastSent := proxy.GetLastResourcesSent(typeURL)
resourcesRequested := getRequestedResourceNamesSet(discoveryRequest)
	// If what we last sent is a superset of the resources the request subscribes to,
	// it's an ACK and nothing needs to be done.
	// Otherwise, Envoy might be asking for additional resources on top of what was sent last time.
	// Difference returns the elements of <requested> that are not among the elements of <last sent>.
requestedResourcesDifference := resourcesRequested.Difference(resourcesLastSent)
if requestedResourcesDifference.Cardinality() != 0 {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: request difference in v:%d - requested: %v lastSent: %v (diff: %v), triggering update",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), requestVersion, resourcesRequested, resourcesLastSent, requestedResourcesDifference)
return true
}
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: ACK received for %s, version: %d nonce: %s resources ACKd: %v",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, requestNonce, resourcesRequested)
return false
}
// Helper to turn the resource names on a discovery request to a Set for later efficient intersection
func getRequestedResourceNamesSet(discoveryRequest *xds_discovery.DiscoveryRequest) mapset.Set {
resourcesRequested := mapset.NewSet()
for idx := range discoveryRequest.ResourceNames {
resourcesRequested.Add(discoveryRequest.ResourceNames[idx])
}
return resourcesRequested
}
// isCNforProxy returns true if the given CN for the workload certificate matches the given proxy's identity.
// Proxy identity corresponds to the k8s service account, while the workload certificate is of the form
// <svc-account>.<namespace>.<trust-domain>.
func
|
isCNforProxy
|
identifier_name
|
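Editor's note: isCNforProxy above splits a workload certificate CN of the form <svc-account>.<namespace>.<trust-domain> and compares the first two chunks against the proxy's service-account identity. A hypothetical Python sketch of that comparison (helper name and sample CNs are invented for illustration):

def cn_matches_identity(cn, svc_account, namespace):
    # Workload certificate CN is of the form <svc-account>.<namespace>.<trust-domain>.
    chunks = cn.split(".")
    if len(chunks) < 3:
        return False
    return chunks[0] == svc_account and chunks[1] == namespace

print(cn_matches_identity("bookstore.store-ns.cluster.local", "bookstore", "store-ns"))  # True
print(cn_matches_identity("bookstore.other-ns.cluster.local", "bookstore", "store-ns"))  # False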
|
stream.go
|
}
typeURL := envoy.TypeURI(discoveryRequest.TypeUrl)
var typesRequest []envoy.TypeURI
if typeURL == envoy.TypeWildcard {
typesRequest = envoy.XDSResponseOrder
} else {
typesRequest = []envoy.TypeURI{typeURL}
}
<-s.workqueues.AddJob(newJob(typesRequest, &discoveryRequest))
case <-broadcastUpdate:
log.Info().Msgf("Broadcast wake for Proxy SerialNumber=%s UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Queue a full configuration update
<-s.workqueues.AddJob(newJob(envoy.XDSResponseOrder, nil))
case certUpdateMsg := <-certAnnouncement:
cert := certUpdateMsg.(events.PubSubMessage).NewObj.(certificate.Certificater)
if isCNforProxy(proxy, cert.GetCommonName()) {
// The CN whose corresponding certificate was updated (rotated) by the certificate provider is associated
// with this proxy, so update the secrets corresponding to this certificate via SDS.
log.Debug().Msgf("Certificate has been updated for proxy with SerialNumber=%s, UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Empty DiscoveryRequest should create the SDS specific request
// Prepare to queue the SDS proxy response job on the worker pool
<-s.workqueues.AddJob(newJob([]envoy.TypeURI{envoy.TypeSDS}, nil))
}
}
}
}
// respondToRequest assesses whether a given DiscoveryRequest from a given proxy should be answered
// with an xDS DiscoveryResponse.
func respondToRequest(proxy *envoy.Proxy, discoveryRequest *xds_discovery.DiscoveryRequest) bool {
var err error
var requestVersion uint64
var requestNonce string
var lastVersion uint64
var lastNonce string
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Request %s [nonce=%s; version=%s; resources=%v] last sent [nonce=%s; version=%d]",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl,
discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo, discoveryRequest.ResourceNames,
proxy.GetLastSentNonce(envoy.TypeURI(discoveryRequest.TypeUrl)), proxy.GetLastSentVersion(envoy.TypeURI(discoveryRequest.TypeUrl)))
if discoveryRequest.ErrorDetail != nil {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: [NACK] err: \"%s\" for nonce %s, last version applied on request %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.ErrorDetail, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo)
return false
}
typeURL, ok := envoy.ValidURI[discoveryRequest.TypeUrl]
if !ok {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: Unknown/Unsupported URI: %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl)
return false
}
// It is possible for Envoy to return an empty VersionInfo.
// When that's the case - start with 0
if discoveryRequest.VersionInfo != "" {
if requestVersion, err = strconv.ParseUint(discoveryRequest.VersionInfo, 10, 64); err != nil {
// It is probable that Envoy responded with a VersionInfo we did not understand
log.Error().Err(err).Msgf("Proxy SerialNumber=%s PodUID=%s: Error parsing DiscoveryRequest with TypeURL=%s VersionInfo=%s (%v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.VersionInfo, err)
return false
}
}
// Set last version applied
proxy.SetLastAppliedVersion(typeURL, requestVersion)
requestNonce = discoveryRequest.ResponseNonce
// Handle first request on stream, should always reply to empty nonce
if requestNonce == "" {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Empty nonce for %s, should be first message on stream (req resources: %v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.ResourceNames)
return true
}
// The version of the config received along with the DiscoveryRequest (ackVersion)
// is what the Envoy proxy may be acknowledging. It is acknowledging
// and not requesting when the ackVersion is <= what we last sent.
// It is possible however for a proxy to have a version that is higher
// than what we last sent. (Perhaps the control plane restarted.)
// In that case we want to make sure that we send new responses with
// VersionInfo incremented starting with the version which the proxy last had.
lastVersion = proxy.GetLastSentVersion(typeURL)
if requestVersion > lastVersion {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Higher version on request %s, req ver: %d - last ver: %d. Updating to match latest.",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, lastVersion)
proxy.SetLastSentVersion(typeURL, requestVersion)
return true
}
// Compare Nonces
// As per protocol, we can ignore any request on the TypeURL stream that has not caught up with last sent nonce, if the
// nonce is non-empty.
lastNonce = proxy.GetLastSentNonce(typeURL)
if requestNonce != lastNonce {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Ignoring request for %s non-latest nonce (request: %s, current: %s)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestNonce, lastNonce)
return false
}
// ----
	// At this point there is no error and the nonces match, so this is guaranteed to be an ACK of the last sent version.
// What's left is to check if the resources listed are the same. If they are not, we must respond
// with the new resources requested.
//
// In case of LDS and CDS, "Envoy will always use wildcard mode for Listener and Cluster resources".
// The following logic is not needed (though correct) for LDS and CDS as request resources are also empty in ACK case.
//
// This part of the code was inspired by Istio's `shouldRespond` handling of request resource difference
// https://github.com/istio/istio/blob/da6178604559bdf2c707a57f452d16bee0de90c8/pilot/pkg/xds/ads.go#L347
// ----
resourcesLastSent := proxy.GetLastResourcesSent(typeURL)
resourcesRequested := getRequestedResourceNamesSet(discoveryRequest)
	// If what we last sent is a superset of the resources the request subscribes to,
	// it's an ACK and nothing needs to be done.
	// Otherwise, Envoy might be asking for additional resources on top of what was sent last time.
	// Difference returns the elements of <requested> that are not among the elements of <last sent>.
requestedResourcesDifference := resourcesRequested.Difference(resourcesLastSent)
if requestedResourcesDifference.Cardinality() != 0 {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: request difference in v:%d - requested: %v lastSent: %v (diff: %v), triggering update",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), requestVersion, resourcesRequested, resourcesLastSent, requestedResourcesDifference)
return true
}
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: ACK received for %s, version: %d nonce: %s resources ACKd: %v",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, requestNonce, resourcesRequested)
return false
}
// Helper to turn the resource names on a discovery request to a Set for later efficient intersection
func getRequestedResourceNamesSet(discoveryRequest *xds_discovery.DiscoveryRequest) mapset.Set {
resourcesRequested := mapset.NewSet()
for idx := range discoveryRequest.ResourceNames {
resourcesRequested.Add(discoveryRequest.ResourceNames[idx])
}
return resourcesRequested
}
// isCNforProxy returns true if the given CN for the workload certificate matches the given proxy's identity.
// Proxy identity corresponds to the k8s service account, while the workload certificate is of the form
// <svc-account>.<namespace>.<trust-domain>.
func isCNforProxy(proxy *envoy.Proxy, cn certificate.CommonName) bool {
proxyIdentity, err := catalog.GetServiceAccountFromProxyCertificate(proxy.GetCertificateCommonName())
if err != nil {
log.Error().Err(err).Msgf("Error looking up proxy identity for proxy with SerialNumber=%s on Pod with UID=%s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
return false
}
// Workload certificate CN is of the form <svc-account>.<namespace>.<trust-domain>
chunks := strings.Split(cn.String(), constants.DomainDelimiter)
if len(chunks) < 3 {
return false
}
identityForCN := identity.K8sServiceAccount{Name: chunks[0], Namespace: chunks[1]}
return identityForCN == proxyIdentity
|
}
|
random_line_split
|
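Editor's note: respondToRequest above walks through a fixed decision order: reject NACKs and unknown type URLs, always answer an empty nonce, answer when the proxy reports a newer version, ignore stale nonces, and otherwise answer only when new resources are requested. A condensed Python sketch of that order using plain dicts (hypothetical shapes, not the real proxy API):

def should_respond(req, last_sent, known_types):
    if req.get("error"):                       # NACK: log and do not respond
        return False
    if req["type_url"] not in known_types:     # unknown/unsupported type URL
        return False
    if req["nonce"] == "":                     # first request on the stream
        return True
    if req["version"] > last_sent["version"]:  # proxy is ahead (e.g. control plane restarted)
        return True
    if req["nonce"] != last_sent["nonce"]:     # stale request on this type's stream
        return False
    # Nonces match: respond only if resources beyond what was last sent are requested.
    return bool(set(req["resources"]) - set(last_sent["resources"]))

last = {"version": 3, "nonce": "n3", "resources": {"r1", "r2"}}
print(should_respond({"error": None, "type_url": "CDS", "nonce": "n3",
                      "version": 3, "resources": {"r1", "r2"}}, last, {"CDS"}))  # False (pure ACK)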
|
stream.go
|
xdsServer: s,
done: make(chan struct{}),
}
}
for {
select {
case <-ctx.Done():
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return nil
case <-quit:
log.Debug().Msgf("gRPC stream with Envoy on Pod with UID=%s closed!", proxy.GetPodUID())
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return nil
case discoveryRequest, ok := <-requests:
if !ok {
log.Error().Msgf("Envoy with xDS Certificate SerialNumber=%s on Pod with UID=%s closed gRPC!", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
metricsstore.DefaultMetricsStore.ProxyConnectCount.Dec()
return errGrpcClosed
}
			// This function call runs the xDS proto state machine given a DiscoveryRequest as input.
			// Its output is the decision whether or not to reply to this request.
if !respondToRequest(proxy, &discoveryRequest) {
continue
}
typeURL := envoy.TypeURI(discoveryRequest.TypeUrl)
var typesRequest []envoy.TypeURI
if typeURL == envoy.TypeWildcard {
typesRequest = envoy.XDSResponseOrder
} else {
typesRequest = []envoy.TypeURI{typeURL}
}
<-s.workqueues.AddJob(newJob(typesRequest, &discoveryRequest))
case <-broadcastUpdate:
log.Info().Msgf("Broadcast wake for Proxy SerialNumber=%s UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Queue a full configuration update
<-s.workqueues.AddJob(newJob(envoy.XDSResponseOrder, nil))
case certUpdateMsg := <-certAnnouncement:
cert := certUpdateMsg.(events.PubSubMessage).NewObj.(certificate.Certificater)
if isCNforProxy(proxy, cert.GetCommonName()) {
// The CN whose corresponding certificate was updated (rotated) by the certificate provider is associated
// with this proxy, so update the secrets corresponding to this certificate via SDS.
log.Debug().Msgf("Certificate has been updated for proxy with SerialNumber=%s, UID=%s", proxy.GetCertificateSerialNumber(), proxy.GetPodUID())
// Empty DiscoveryRequest should create the SDS specific request
// Prepare to queue the SDS proxy response job on the worker pool
<-s.workqueues.AddJob(newJob([]envoy.TypeURI{envoy.TypeSDS}, nil))
}
}
}
}
// respondToRequest assesses whether a given DiscoveryRequest from a given proxy should be answered
// with an xDS DiscoveryResponse.
func respondToRequest(proxy *envoy.Proxy, discoveryRequest *xds_discovery.DiscoveryRequest) bool {
var err error
var requestVersion uint64
var requestNonce string
var lastVersion uint64
var lastNonce string
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Request %s [nonce=%s; version=%s; resources=%v] last sent [nonce=%s; version=%d]",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl,
discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo, discoveryRequest.ResourceNames,
proxy.GetLastSentNonce(envoy.TypeURI(discoveryRequest.TypeUrl)), proxy.GetLastSentVersion(envoy.TypeURI(discoveryRequest.TypeUrl)))
if discoveryRequest.ErrorDetail != nil {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: [NACK] err: \"%s\" for nonce %s, last version applied on request %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.ErrorDetail, discoveryRequest.ResponseNonce, discoveryRequest.VersionInfo)
return false
}
typeURL, ok := envoy.ValidURI[discoveryRequest.TypeUrl]
if !ok {
log.Error().Msgf("Proxy SerialNumber=%s PodUID=%s: Unknown/Unsupported URI: %s",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), discoveryRequest.TypeUrl)
return false
}
// It is possible for Envoy to return an empty VersionInfo.
// When that's the case - start with 0
if discoveryRequest.VersionInfo != "" {
if requestVersion, err = strconv.ParseUint(discoveryRequest.VersionInfo, 10, 64); err != nil {
// It is probable that Envoy responded with a VersionInfo we did not understand
log.Error().Err(err).Msgf("Proxy SerialNumber=%s PodUID=%s: Error parsing DiscoveryRequest with TypeURL=%s VersionInfo=%s (%v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.VersionInfo, err)
return false
}
}
// Set last version applied
proxy.SetLastAppliedVersion(typeURL, requestVersion)
requestNonce = discoveryRequest.ResponseNonce
// Handle first request on stream, should always reply to empty nonce
if requestNonce == "" {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Empty nonce for %s, should be first message on stream (req resources: %v)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), discoveryRequest.ResourceNames)
return true
}
// The version of the config received along with the DiscoveryRequest (ackVersion)
// is what the Envoy proxy may be acknowledging. It is acknowledging
// and not requesting when the ackVersion is <= what we last sent.
// It is possible however for a proxy to have a version that is higher
// than what we last sent. (Perhaps the control plane restarted.)
// In that case we want to make sure that we send new responses with
// VersionInfo incremented starting with the version which the proxy last had.
lastVersion = proxy.GetLastSentVersion(typeURL)
if requestVersion > lastVersion {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Higher version on request %s, req ver: %d - last ver: %d. Updating to match latest.",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, lastVersion)
proxy.SetLastSentVersion(typeURL, requestVersion)
return true
}
// Compare Nonces
// As per protocol, we can ignore any request on the TypeURL stream that has not caught up with last sent nonce, if the
// nonce is non-empty.
lastNonce = proxy.GetLastSentNonce(typeURL)
if requestNonce != lastNonce {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: Ignoring request for %s non-latest nonce (request: %s, current: %s)",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestNonce, lastNonce)
return false
}
// ----
	// At this point there is no error and the nonces match, so this is guaranteed to be an ACK of the last sent version.
// What's left is to check if the resources listed are the same. If they are not, we must respond
// with the new resources requested.
//
// In case of LDS and CDS, "Envoy will always use wildcard mode for Listener and Cluster resources".
// The following logic is not needed (though correct) for LDS and CDS as request resources are also empty in ACK case.
//
// This part of the code was inspired by Istio's `shouldRespond` handling of request resource difference
// https://github.com/istio/istio/blob/da6178604559bdf2c707a57f452d16bee0de90c8/pilot/pkg/xds/ads.go#L347
// ----
resourcesLastSent := proxy.GetLastResourcesSent(typeURL)
resourcesRequested := getRequestedResourceNamesSet(discoveryRequest)
	// If what we last sent is a superset of the resources the request subscribes to,
	// it's an ACK and nothing needs to be done.
	// Otherwise, Envoy might be asking for additional resources on top of what was sent last time.
	// Difference returns the elements of <requested> that are not among the elements of <last sent>.
requestedResourcesDifference := resourcesRequested.Difference(resourcesLastSent)
if requestedResourcesDifference.Cardinality() != 0 {
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: request difference in v:%d - requested: %v lastSent: %v (diff: %v), triggering update",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), requestVersion, resourcesRequested, resourcesLastSent, requestedResourcesDifference)
return true
}
log.Debug().Msgf("Proxy SerialNumber=%s PodUID=%s: ACK received for %s, version: %d nonce: %s resources ACKd: %v",
proxy.GetCertificateSerialNumber(), proxy.GetPodUID(), typeURL.Short(), requestVersion, requestNonce, resourcesRequested)
return false
}
// Helper to turn the resource names on a discovery request to a Set for later efficient intersection
func getRequestedResourceNamesSet(discoveryRequest *xds_discovery.DiscoveryRequest) mapset.Set
|
{
resourcesRequested := mapset.NewSet()
for idx := range discoveryRequest.ResourceNames {
resourcesRequested.Add(discoveryRequest.ResourceNames[idx])
}
return resourcesRequested
}
|
identifier_body
|
|
main.py
|
2
ORDER BY created DESC""", public_keys, ancestor)
else:
results = Result.gql(""" WHERE keys IN :1
ORDER BY created DESC""", public_keys)
metrics = set()
answers = []
for r in results:
metrics.add(r.metric)
answers.append(r.get_answers())
if len(metrics) > 1:
raise Exception("Keys were not all from the same metric: {} {}"
.format(public_keys, metrics))
if len(answers) > 0:
return {'metric': metrics.pop(), 'answers': answers}
else:
# No results
logging.info('No answers found')
return {'metric': 'no responses yet',
'answers': []}
@classmethod
def put_result(self, keys, metric, answers, group):
if group:
parent = Group.get_group(group)
result = Result(keys=keys, metric=metric, answers_json=answers,
parent=parent)
else:
result = Result(keys=keys, metric=metric, answers_json=answers)
return result.put()
def get_answers(self):
# Some old entities don't have json-based results. Treat them as if
# they are empty. This is easier than deleting them all.
if self.answers_json:
answers = json.loads(self.answers_json)
else:
answers = {}
# Always take the precaution of hashing participant ids, if present.
if 'pid' in answers:
answers['pid'] = util.hash_participant_id(answers['pid'])
return answers
# Page Handlers and APIs
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.write(*a, **kw)
def render_str(self, template, **params):
return jinja_environment.get_template(template).render(**params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def write_json(self, obj):
self.response.headers['Content-Type'] = "text/json; charset=utf-8"
self.write(json.dumps(obj))
class MainHandler(Handler):
def get(self):
self.render('index.html')
class TakeHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
self.render(name + '_survey.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class CompleteHandler(Handler):
def get(self, name):
key = self.request.get('private_key', None)
group = self.request.get('group', None)
answers = []
if key is None:
# If there's no key, then this is a preview. Don't try to load any
# answers.
logging.info("No key present; rendering preview.")
else:
try:
answers = Result.get_results([key], group)['answers']
except Exception as e:
# There was some problem with the keys that were given. Just
# display the report with no answers.
logging.error('Problem with private key: {}'.format(e))
try:
metric = Metric.get_by_name(name)
except Exception as e:
logging.error('Could not find requested metric: {}'.format(e))
self.render('404.html')
return
# Render without html escaping
self.render(metric.name + '_survey_complete.html',
group=group, answers=jinja2.Markup(json.dumps(answers)))
class SampleHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
sample_template = name + '_sample_results.html'
# If there's a sample report, render that.
if os.path.isfile('templates/' + sample_template):
self.render(name + '_sample_results.html', name=name)
# Some reports can render themselves as a sample if no data is
# provided. These don't have a separate sample template. Instead,
# just serve up the main report template.
else:
self.render(name + '_results.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class ResultsHandler(Handler):
def get(self, metric=None, keys_str=''):
# Multiple keys can be specified, separated by hyphens, in which case
# multiple sets of results should be sent to the template.
keys = keys_str.split('-')
# A group may be applicable here for single-keyed results.
        group = self.request.get('group') if len(keys) == 1 else None
try:
results = Result.get_results(keys, group)
except Exception as e:
# There was some problem with the keys that were given.
logging.error('{}'.format(e))
self.render('404.html')
return
template = None
answers = []
if metric:
# A specific metric was requested. Check that 1) it exists and 2)
# it matches the answers, if any. Then show that metric's results
# page.
if metric not in config.metrics:
logging.error("Unknown metric: {}".format(metric))
template = '404.html'
|
.format(results['metric'], metric))
template = '404.html'
answers = results['answers']
# If the template hasn't been set by an error check above, give the
# metric-specific results page.
template = template or metric + '_results.html'
else:
# No specific metric was given. Infer it from the answers, if any,
# otherwise show a generic no-results page.
if len(results['answers']) > 0:
metric = results['metric']
answers = results['answers']
template = metric + '_results.html'
else:
template = 'no_responses.html'
# Render without html escaping.
answers = jinja2.Markup(json.dumps(answers))
self.render(template, group=group, answers=answers)
class ShareHandler(Handler):
def get(self, name):
keypair = util.Keys().get_pair()
# Render without html escaping
metric = Metric.get_by_name(name)
self.render(
metric.name + '_share.html', name=name,
private_key=keypair['private_keys'][0],
public_key=keypair['public_keys'][0])
class CsvHandler(Handler):
"""Return a csv based on the json array passed in
for example the following is a valid request (exploded for clarity)
/csv?
filename=gotcha&
headers=["name","age"]&
data=[["jack",12],["john",42],["joe",68]]
"""
def get(self):
# Get input
data = self.request.get('data')
headers = self.request.get('headers')
filename = self.request.get('filename').encode('ascii', 'ignore')
# Convert to json
data = json.loads(data)
if headers:
headers = json.loads(headers)
# Check input
if not headers:
logging.warning('no headers sent')
        if not isinstance(headers, list):
            logging.warning('the headers are not a list')
        if not data:
            logging.warning('no data')
        if not isinstance(data, list):
            logging.warning('data is not a list')
        if not len(data) > 0:
            logging.warning('data has no length')
        if not all(isinstance(row, list) for row in data):
            logging.warning('data contains members which are not lists')
# Set up headers for browser to correctly recognize the file
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
# write the csv to a file like string
csv_file = cStringIO.StringIO()
csv_writer = csv.writer(csv_file)
# add headers if sent
if headers:
csv_writer.writerow(headers)
# add data
for row in data:
csv_writer.writerow(row)
# Emit the files directly to HTTP response stream
self.response.out.write(csv_file.getvalue())
class AdminCreateHandler(Handler):
def get(self):
self.render('create.html')
class MetricApi(Handler):
default_rubric = """<script>
pretty_answers = JSON.stringify(mm.answers, null, 4)
$('#responses').html(pretty_answers);
</script>
<pre id='responses'></pre>
"""
default_survey = """<input name="quest"/>"""
def get(self):
name = self.request.get('name')
if name:
metric = Metric.get_by_name(name)
if metric:
self.write_json(util.to_dict(metric))
else:
default_description = "<h3>" + name + "</h3>"
self.write_json({
'survey': self.default_survey,
'rubric': self.default_rubric,
'description': default_description
})
else:
logging.error('Metric request had no name')
self.write_json({'error': 'a name is required'})
class AdminMetricApi(Handler):
def post(self):
name = self.request.get('name')
survey = self.request.get('survey')
rubric = self.request.get('rubric')
description = self.request.get('description')
if name and survey and rubric
|
if len(results['answers']) > 0:
if metric != results['metric']:
logging.error("Key is from metric {}, but {} requested."
|
random_line_split
|
main.py
|
2
ORDER BY created DESC""", public_keys, ancestor)
else:
results = Result.gql(""" WHERE keys IN :1
ORDER BY created DESC""", public_keys)
metrics = set()
answers = []
for r in results:
metrics.add(r.metric)
answers.append(r.get_answers())
if len(metrics) > 1:
raise Exception("Keys were not all from the same metric: {} {}"
.format(public_keys, metrics))
if len(answers) > 0:
return {'metric': metrics.pop(), 'answers': answers}
else:
# No results
logging.info('No answers found')
return {'metric': 'no responses yet',
'answers': []}
@classmethod
    def put_result(cls, keys, metric, answers, group):
if group:
parent = Group.get_group(group)
result = Result(keys=keys, metric=metric, answers_json=answers,
parent=parent)
else:
result = Result(keys=keys, metric=metric, answers_json=answers)
return result.put()
def get_answers(self):
# Some old entities don't have json-based results. Treat them as if
# they are empty. This is easier than deleting them all.
if self.answers_json:
answers = json.loads(self.answers_json)
else:
answers = {}
# Always take the precaution of hashing participant ids, if present.
if 'pid' in answers:
answers['pid'] = util.hash_participant_id(answers['pid'])
return answers
# Page Handlers and APIs
class Handler(webapp2.RequestHandler):
def
|
(self, *a, **kw):
self.response.write(*a, **kw)
def render_str(self, template, **params):
return jinja_environment.get_template(template).render(**params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def write_json(self, obj):
self.response.headers['Content-Type'] = "text/json; charset=utf-8"
self.write(json.dumps(obj))
class MainHandler(Handler):
def get(self):
self.render('index.html')
class TakeHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
self.render(name + '_survey.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class CompleteHandler(Handler):
def get(self, name):
key = self.request.get('private_key', None)
group = self.request.get('group', None)
answers = []
if key is None:
# If there's no key, then this is a preview. Don't try to load any
# answers.
logging.info("No key present; rendering preview.")
else:
try:
answers = Result.get_results([key], group)['answers']
except Exception as e:
# There was some problem with the keys that were given. Just
# display the report with no answers.
logging.error('Problem with private key: {}'.format(e))
try:
metric = Metric.get_by_name(name)
except Exception as e:
logging.error('Could not find requested metric: {}'.format(e))
self.render('404.html')
return
# Render without html escaping
self.render(metric.name + '_survey_complete.html',
group=group, answers=jinja2.Markup(json.dumps(answers)))
class SampleHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
sample_template = name + '_sample_results.html'
# If there's a sample report, render that.
if os.path.isfile('templates/' + sample_template):
self.render(name + '_sample_results.html', name=name)
# Some reports can render themselves as a sample if no data is
# provided. These don't have a separate sample template. Instead,
# just serve up the main report template.
else:
self.render(name + '_results.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class ResultsHandler(Handler):
def get(self, metric=None, keys_str=''):
# Multiple keys can be specified, separated by hyphens, in which case
# multiple sets of results should be sent to the template.
keys = keys_str.split('-')
# A group may be applicable here for single-keyed results.
        group = self.request.get('group') if len(keys) == 1 else None
try:
results = Result.get_results(keys, group)
except Exception as e:
# There was some problem with the keys that were given.
logging.error('{}'.format(e))
self.render('404.html')
return
template = None
answers = []
if metric:
# A specific metric was requested. Check that 1) it exists and 2)
# it matches the answers, if any. Then show that metric's results
# page.
if metric not in config.metrics:
logging.error("Unknown metric: {}".format(metric))
template = '404.html'
if len(results['answers']) > 0:
if metric != results['metric']:
logging.error("Key is from metric {}, but {} requested."
.format(results['metric'], metric))
template = '404.html'
answers = results['answers']
# If the template hasn't been set by an error check above, give the
# metric-specific results page.
template = template or metric + '_results.html'
else:
# No specific metric was given. Infer it from the answers, if any,
# otherwise show a generic no-results page.
if len(results['answers']) > 0:
metric = results['metric']
answers = results['answers']
template = metric + '_results.html'
else:
template = 'no_responses.html'
# Render without html escaping.
answers = jinja2.Markup(json.dumps(answers))
self.render(template, group=group, answers=answers)
class ShareHandler(Handler):
def get(self, name):
keypair = util.Keys().get_pair()
# Render without html escaping
metric = Metric.get_by_name(name)
self.render(
metric.name + '_share.html', name=name,
private_key=keypair['private_keys'][0],
public_key=keypair['public_keys'][0])
class CsvHandler(Handler):
"""Return a csv based on the json array passed in
for example the following is a valid request (exploded for clarity)
/csv?
filename=gotcha&
headers=["name","age"]&
data=[["jack",12],["john",42],["joe",68]]
"""
def get(self):
# Get input
data = self.request.get('data')
headers = self.request.get('headers')
filename = self.request.get('filename').encode('ascii', 'ignore')
# Convert to json
data = json.loads(data)
if headers:
headers = json.loads(headers)
# Check input
if not headers:
logging.warning('no headers sent')
        if not isinstance(headers, list):
            logging.warning('the headers are not a list')
        if not data:
            logging.warning('no data')
        if not isinstance(data, list):
            logging.warning('data is not a list')
        if not len(data) > 0:
            logging.warning('data has no length')
        if not all(isinstance(row, list) for row in data):
            logging.warning('data contains members which are not lists')
# Set up headers for browser to correctly recognize the file
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
# write the csv to a file like string
csv_file = cStringIO.StringIO()
csv_writer = csv.writer(csv_file)
# add headers if sent
if headers:
csv_writer.writerow(headers)
# add data
for row in data:
csv_writer.writerow(row)
# Emit the files directly to HTTP response stream
self.response.out.write(csv_file.getvalue())
class AdminCreateHandler(Handler):
def get(self):
self.render('create.html')
class MetricApi(Handler):
default_rubric = """<script>
pretty_answers = JSON.stringify(mm.answers, null, 4)
$('#responses').html(pretty_answers);
</script>
<pre id='responses'></pre>
"""
default_survey = """<input name="quest"/>"""
def get(self):
name = self.request.get('name')
if name:
metric = Metric.get_by_name(name)
if metric:
self.write_json(util.to_dict(metric))
else:
default_description = "<h3>" + name + "</h3>"
self.write_json({
'survey': self.default_survey,
'rubric': self.default_rubric,
'description': default_description
})
else:
logging.error('Metric request had no name')
self.write_json({'error': 'a name is required'})
class AdminMetricApi(Handler):
def post(self):
name = self.request.get('name')
survey = self.request.get('survey')
rubric = self.request.get('rubric')
description = self.request.get('description')
if name and survey and rub
|
write
|
identifier_name
|
main.py
|
2
ORDER BY created DESC""", public_keys, ancestor)
else:
results = Result.gql(""" WHERE keys IN :1
ORDER BY created DESC""", public_keys)
metrics = set()
answers = []
for r in results:
metrics.add(r.metric)
answers.append(r.get_answers())
if len(metrics) > 1:
raise Exception("Keys were not all from the same metric: {} {}"
.format(public_keys, metrics))
if len(answers) > 0:
return {'metric': metrics.pop(), 'answers': answers}
else:
# No results
logging.info('No answers found')
return {'metric': 'no responses yet',
'answers': []}
@classmethod
    def put_result(cls, keys, metric, answers, group):
if group:
parent = Group.get_group(group)
result = Result(keys=keys, metric=metric, answers_json=answers,
parent=parent)
else:
result = Result(keys=keys, metric=metric, answers_json=answers)
return result.put()
def get_answers(self):
# Some old entities don't have json-based results. Treat them as if
# they are empty. This is easier than deleting them all.
if self.answers_json:
answers = json.loads(self.answers_json)
else:
answers = {}
# Always take the precaution of hashing participant ids, if present.
if 'pid' in answers:
answers['pid'] = util.hash_participant_id(answers['pid'])
return answers
# Page Handlers and APIs
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.write(*a, **kw)
def render_str(self, template, **params):
return jinja_environment.get_template(template).render(**params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def write_json(self, obj):
self.response.headers['Content-Type'] = "text/json; charset=utf-8"
self.write(json.dumps(obj))
class MainHandler(Handler):
def get(self):
self.render('index.html')
class TakeHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
self.render(name + '_survey.html', name=name)
else:
|
class CompleteHandler(Handler):
def get(self, name):
key = self.request.get('private_key', None)
group = self.request.get('group', None)
answers = []
if key is None:
# If there's no key, then this is a preview. Don't try to load any
# answers.
logging.info("No key present; rendering preview.")
else:
try:
answers = Result.get_results([key], group)['answers']
except Exception as e:
# There was some problem with the keys that were given. Just
# display the report with no answers.
logging.error('Problem with private key: {}'.format(e))
try:
metric = Metric.get_by_name(name)
except Exception as e:
logging.error('Could not find requested metric: {}'.format(e))
self.render('404.html')
return
# Render without html escaping
self.render(metric.name + '_survey_complete.html',
group=group, answers=jinja2.Markup(json.dumps(answers)))
class SampleHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
sample_template = name + '_sample_results.html'
# If there's a sample report, render that.
if os.path.isfile('templates/' + sample_template):
self.render(name + '_sample_results.html', name=name)
# Some reports can render themselves as a sample if no data is
# provided. These don't have a separate sample template. Instead,
# just serve up the main report template.
else:
self.render(name + '_results.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class ResultsHandler(Handler):
def get(self, metric=None, keys_str=''):
# Multiple keys can be specified, separated by hyphens, in which case
# multiple sets of results should be sent to the template.
keys = keys_str.split('-')
# A group may be applicable here for single-keyed results.
        group = self.request.get('group') if len(keys) == 1 else None
try:
results = Result.get_results(keys, group)
except Exception as e:
# There was some problem with the keys that were given.
logging.error('{}'.format(e))
self.render('404.html')
return
template = None
answers = []
if metric:
# A specific metric was requested. Check that 1) it exists and 2)
# it matches the answers, if any. Then show that metric's results
# page.
if metric not in config.metrics:
logging.error("Unknown metric: {}".format(metric))
template = '404.html'
if len(results['answers']) > 0:
if metric != results['metric']:
logging.error("Key is from metric {}, but {} requested."
.format(results['metric'], metric))
template = '404.html'
answers = results['answers']
# If the template hasn't been set by an error check above, give the
# metric-specific results page.
template = template or metric + '_results.html'
else:
# No specific metric was given. Infer it from the answers, if any,
# otherwise show a generic no-results page.
if len(results['answers']) > 0:
metric = results['metric']
answers = results['answers']
template = metric + '_results.html'
else:
template = 'no_responses.html'
# Render without html escaping.
answers = jinja2.Markup(json.dumps(answers))
self.render(template, group=group, answers=answers)
class ShareHandler(Handler):
def get(self, name):
keypair = util.Keys().get_pair()
# Render without html escaping
metric = Metric.get_by_name(name)
self.render(
metric.name + '_share.html', name=name,
private_key=keypair['private_keys'][0],
public_key=keypair['public_keys'][0])
class CsvHandler(Handler):
"""Return a csv based on the json array passed in
for example the following is a valid request (exploded for clarity)
/csv?
filename=gotcha&
headers=["name","age"]&
data=[["jack",12],["john",42],["joe",68]]
"""
def get(self):
# Get input
data = self.request.get('data')
headers = self.request.get('headers')
filename = self.request.get('filename').encode('ascii', 'ignore')
# Convert to json
data = json.loads(data)
if headers:
headers = json.loads(headers)
# Check input
if not headers:
logging.warning('no headers sent')
        if not isinstance(headers, list):
            logging.warning('the headers are not a list')
        if not data:
            logging.warning('no data')
        if not isinstance(data, list):
            logging.warning('data is not a list')
        if not len(data) > 0:
            logging.warning('data has no length')
        if not all(isinstance(row, list) for row in data):
            logging.warning('data contains members which are not lists')
# Set up headers for browser to correctly recognize the file
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
# write the csv to a file like string
csv_file = cStringIO.StringIO()
csv_writer = csv.writer(csv_file)
# add headers if sent
if headers:
csv_writer.writerow(headers)
# add data
for row in data:
csv_writer.writerow(row)
# Emit the files directly to HTTP response stream
self.response.out.write(csv_file.getvalue())
class AdminCreateHandler(Handler):
def get(self):
self.render('create.html')
class MetricApi(Handler):
default_rubric = """<script>
pretty_answers = JSON.stringify(mm.answers, null, 4)
$('#responses').html(pretty_answers);
</script>
<pre id='responses'></pre>
"""
default_survey = """<input name="quest"/>"""
def get(self):
name = self.request.get('name')
if name:
metric = Metric.get_by_name(name)
if metric:
self.write_json(util.to_dict(metric))
else:
default_description = "<h3>" + name + "</h3>"
self.write_json({
'survey': self.default_survey,
'rubric': self.default_rubric,
'description': default_description
})
else:
logging.error('Metric request had no name')
self.write_json({'error': 'a name is required'})
class AdminMetricApi(Handler):
def post(self):
name = self.request.get('name')
survey = self.request.get('survey')
rubric = self.request.get('rubric')
description = self.request.get('description')
if name and survey and rub
|
logging.error('Could not find requested metric')
self.render('404.html')
|
conditional_block
|
main.py
|
))
def write_json(self, obj):
self.response.headers['Content-Type'] = "text/json; charset=utf-8"
self.write(json.dumps(obj))
class MainHandler(Handler):
def get(self):
self.render('index.html')
class TakeHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
self.render(name + '_survey.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class CompleteHandler(Handler):
def get(self, name):
key = self.request.get('private_key', None)
group = self.request.get('group', None)
answers = []
if key is None:
# If there's no key, then this is a preview. Don't try to load any
# answers.
logging.info("No key present; rendering preview.")
else:
try:
answers = Result.get_results([key], group)['answers']
except Exception as e:
# There was some problem with the keys that were given. Just
# display the report with no answers.
logging.error('Problem with private key: {}'.format(e))
try:
metric = Metric.get_by_name(name)
except Exception as e:
logging.error('Could not find requested metric: {}'.format(e))
self.render('404.html')
return
# Render without html escaping
self.render(metric.name + '_survey_complete.html',
group=group, answers=jinja2.Markup(json.dumps(answers)))
class SampleHandler(Handler):
def get(self, name):
metric = Metric.get_by_name(name)
if metric:
sample_template = name + '_sample_results.html'
# If there's a sample report, render that.
if os.path.isfile('templates/' + sample_template):
self.render(name + '_sample_results.html', name=name)
# Some reports can render themselves as a sample if no data is
# provided. These don't have a separate sample template. Instead,
# just serve up the main report template.
else:
self.render(name + '_results.html', name=name)
else:
logging.error('Could not find requested metric')
self.render('404.html')
class ResultsHandler(Handler):
def get(self, metric=None, keys_str=''):
# Multiple keys can be specified, separated by hyphens, in which case
# multiple sets of results should be sent to the template.
keys = keys_str.split('-')
# A group may be applicable here for single-keyed results.
        group = self.request.get('group') if len(keys) == 1 else None
try:
results = Result.get_results(keys, group)
except Exception as e:
# There was some problem with the keys that were given.
logging.error('{}'.format(e))
self.render('404.html')
return
template = None
answers = []
if metric:
# A specific metric was requested. Check that 1) it exists and 2)
# it matches the answers, if any. Then show that metric's results
# page.
if metric not in config.metrics:
logging.error("Unknown metric: {}".format(metric))
template = '404.html'
if len(results['answers']) > 0:
if metric != results['metric']:
logging.error("Key is from metric {}, but {} requested."
.format(results['metric'], metric))
template = '404.html'
answers = results['answers']
# If the template hasn't been set by an error check above, give the
# metric-specific results page.
template = template or metric + '_results.html'
else:
# No specific metric was given. Infer it from the answers, if any,
# otherwise show a generic no-results page.
if len(results['answers']) > 0:
metric = results['metric']
answers = results['answers']
template = metric + '_results.html'
else:
template = 'no_responses.html'
# Render without html escaping.
answers = jinja2.Markup(json.dumps(answers))
self.render(template, group=group, answers=answers)
class ShareHandler(Handler):
def get(self, name):
keypair = util.Keys().get_pair()
# Render without html escaping
metric = Metric.get_by_name(name)
self.render(
metric.name + '_share.html', name=name,
private_key=keypair['private_keys'][0],
public_key=keypair['public_keys'][0])
class CsvHandler(Handler):
"""Return a csv based on the json array passed in
for example the following is a valid request (exploded for clarity)
/csv?
filename=gotcha&
headers=["name","age"]&
data=[["jack",12],["john",42],["joe",68]]
"""
def get(self):
# Get input
data = self.request.get('data')
headers = self.request.get('headers')
filename = self.request.get('filename').encode('ascii', 'ignore')
# Convert to json
data = json.loads(data)
if headers:
headers = json.loads(headers)
# Check input
if not headers:
logging.warning('no headers sent')
        if not isinstance(headers, list):
            logging.warning('the headers are not a list')
        if not data:
            logging.warning('no data')
        if not isinstance(data, list):
            logging.warning('data is not a list')
        if not len(data) > 0:
            logging.warning('data has no length')
        if not all(isinstance(row, list) for row in data):
            logging.warning('data contains members which are not lists')
# Set up headers for browser to correctly recognize the file
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
# write the csv to a file like string
csv_file = cStringIO.StringIO()
csv_writer = csv.writer(csv_file)
# add headers if sent
if headers:
csv_writer.writerow(headers)
# add data
for row in data:
csv_writer.writerow(row)
# Emit the files directly to HTTP response stream
self.response.out.write(csv_file.getvalue())
class AdminCreateHandler(Handler):
def get(self):
self.render('create.html')
class MetricApi(Handler):
default_rubric = """<script>
pretty_answers = JSON.stringify(mm.answers, null, 4)
$('#responses').html(pretty_answers);
</script>
<pre id='responses'></pre>
"""
default_survey = """<input name="quest"/>"""
def get(self):
name = self.request.get('name')
if name:
metric = Metric.get_by_name(name)
if metric:
self.write_json(util.to_dict(metric))
else:
default_description = "<h3>" + name + "</h3>"
self.write_json({
'survey': self.default_survey,
'rubric': self.default_rubric,
'description': default_description
})
else:
logging.error('Metric request had no name')
self.write_json({'error': 'a name is required'})
class AdminMetricApi(Handler):
def post(self):
name = self.request.get('name')
survey = self.request.get('survey')
rubric = self.request.get('rubric')
description = self.request.get('description')
if name and survey and rubric:
Metric(
name=name, survey=survey,
rubric=rubric, description=description).put()
self.write_json({'ok': True})
else:
logging.error('Posted metric was missing name, survey, description, or rubric')
message = "a name, survey, description, and grading rubric are required"
self.write_json({'error': message})
class AdminDataHandler(Handler):
"""Return a csv of all responses"""
def get(self):
# Set up headers for browser to correctly recognize the file
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="mm_data.csv"'
# write the csv to a file like string
csv_file = cStringIO.StringIO()
csv_writer = csv.writer(csv_file)
headers = ['created', 'metric', 'question', 'answer']
csv_writer.writerow(headers)
for result in Result.all():
for k, v in result.get_answers().items():
row = [result.created, result.metric, k, v]
csv_writer.writerow(row)
# Emit the files directly to HTTP response stream
self.response.out.write(csv_file.getvalue())
logging.info('All data downloaded by admin')
logging.info(csv_file.getvalue())
class ResultApi(Handler):
|
def get(self):
private_keys = json.loads(self.request.get('private_keys'))
group = self.request.get('group')
if private_keys:
for k in private_keys:
k.encode('ascii')
try:
response = Result.get_results(private_keys, group)
except Exception as e:
logging.error('{}'.format(e))
response = "Problem with provided keys. {}".format(e)
else:
logging.error('Requested result without a private key')
response = "a private key is required"
self.write_json(response)
def post(self):
keys = json.loads(self.request.get('keys'))
|
identifier_body
|
|
server.go
|
Response(id, result)
if err != nil {
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(resBytes)
}
}
func writeDirectlyToResponse(w http.ResponseWriter, data []byte) {
w.Write(data)
}
func isNeedCacheMethod(config *Config, rpcReqMethod string) bool {
if config.CacheAllJSONRpcMethods {
return true
}
if config.CacheJSONRpcMethodsWithBlacklist {
for _, m := range config.CacheJSONRpcMethodsBlacklist {
if m == rpcReqMethod {
return false
}
}
return true
}
return false
}
func useWorkerToProvideService(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse {
res := new(WorkerResponse)
res.WorkerIndex = workerIndex
res.WorkerUri = workerUri
cache1Key := workerUri
cache2Key := string(reqBody)
	// because there will never be a '^' in workerUri, we can safely join cache1Key and cache2Key with '^'
cacheKey := cache1Key + "^" + cache2Key
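	// Illustration (not from the original source): for a hypothetical worker "http://127.0.0.1:8545" and
	// request body {"jsonrpc":"2.0","id":1,"method":"some_method"}, the composed cache key would be
	// http://127.0.0.1:8545^{"jsonrpc":"2.0","id":1,"method":"some_method"}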
if isNeedCacheMethod(config, rpcReqMethod) {
if cacheValue, ok := cache.Get(cacheKey); ok {
resultBytes := cacheValue.([]byte)
resultJSON, jsonErr := simplejson.NewJson(resultBytes)
if jsonErr == nil {
res.Result = resultBytes
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
return res
}
}
}
workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody))
if workerResErr != nil {
res.Error = workerResErr
} else {
defer workerHttpRes.Body.Close()
readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body)
if readErr != nil {
res.Error = readErr
} else {
res.Result = readBytes
resultJSON, jsonErr := simplejson.NewJson(readBytes)
if jsonErr == nil {
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) {
cacheValue := readBytes
cache.SetWithDefaultExpire(cacheKey, cacheValue)
}
}
}
}
return res
}
// send requests to the workers one at a time, falling back to the next worker until one returns a valid result
func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse,
rpcReqMethod string, reqBody []byte) {
if len(workerUris) <= startWorkerIndex {
return
}
go func() {
res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody)
if startWorkerIndex == (len(workerUris) - 1) {
responsesChannel <- res
return
}
if res.IsValidJSONRpcResult() {
responsesChannel <- res
} else {
asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody)
}
}()
}
func selectWorkersToProvideService(config *Config, workerUris []string,
responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte)
|
var workerLoadBalanceIndex uint32 = 0
// returns the order of workers according to the mode in the configuration
func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string {
if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() {
return workerUris
} else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() {
firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1)
firstIdx = firstIdx % uint32(len(workerUris))
newSeq := []string{workerUris[firstIdx]}
beforeWorkers := workerUris[0:firstIdx]
afterWorkers := workerUris[firstIdx+1:]
newSeq = append(newSeq, beforeWorkers...)
newSeq = append(newSeq, afterWorkers...)
return newSeq
} else {
panic("not supported config select_worker_mode")
return nil
}
}
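// Illustration of getWorkersSequenceBySelectMode (not from the original source): with workers
// [A, B, C] and the atomic counter incremented to 1, firstIdx = 1 % 3 = 1 and the returned order
// is [B, A, C]; the next call yields firstIdx = 2 and [C, A, B]. This gives a simple round-robin
// rotation when IsOnlyFirstSelectMode or IsOnlyOnceSelectMode is configured.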
// TODO: use jsonrpcmethods whitelist if enabled
// TODO: fault handler
// TODO: rate limit
// TODO: workers health check
func StartServer(config *Config) {
if config.LogPath == "" {
config.LogPath = "./query_api_proxy.log"
}
logger, err := NewLogger(config.LogPath)
if err != nil {
		panic("error happened when opening log " + err.Error())
return
}
defer logger.Close()
proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
// only support POST json-rpc now
writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only support POST JSON-RPC now")
return
}
defer r.Body.Close()
reqBody, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
var rpcReqId interface{} = 1
var rpcReqMethod string = ""
reqBodyJSON, err := simplejson.NewJson(reqBody)
if err == nil {
tryGetReqId, err := reqBodyJSON.Get("id").Int()
if err == nil {
rpcReqId = tryGetReqId
} else {
tryGetReqId, err := reqBodyJSON.Get("id").String()
if err == nil {
rpcReqId = tryGetReqId
}
}
method, err := reqBodyJSON.Get("method").String()
if err == nil {
rpcReqMethod = method
} else {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
}
responsesChannel := make(chan *WorkerResponse, len(config.Workers))
workerUris := getWorkersSequenceBySelectMode(config, config.Workers)
selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody)
timeout := false
breakIterWorkerResponses := false
workerResponses := make([]*WorkerResponse, 0)
for i := 0; i < len(workerUris); i++ {
if timeout {
break
}
select {
case res := <-responsesChannel:
workerResponses = append(workerResponses, res)
if config.IsOnlyOnceSelectMode() {
breakIterWorkerResponses = true
break
}
if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
break
}
if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
}
case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second):
timeout = true
}
if breakIterWorkerResponses {
break
}
}
		// compare workerResponses to select the most common (majority) response
hasSomeErrorInWorkerResponses := false
if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) || len(workerResponses) < len(config.Workers) {
hasSomeErrorInWorkerResponses = true
}
if len(workerResponses) < 1 {
hasSomeErrorInWorkerResponses = true
}
type WorkerResponseSameGroup struct {
ResultJSON *simplejson.Json
ResultBytes []byte
Count int
}
if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 {
			// find the first non-empty result JSON and use it as the final response
for _, workerRes := range workerResponses {
if workerRes.ResultJSON != nil {
writeDirectlyToResponse(w, workerRes.Result)
return
}
}
}
var sameWorkerResponseGroups = make(map[string]*WorkerResponseSameGroup, 0)
var maxCountGroup *WorkerResponseSameGroup = nil
for _, workerRes := range workerResponses {
if workerRes.ResultJSON == nil {
hasSomeErrorInWorkerResponses = true
continue
}
resultJSON
|
{
if config.IsOnlyFirstSelectMode() {
asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody)
} else {
for workerIndex, workerUri := range workerUris {
go func(workerIndex int, workerUri string) {
res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody)
responsesChannel <- res
}(workerIndex, workerUri)
if config.IsOnlyOnceSelectMode() {
break
}
}
}
}
|
identifier_body
|
server.go
|
Response(id, result)
if err != nil {
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(resBytes)
}
}
func writeDirectlyToResponse(w http.ResponseWriter, data []byte) {
w.Write(data)
}
func isNeedCacheMethod(config *Config, rpcReqMethod string) bool {
if config.CacheAllJSONRpcMethods {
return true
}
if config.CacheJSONRpcMethodsWithBlacklist {
for _, m := range config.CacheJSONRpcMethodsBlacklist {
if m == rpcReqMethod {
return false
}
}
return true
}
return false
}
func
|
(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse {
res := new(WorkerResponse)
res.WorkerIndex = workerIndex
res.WorkerUri = workerUri
cache1Key := workerUri
cache2Key := string(reqBody)
	// because there will never be a '^' in workerUri, we can safely join cache1Key and cache2Key with '^'
cacheKey := cache1Key + "^" + cache2Key
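	// Illustration (not from the original source): for a hypothetical worker "http://127.0.0.1:8545" and
	// request body {"jsonrpc":"2.0","id":1,"method":"some_method"}, the composed cache key would be
	// http://127.0.0.1:8545^{"jsonrpc":"2.0","id":1,"method":"some_method"}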
if isNeedCacheMethod(config, rpcReqMethod) {
if cacheValue, ok := cache.Get(cacheKey); ok {
resultBytes := cacheValue.([]byte)
resultJSON, jsonErr := simplejson.NewJson(resultBytes)
if jsonErr == nil {
res.Result = resultBytes
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
return res
}
}
}
workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody))
if workerResErr != nil {
res.Error = workerResErr
} else {
defer workerHttpRes.Body.Close()
readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body)
if readErr != nil {
res.Error = readErr
} else {
res.Result = readBytes
resultJSON, jsonErr := simplejson.NewJson(readBytes)
if jsonErr == nil {
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) {
cacheValue := readBytes
cache.SetWithDefaultExpire(cacheKey, cacheValue)
}
}
}
}
return res
}
// send requests to the workers one at a time, falling back to the next worker until one returns a valid result
func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse,
rpcReqMethod string, reqBody []byte) {
if len(workerUris) <= startWorkerIndex {
return
}
go func() {
res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody)
if startWorkerIndex == (len(workerUris) - 1) {
responsesChannel <- res
return
}
if res.IsValidJSONRpcResult() {
responsesChannel <- res
} else {
asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody)
}
}()
}
func selectWorkersToProvideService(config *Config, workerUris []string,
responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) {
if config.IsOnlyFirstSelectMode() {
asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody)
} else {
for workerIndex, workerUri := range workerUris {
go func(workerIndex int, workerUri string) {
res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody)
responsesChannel <- res
}(workerIndex, workerUri)
if config.IsOnlyOnceSelectMode() {
break
}
}
}
}
var workerLoadBalanceIndex uint32 = 0
// returns the order of workers according to the mode in the configuration
func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string {
if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() {
return workerUris
} else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() {
firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1)
firstIdx = firstIdx % uint32(len(workerUris))
newSeq := []string{workerUris[firstIdx]}
beforeWorkers := workerUris[0:firstIdx]
afterWorkers := workerUris[firstIdx+1:]
newSeq = append(newSeq, beforeWorkers...)
newSeq = append(newSeq, afterWorkers...)
return newSeq
} else {
panic("not supported config select_worker_mode")
return nil
}
}
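// Illustration of getWorkersSequenceBySelectMode (not from the original source): with workers
// [A, B, C] and the atomic counter incremented to 1, firstIdx = 1 % 3 = 1 and the returned order
// is [B, A, C]; the next call yields firstIdx = 2 and [C, A, B]. This gives a simple round-robin
// rotation when IsOnlyFirstSelectMode or IsOnlyOnceSelectMode is configured.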
// TODO: use jsonrpcmethods whitelist if enabled
// TODO: fault handler
// TODO: rate limit
// TODO: workers health check
func StartServer(config *Config) {
if config.LogPath == "" {
config.LogPath = "./query_api_proxy.log"
}
logger, err := NewLogger(config.LogPath)
if err != nil {
		panic("error happened when opening log " + err.Error())
return
}
defer logger.Close()
proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
// only support POST json-rpc now
writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only support POST JSON-RPC now")
return
}
defer r.Body.Close()
reqBody, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
var rpcReqId interface{} = 1
var rpcReqMethod string = ""
reqBodyJSON, err := simplejson.NewJson(reqBody)
if err == nil {
tryGetReqId, err := reqBodyJSON.Get("id").Int()
if err == nil {
rpcReqId = tryGetReqId
} else {
tryGetReqId, err := reqBodyJSON.Get("id").String()
if err == nil {
rpcReqId = tryGetReqId
}
}
method, err := reqBodyJSON.Get("method").String()
if err == nil {
rpcReqMethod = method
} else {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
}
responsesChannel := make(chan *WorkerResponse, len(config.Workers))
workerUris := getWorkersSequenceBySelectMode(config, config.Workers)
selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody)
timeout := false
breakIterWorkerResponses := false
workerResponses := make([]*WorkerResponse, 0)
for i := 0; i < len(workerUris); i++ {
if timeout {
break
}
select {
case res := <-responsesChannel:
workerResponses = append(workerResponses, res)
if config.IsOnlyOnceSelectMode() {
breakIterWorkerResponses = true
break
}
if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
break
}
if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
}
case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second):
timeout = true
}
if breakIterWorkerResponses {
break
}
}
		// compare workerResponses to select the most common (majority) response
hasSomeErrorInWorkerResponses := false
if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) || len(workerResponses) < len(config.Workers) {
hasSomeErrorInWorkerResponses = true
}
if len(workerResponses) < 1 {
hasSomeErrorInWorkerResponses = true
}
type WorkerResponseSameGroup struct {
ResultJSON *simplejson.Json
ResultBytes []byte
Count int
}
if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 {
			// find the first non-empty result JSON and use it as the final response
for _, workerRes := range workerResponses {
if workerRes.ResultJSON != nil {
writeDirectlyToResponse(w, workerRes.Result)
return
}
}
}
var sameWorkerResponseGroups = make(map[string]*WorkerResponseSameGroup, 0)
var maxCountGroup *WorkerResponseSameGroup = nil
for _, workerRes := range workerResponses {
if workerRes.ResultJSON == nil {
hasSomeErrorInWorkerResponses = true
continue
}
resultJSONDigest
|
useWorkerToProvideService
|
identifier_name
|
server.go
|
"bytes"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/zoowii/query_api_proxy/cache"
"sync/atomic"
"github.com/bitly/go-simplejson"
"github.com/zoowii/betterjson"
"gopkg.in/yaml.v2"
)
func ReadConfigFromYaml(yamlConfigFilePath string) (*Config, error) {
conf := NewConfig()
yamlFile, err := ioutil.ReadFile(yamlConfigFilePath)
if err != nil {
return nil, err
}
err = yaml.Unmarshal(yamlFile, conf)
if err != nil {
return nil, err
}
return conf, nil
}
func writeErrorToJSONRpcResponse(w http.ResponseWriter, id interface{}, errorCode int, errMsg string) {
resBytes, err := MakeJSONRpcErrorResponse(id, errorCode, errMsg, nil)
if err != nil {
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(resBytes)
}
}
func writeResultToJSONRpcResponse(w http.ResponseWriter, id interface{}, result interface{}) {
resBytes, err := MakeJSONRpcSuccessResponse(id, result)
if err != nil {
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(resBytes)
}
}
func writeDirectlyToResponse(w http.ResponseWriter, data []byte) {
w.Write(data)
}
func isNeedCacheMethod(config *Config, rpcReqMethod string) bool {
if config.CacheAllJSONRpcMethods {
return true
}
if config.CacheJSONRpcMethodsWithBlacklist {
for _, m := range config.CacheJSONRpcMethodsBlacklist {
if m == rpcReqMethod {
return false
}
}
return true
}
return false
}
func useWorkerToProvideService(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse {
res := new(WorkerResponse)
res.WorkerIndex = workerIndex
res.WorkerUri = workerUri
cache1Key := workerUri
cache2Key := string(reqBody)
	// because there will never be a '^' in workerUri, we can safely join cache1Key and cache2Key with '^'
cacheKey := cache1Key + "^" + cache2Key
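	// Illustration (not from the original source): for a hypothetical worker "http://127.0.0.1:8545" and
	// request body {"jsonrpc":"2.0","id":1,"method":"some_method"}, the composed cache key would be
	// http://127.0.0.1:8545^{"jsonrpc":"2.0","id":1,"method":"some_method"}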
if isNeedCacheMethod(config, rpcReqMethod) {
if cacheValue, ok := cache.Get(cacheKey); ok {
resultBytes := cacheValue.([]byte)
resultJSON, jsonErr := simplejson.NewJson(resultBytes)
if jsonErr == nil {
res.Result = resultBytes
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
return res
}
}
}
workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody))
if workerResErr != nil {
res.Error = workerResErr
} else {
defer workerHttpRes.Body.Close()
readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body)
if readErr != nil {
res.Error = readErr
} else {
res.Result = readBytes
resultJSON, jsonErr := simplejson.NewJson(readBytes)
if jsonErr == nil {
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) {
cacheValue := readBytes
cache.SetWithDefaultExpire(cacheKey, cacheValue)
}
}
}
}
return res
}
// send requests to the workers one at a time, falling back to the next worker until one returns a valid result
func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse,
rpcReqMethod string, reqBody []byte) {
if len(workerUris) <= startWorkerIndex {
return
}
go func() {
res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody)
if startWorkerIndex == (len(workerUris) - 1) {
responsesChannel <- res
return
}
if res.IsValidJSONRpcResult() {
responsesChannel <- res
} else {
asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody)
}
}()
}
func selectWorkersToProvideService(config *Config, workerUris []string,
responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) {
if config.IsOnlyFirstSelectMode() {
asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody)
} else {
for workerIndex, workerUri := range workerUris {
go func(workerIndex int, workerUri string) {
res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody)
responsesChannel <- res
}(workerIndex, workerUri)
if config.IsOnlyOnceSelectMode() {
break
}
}
}
}
var workerLoadBalanceIndex uint32 = 0
// returns the order of workers according to the mode in the configuration
func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string {
if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() {
return workerUris
} else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() {
firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1)
firstIdx = firstIdx % uint32(len(workerUris))
newSeq := []string{workerUris[firstIdx]}
beforeWorkers := workerUris[0:firstIdx]
afterWorkers := workerUris[firstIdx+1:]
newSeq = append(newSeq, beforeWorkers...)
newSeq = append(newSeq, afterWorkers...)
return newSeq
} else {
panic("not supported config select_worker_mode")
return nil
}
}
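// Illustration of getWorkersSequenceBySelectMode (not from the original source): with workers
// [A, B, C] and the atomic counter incremented to 1, firstIdx = 1 % 3 = 1 and the returned order
// is [B, A, C]; the next call yields firstIdx = 2 and [C, A, B]. This gives a simple round-robin
// rotation when IsOnlyFirstSelectMode or IsOnlyOnceSelectMode is configured.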
// TODO: use jsonrpcmethods whitelist if enabled
// TODO: fault handler
// TODO: rate limit
// TODO: workers health check
func StartServer(config *Config) {
if config.LogPath == "" {
config.LogPath = "./query_api_proxy.log"
}
logger, err := NewLogger(config.LogPath)
if err != nil {
		panic("error happened when opening log " + err.Error())
return
}
defer logger.Close()
proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
// only support POST json-rpc now
writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only support POST JSON-RPC now")
return
}
defer r.Body.Close()
reqBody, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
var rpcReqId interface{} = 1
var rpcReqMethod string = ""
reqBodyJSON, err := simplejson.NewJson(reqBody)
if err == nil {
tryGetReqId, err := reqBodyJSON.Get("id").Int()
if err == nil {
rpcReqId = tryGetReqId
} else {
tryGetReqId, err := reqBodyJSON.Get("id").String()
if err == nil {
rpcReqId = tryGetReqId
}
}
method, err := reqBodyJSON.Get("method").String()
if err == nil {
rpcReqMethod = method
} else {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
}
responsesChannel := make(chan *WorkerResponse, len(config.Workers))
workerUris := getWorkersSequenceBySelectMode(config, config.Workers)
selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody)
timeout := false
breakIterWorkerResponses := false
workerResponses := make([]*WorkerResponse, 0)
for i := 0; i < len(workerUris); i++ {
if timeout {
break
}
select {
case res := <-responsesChannel:
workerResponses = append(workerResponses, res)
if config.IsOnlyOnceSelectMode() {
breakIterWorkerResponses = true
break
}
if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
break
}
if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
}
case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second):
timeout = true
}
if breakIterWorkerResponses {
break
}
}
		// compare workerResponses to select the most common (majority) response
hasSomeErrorInWorkerResponses := false
|
random_line_split
|
||
server.go
|
else {
w.Header().Set("Content-Type", "application/json")
w.Write(resBytes)
}
}
func writeResultToJSONRpcResponse(w http.ResponseWriter, id interface{}, result interface{}) {
resBytes, err := MakeJSONRpcSuccessResponse(id, result)
if err != nil {
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(resBytes)
}
}
func writeDirectlyToResponse(w http.ResponseWriter, data []byte) {
w.Write(data)
}
func isNeedCacheMethod(config *Config, rpcReqMethod string) bool {
if config.CacheAllJSONRpcMethods {
return true
}
if config.CacheJSONRpcMethodsWithBlacklist {
for _, m := range config.CacheJSONRpcMethodsBlacklist {
if m == rpcReqMethod {
return false
}
}
return true
}
return false
}
func useWorkerToProvideService(config *Config, workerIndex int, workerUri string, rpcReqMethod string, reqBody []byte) *WorkerResponse {
res := new(WorkerResponse)
res.WorkerIndex = workerIndex
res.WorkerUri = workerUri
cache1Key := workerUri
cache2Key := string(reqBody)
	// because there will never be a '^' in workerUri, we can safely join cache1Key and cache2Key with '^'
cacheKey := cache1Key + "^" + cache2Key
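	// Illustration (not from the original source): for a hypothetical worker "http://127.0.0.1:8545" and
	// request body {"jsonrpc":"2.0","id":1,"method":"some_method"}, the composed cache key would be
	// http://127.0.0.1:8545^{"jsonrpc":"2.0","id":1,"method":"some_method"}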
if isNeedCacheMethod(config, rpcReqMethod) {
if cacheValue, ok := cache.Get(cacheKey); ok {
resultBytes := cacheValue.([]byte)
resultJSON, jsonErr := simplejson.NewJson(resultBytes)
if jsonErr == nil {
res.Result = resultBytes
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
return res
}
}
}
workerHttpRes, workerResErr := http.Post(workerUri, "application/json", bytes.NewReader(reqBody))
if workerResErr != nil {
res.Error = workerResErr
} else {
defer workerHttpRes.Body.Close()
readBytes, readErr := ioutil.ReadAll(workerHttpRes.Body)
if readErr != nil {
res.Error = readErr
} else {
res.Result = readBytes
resultJSON, jsonErr := simplejson.NewJson(readBytes)
if jsonErr == nil {
res.ResultJSON = resultJSON
				// TODO: digest the result JSON and break the loop once more than half of the workers return the same result
if isNeedCacheMethod(config, rpcReqMethod) || IsSuccessJSONRpcResponse(resultJSON) {
cacheValue := readBytes
cache.SetWithDefaultExpire(cacheKey, cacheValue)
}
}
}
}
return res
}
// send requests to the workers one at a time, falling back to the next worker until one returns a valid result
func asyncTryWorkersUntilSuccess(config *Config, workerUris []string, startWorkerIndex int, responsesChannel chan *WorkerResponse,
rpcReqMethod string, reqBody []byte) {
if len(workerUris) <= startWorkerIndex {
return
}
go func() {
res := useWorkerToProvideService(config, startWorkerIndex, workerUris[startWorkerIndex], rpcReqMethod, reqBody)
if startWorkerIndex == (len(workerUris) - 1) {
responsesChannel <- res
return
}
if res.IsValidJSONRpcResult() {
responsesChannel <- res
} else {
asyncTryWorkersUntilSuccess(config, workerUris, startWorkerIndex+1, responsesChannel, rpcReqMethod, reqBody)
}
}()
}
func selectWorkersToProvideService(config *Config, workerUris []string,
responsesChannel chan *WorkerResponse, rpcReqMethod string, reqBody []byte) {
if config.IsOnlyFirstSelectMode() {
asyncTryWorkersUntilSuccess(config, workerUris, 0, responsesChannel, rpcReqMethod, reqBody)
} else {
for workerIndex, workerUri := range workerUris {
go func(workerIndex int, workerUri string) {
res := useWorkerToProvideService(config, workerIndex, workerUri, rpcReqMethod, reqBody)
responsesChannel <- res
}(workerIndex, workerUri)
if config.IsOnlyOnceSelectMode() {
break
}
}
}
}
var workerLoadBalanceIndex uint32 = 0
// returns the order of workers according to the mode in the configuration
func getWorkersSequenceBySelectMode(config *Config, workerUris []string) []string {
if config.IsMostOfAllSelectMode() || config.IsFirstOfAllSelectMode() {
return workerUris
} else if config.IsOnlyFirstSelectMode() || config.IsOnlyOnceSelectMode() {
firstIdx := atomic.AddUint32(&workerLoadBalanceIndex, 1)
firstIdx = firstIdx % uint32(len(workerUris))
newSeq := []string{workerUris[firstIdx]}
beforeWorkers := workerUris[0:firstIdx]
afterWorkers := workerUris[firstIdx+1:]
newSeq = append(newSeq, beforeWorkers...)
newSeq = append(newSeq, afterWorkers...)
return newSeq
} else {
panic("not supported config select_worker_mode")
return nil
}
}
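// Illustration of getWorkersSequenceBySelectMode (not from the original source): with workers
// [A, B, C] and the atomic counter incremented to 1, firstIdx = 1 % 3 = 1 and the returned order
// is [B, A, C]; the next call yields firstIdx = 2 and [C, A, B]. This gives a simple round-robin
// rotation when IsOnlyFirstSelectMode or IsOnlyOnceSelectMode is configured.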
// TODO: use jsonrpcmethods whitelist if enabled
// TODO: fault handler
// TODO: rate limit
// TODO: workers health check
func StartServer(config *Config) {
if config.LogPath == "" {
config.LogPath = "./query_api_proxy.log"
}
logger, err := NewLogger(config.LogPath)
if err != nil {
		panic("error happened when opening log " + err.Error())
return
}
defer logger.Close()
proxyHandlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
// only support POST json-rpc now
writeErrorToJSONRpcResponse(w, 1, JSONRPC_PARSE_ERROR_CODE, "only support POST JSON-RPC now")
return
}
defer r.Body.Close()
reqBody, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
var rpcReqId interface{} = 1
var rpcReqMethod string = ""
reqBodyJSON, err := simplejson.NewJson(reqBody)
if err == nil {
tryGetReqId, err := reqBodyJSON.Get("id").Int()
if err == nil {
rpcReqId = tryGetReqId
} else {
tryGetReqId, err := reqBodyJSON.Get("id").String()
if err == nil {
rpcReqId = tryGetReqId
}
}
method, err := reqBodyJSON.Get("method").String()
if err == nil {
rpcReqMethod = method
} else {
writeErrorToJSONRpcResponse(w, 1, JSONRPC_INVALID_REQUEST_ERROR_CODE, err.Error())
return
}
}
responsesChannel := make(chan *WorkerResponse, len(config.Workers))
workerUris := getWorkersSequenceBySelectMode(config, config.Workers)
selectWorkersToProvideService(config, workerUris, responsesChannel, rpcReqMethod, reqBody)
timeout := false
breakIterWorkerResponses := false
workerResponses := make([]*WorkerResponse, 0)
for i := 0; i < len(workerUris); i++ {
if timeout {
break
}
select {
case res := <-responsesChannel:
workerResponses = append(workerResponses, res)
if config.IsOnlyOnceSelectMode() {
breakIterWorkerResponses = true
break
}
if config.IsOnlyFirstSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
break
}
if !config.IsMostOfAllSelectMode() && res.ResultJSON != nil {
breakIterWorkerResponses = true
}
case <-time.After(time.Duration(config.RequestTimeoutSeconds) * time.Second):
timeout = true
}
if breakIterWorkerResponses {
break
}
}
		// compare workerResponses to select the most common (majority) response
hasSomeErrorInWorkerResponses := false
if (config.IsFirstOfAllSelectMode() || config.IsMostOfAllSelectMode()) || len(workerResponses) < len(config.Workers) {
hasSomeErrorInWorkerResponses = true
}
if len(workerResponses) < 1 {
hasSomeErrorInWorkerResponses = true
}
type WorkerResponseSameGroup struct {
ResultJSON *simplejson.Json
ResultBytes []byte
Count int
}
if !config.IsMostOfAllSelectMode() && len(workerResponses) > 0 {
			// find the first non-empty result JSON and use it as the final response
for _, workerRes := range workerResponses {
if workerRes.ResultJSON != nil {
writeDirectlyToResponse(w, workerRes.Result)
return
}
}
}
var sameWorkerResponseGroups = make(map[string
|
{
w.Write([]byte(err.Error()))
}
|
conditional_block
|
|
lib.rs
|
new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self
|
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
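//
// Sanity check of the chunk bound above (worked numbers):
// n = 5552: 5552*65520 + (5552*5553/2)*255 = 363_767_040 + 3_930_857_640 = 4_294_624_680,
// which is <= 2^32 - 65521 = 4_294_901_775, while n = 5553 already gives 4_296_106_215.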
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
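// (Since a_vec and b_vec were reduced mod MOD above, adding (MOD - a_vec.0[j]) * (j - 1)
// is congruent to subtracting (j - 1) * ka(j) mod MOD without risking unsigned underflow.)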
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
*s %= quotient;
}
}
}
impl MulAssign<u32> for U32X4 {
fn mul
|
{
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
|
identifier_body
|
lib.rs
|
{
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
|
Adler32
|
identifier_name
|
|
lib.rs
|
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler/0.2.2")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
use std::hash::Hasher;
use std::ops::{AddAssign, MulAssign, RemAssign};
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash, they are not necessarily good at being
/// one).
///
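/// The `Hasher` impl can be used directly, for example:
///
/// ```
/// # use adler::Adler32;
/// use std::hash::Hasher;
///
/// let mut hasher = Adler32::new();
/// hasher.write(b"rustacean");
/// // `finish` widens the 32-bit checksum to a u64.
/// assert_eq!(hasher.finish(), u64::from(adler::adler32_slice(b"rustacean")));
/// ```
///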
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct Adler32 {
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
|
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Decent performance (3-4 GB/s).
|
random_line_split
|
|
lib.rs
|
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
*s %= quotient;
}
}
}
impl MulAssign<u32> for U32X4 {
fn mul_assign(&mut self, rhs: u32) {
for s in self.0.iter_mut() {
*s *= rhs;
}
}
}
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF.
///
/// If you only have a `Read` implementor, wrap it in `std::io::BufReader`.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> {
let mut h = Adler32::new();
loop {
let len = {
let buf = reader.fill_buf()?;
if buf.is_empty() {
|
return Ok(h.checksum());
}
|
conditional_block
|
|
main.rs
|
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(¤t_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
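// With the 50/50 strategy choice below, both branches use the same weight, because
// (light_w / light_p) == (scatter_w / scatter_p) == 1 / (light_p + scatter_p);
// the factor 2.0 compensates for only taking one of the two samples per bounce.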
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else {
return Vec3::new(0.0, 0.0, 0.0);
};
let albedo = hr.material.albedo(¤t_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
}
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
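// Output format (as written below): a "<width> <height>" header line, then one
// "<r> <g> <b>" line of linear-RGB floats per output pixel, each averaged over
// samples_so_far samples and over a subsample x subsample block of source pixels.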
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w != summary2.w {
panic!("Need same widths ({} vs {})!", summary1.w, summary2.w);
}
if summary1.h != summary2.h {
panic!("Need same heights ({} vs {})!", summary1.h, summary2.h);
}
if summary1.data.len() != summary2.data.len() {
panic!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len());
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1 != l2 {
panic!("Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2);
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn write_image(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads
|
{
let mut current_ray = *ray;
let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(¤t_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(¤t_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if !hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
|
identifier_body
|
|
main.rs
|
mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(¤t_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(¤t_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if !hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(¤t_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else {
return Vec3::new(0.0, 0.0, 0.0);
};
let albedo = hr.material.albedo(¤t_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
}
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w != summary2.w {
panic!("Need same widths ({} vs {})!", summary1.w, summary2.w);
}
if summary1.h != summary2.h {
panic!("Need same heights ({} vs {})!", summary1.h, summary2.h);
}
if summary1.data.len() != summary2.data.len() {
panic!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len());
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1 != l2 {
panic!("Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2);
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn
|
(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads = args.n.unwrap_or(1);
let ns
|
write_image
|
identifier_name
|
main.rs
|
mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(¤t_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(¤t_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if !hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(¤t_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else {
return Vec3::new(0.0, 0.0, 0.0);
};
let albedo = hr.material.albedo(¤t_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
}
|
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w != summary2.w {
panic!("Need same widths ({} vs {})!", summary1.w, summary2.w);
}
if summary1.h != summary2.h {
panic!("Need same heights ({} vs {})!", summary1.h, summary2.h);
}
if summary1.data.len() != summary2.data.len() {
panic!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len());
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1 != l2 {
panic!("Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2);
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn write_image(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads = args.n.unwrap_or(1);
let ns
|
random_line_split
|
|
main.rs
|
mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(¤t_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(¤t_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if !hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(¤t_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else
|
;
let albedo = hr.material.albedo(¤t_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
}
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w != summary2.w {
panic!("Need same widths ({} vs {})!", summary1.w, summary2.w);
}
if summary1.h != summary2.h {
panic!("Need same heights ({} vs {})!", summary1.h, summary2.h);
}
if summary1.data.len() != summary2.data.len() {
panic!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len());
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1 != l2 {
panic!("Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2);
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn write_image(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads = args.n.unwrap_or(1);
let
|
{
return Vec3::new(0.0, 0.0, 0.0);
}
|
conditional_block
|
mod.rs
|
/// A user-defined function is really a closure, it has a scope
/// where it is defined and a body of items.
UserDefined(ScopeRef, Vec<sass::Item>),
}
impl PartialOrd for FuncImpl {
fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
match (self, rhs) {
(&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None,
(&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => {
Some(cmp::Ordering::Less)
}
(&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => {
Some(cmp::Ordering::Greater)
}
(
&FuncImpl::UserDefined(ref _sa, ref a),
&FuncImpl::UserDefined(ref _sb, ref b),
) => a.partial_cmp(b),
}
}
}
impl cmp::PartialEq for FuncImpl {
fn eq(&self, rhs: &FuncImpl) -> bool {
match (self, rhs) {
(
&FuncImpl::UserDefined(ref sa, ref a),
&FuncImpl::UserDefined(ref sb, ref b),
) => ScopeRef::is_same(sa, sb) && a == b,
(&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => {
// Each builtin function is only created once, so this
// should be ok.
#[allow(clippy::vtable_address_comparisons)]
Arc::ptr_eq(a, b)
}
_ => false,
}
}
}
impl cmp::Eq for FuncImpl {}
impl fmt::Debug for FuncImpl {
fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
FuncImpl::Builtin(_) => write!(out, "(builtin function)"),
FuncImpl::UserDefined(..) => {
write!(out, "(user-defined function)")
}
}
}
}
trait Functions {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
);
}
impl Functions for Scope {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin(&self.get_name(), &name, args, body);
self.define_function(name, f);
}
}
impl Function {
/// Get a built-in function by name.
pub fn get_builtin(name: &Name) -> Option<&'static Function> {
FUNCTIONS.get(name)
}
/// Create a new `Function` from a rust implementation.
///
/// Note: This does not expose the function in any scope, it just
/// creates it.
pub fn builtin(
module: &str,
name: &Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) -> Self {
let pos = SourcePos::mock_function(name, &args, module);
Function {
args,
pos,
body: FuncImpl::Builtin(body),
}
}
/// Create a new `Function` from a scss implementation.
///
/// The scope is where the function is defined, used to bind any
/// non-parameter names in the body.
pub fn
|
(
args: FormalArgs,
pos: SourcePos,
scope: ScopeRef,
body: Vec<sass::Item>,
) -> Self {
Function {
args,
pos,
body: FuncImpl::UserDefined(scope, body),
}
}
/// Call the function from a given scope and with a given set of
/// arguments.
pub fn call(
&self,
callscope: ScopeRef,
args: CallArgs,
) -> Result<Value, Error> {
let cs = "%%CALLING_SCOPE%%";
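// The calling scope is stashed as a pseudo-module under this reserved name,
// presumably so that function bodies (e.g. the sass:meta helpers) can reach back
// into the caller's scope when needed.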
match self.body {
FuncImpl::Builtin(ref body) => {
let s = self.do_eval_args(
ScopeRef::new_global(callscope.get_format()),
args,
)?;
s.define_module(cs.into(), callscope);
body(&s)
}
FuncImpl::UserDefined(ref defscope, ref body) => {
let s = self.do_eval_args(defscope.clone(), args)?;
s.define_module(cs.into(), callscope);
Ok(s.eval_body(body)?.unwrap_or(Value::Null))
}
}
.map(Value::into_calculated)
}
fn do_eval_args(
&self,
def: ScopeRef,
args: CallArgs,
) -> Result<ScopeRef, Error> {
self.args.eval(def, args).map_err(|e| match e {
sass::ArgsError::Eval(e) => e,
ae => Error::BadArguments(ae.to_string(), self.pos.clone()),
})
}
}
lazy_static! {
static ref MODULES: BTreeMap<&'static str, Scope> = {
let mut modules = BTreeMap::new();
modules.insert("sass:color", color::create_module());
modules.insert("sass:list", list::create_module());
modules.insert("sass:map", map::create_module());
modules.insert("sass:math", math::create_module());
modules.insert("sass:meta", meta::create_module());
modules.insert("sass:selector", selector::create_module());
modules.insert("sass:string", string::create_module());
modules
};
}
/// Get a global module (e.g. `sass:math`) by name.
pub fn get_global_module(name: &str) -> Option<ScopeRef> {
MODULES.get(name).map(ScopeRef::Builtin)
}
type FunctionMap = BTreeMap<Name, Function>;
impl Functions for FunctionMap {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin("", &name, args, body);
self.insert(name, f);
}
}
lazy_static! {
static ref FUNCTIONS: FunctionMap = {
let mut f = BTreeMap::new();
def!(f, if(condition, if_true, if_false), |s| {
if s.get("condition")?.is_true() {
Ok(s.get("if_true")?)
} else {
Ok(s.get("if_false")?)
}
});
color::expose(MODULES.get("sass:color").unwrap(), &mut f);
list::expose(MODULES.get("sass:list").unwrap(), &mut f);
map::expose(MODULES.get("sass:map").unwrap(), &mut f);
math::expose(MODULES.get("sass:math").unwrap(), &mut f);
meta::expose(MODULES.get("sass:meta").unwrap(), &mut f);
selector::expose(MODULES.get("sass:selector").unwrap(), &mut f);
string::expose(MODULES.get("sass:string").unwrap(), &mut f);
f
};
}
// argument helpers for the actual functions
trait CheckedArg<T> {
fn named(self, name: Name) -> Result<T, Error>;
}
impl<T> CheckedArg<T> for Result<T, String> {
fn named(self, name: Name) -> Result<T, Error> {
self.map_err(|e| Error::BadArgument(name, e))
}
}
fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error>
where
F: Fn(Value) -> Result<T, String>,
{
check(s.get(name.as_ref())?).named(name)
}
fn get_opt_check<T, F>(
s: &Scope,
name: Name,
check: F,
) -> Result<Option<T>, Error>
where
F: Fn(Value) -> Result<T, String>,
{
match s.get(name.as_ref())? {
Value::Null => Ok(None),
v => check(v).named(name).map(Some),
}
}
fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> {
get_checked(s, name.into(), check::numeric)
}
fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> {
get_checked(s, name, check::unitless_int)
}
fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> {
get_checked(s, name.into(), check::string)
}
fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> {
get_checked(s, name, check::va_list)
}
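// Illustrative usage (hypothetical parameter names, not from any real builtin):
//     let n = get_numeric(s, "number")?;          // any Numeric, unit preserved
//     let i = get_integer(s, "index".into())?;    // must be a unitless integer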
fn expected_to<'a, T>(value: &'a T, cond: &str) -> String
where
Formatted<'a, T>: std::fmt::Display,
{
format!(
"Expected {} to {}.",
Formatted {
value,
format: Format::introspect()
},
cond,
)
}
mod check {
use super::{expected_to, is_not};
use crate::css::{CssString, Value};
use crate::value::{ListSeparator, Number, Numeric};
pub fn numeric(v: Value) -> Result<Numeric, String> {
v.numeric_value().map_err(|v| is_not(&v, "a number"))
}
pub fn int(v: Value) ->
|
closure
|
identifier_name
|
mod.rs
|
/// A user-defined function is really a closure, it has a scope
/// where it is defined and a body of items.
UserDefined(ScopeRef, Vec<sass::Item>),
}
impl PartialOrd for FuncImpl {
fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
match (self, rhs) {
(&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None,
(&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => {
Some(cmp::Ordering::Less)
}
(&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => {
Some(cmp::Ordering::Greater)
}
(
&FuncImpl::UserDefined(ref _sa, ref a),
&FuncImpl::UserDefined(ref _sb, ref b),
) => a.partial_cmp(b),
}
}
}
impl cmp::PartialEq for FuncImpl {
fn eq(&self, rhs: &FuncImpl) -> bool {
match (self, rhs) {
(
&FuncImpl::UserDefined(ref sa, ref a),
&FuncImpl::UserDefined(ref sb, ref b),
) => ScopeRef::is_same(sa, sb) && a == b,
(&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => {
// Each builtin function is only created once, so this
// should be ok.
#[allow(clippy::vtable_address_comparisons)]
Arc::ptr_eq(a, b)
}
_ => false,
}
}
}
impl cmp::Eq for FuncImpl {}
impl fmt::Debug for FuncImpl {
fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
FuncImpl::Builtin(_) => write!(out, "(builtin function)"),
FuncImpl::UserDefined(..) => {
write!(out, "(user-defined function)")
}
}
}
}
trait Functions {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
);
}
impl Functions for Scope {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin(&self.get_name(), &name, args, body);
self.define_function(name, f);
}
}
impl Function {
/// Get a built-in function by name.
pub fn get_builtin(name: &Name) -> Option<&'static Function> {
FUNCTIONS.get(name)
}
/// Create a new `Function` from a rust implementation.
///
/// Note: This does not expose the function in any scope, it just
/// creates it.
pub fn builtin(
module: &str,
name: &Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) -> Self {
let pos = SourcePos::mock_function(name, &args, module);
Function {
args,
pos,
body: FuncImpl::Builtin(body),
}
}
/// Create a new `Function` from a scss implementation.
///
/// The scope is where the function is defined, used to bind any
/// non-parameter names in the body.
pub fn closure(
args: FormalArgs,
pos: SourcePos,
scope: ScopeRef,
body: Vec<sass::Item>,
) -> Self {
Function {
args,
pos,
body: FuncImpl::UserDefined(scope, body),
}
}
/// Call the function from a given scope and with a given set of
/// arguments.
pub fn call(
&self,
callscope: ScopeRef,
args: CallArgs,
) -> Result<Value, Error> {
let cs = "%%CALLING_SCOPE%%";
match self.body {
FuncImpl::Builtin(ref body) => {
let s = self.do_eval_args(
ScopeRef::new_global(callscope.get_format()),
args,
)?;
s.define_module(cs.into(), callscope);
body(&s)
}
FuncImpl::UserDefined(ref defscope, ref body) => {
let s = self.do_eval_args(defscope.clone(), args)?;
s.define_module(cs.into(), callscope);
Ok(s.eval_body(body)?.unwrap_or(Value::Null))
}
}
.map(Value::into_calculated)
}
fn do_eval_args(
&self,
def: ScopeRef,
args: CallArgs,
) -> Result<ScopeRef, Error> {
self.args.eval(def, args).map_err(|e| match e {
sass::ArgsError::Eval(e) => e,
ae => Error::BadArguments(ae.to_string(), self.pos.clone()),
})
}
}
lazy_static! {
static ref MODULES: BTreeMap<&'static str, Scope> = {
let mut modules = BTreeMap::new();
modules.insert("sass:color", color::create_module());
modules.insert("sass:list", list::create_module());
modules.insert("sass:map", map::create_module());
modules.insert("sass:math", math::create_module());
modules.insert("sass:meta", meta::create_module());
modules.insert("sass:selector", selector::create_module());
modules.insert("sass:string", string::create_module());
modules
};
}
/// Get a global module (e.g. `sass:math`) by name.
pub fn get_global_module(name: &str) -> Option<ScopeRef> {
MODULES.get(name).map(ScopeRef::Builtin)
}
type FunctionMap = BTreeMap<Name, Function>;
impl Functions for FunctionMap {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin("", &name, args, body);
self.insert(name, f);
}
}
lazy_static! {
static ref FUNCTIONS: FunctionMap = {
let mut f = BTreeMap::new();
def!(f, if(condition, if_true, if_false), |s| {
if s.get("condition")?.is_true() {
Ok(s.get("if_true")?)
} else {
Ok(s.get("if_false")?)
}
});
color::expose(MODULES.get("sass:color").unwrap(), &mut f);
list::expose(MODULES.get("sass:list").unwrap(), &mut f);
map::expose(MODULES.get("sass:map").unwrap(), &mut f);
math::expose(MODULES.get("sass:math").unwrap(), &mut f);
meta::expose(MODULES.get("sass:meta").unwrap(), &mut f);
selector::expose(MODULES.get("sass:selector").unwrap(), &mut f);
string::expose(MODULES.get("sass:string").unwrap(), &mut f);
f
};
}
// argument helpers for the actual functions
trait CheckedArg<T> {
fn named(self, name: Name) -> Result<T, Error>;
}
impl<T> CheckedArg<T> for Result<T, String> {
fn named(self, name: Name) -> Result<T, Error> {
self.map_err(|e| Error::BadArgument(name, e))
}
}
fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error>
where
F: Fn(Value) -> Result<T, String>,
{
check(s.get(name.as_ref())?).named(name)
}
fn get_opt_check<T, F>(
s: &Scope,
name: Name,
check: F,
) -> Result<Option<T>, Error>
where
F: Fn(Value) -> Result<T, String>,
{
match s.get(name.as_ref())? {
Value::Null => Ok(None),
v => check(v).named(name).map(Some),
}
}
fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> {
get_checked(s, name.into(), check::numeric)
}
fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> {
get_checked(s, name, check::unitless_int)
}
fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> {
|
fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> {
get_checked(s, name, check::va_list)
}
fn expected_to<'a, T>(value: &'a T, cond: &str) -> String
where
Formatted<'a, T>: std::fmt::Display,
{
format!(
"Expected {} to {}.",
Formatted {
value,
format: Format::introspect()
},
cond,
)
}
mod check {
use super::{expected_to, is_not};
use crate::css::{CssString, Value};
use crate::value::{ListSeparator, Number, Numeric};
pub fn numeric(v: Value) -> Result<Numeric, String> {
v.numeric_value().map_err(|v| is_not(&v, "a number"))
}
pub fn int(v: Value) -> Result
|
get_checked(s, name.into(), check::string)
}
|
random_line_split
|
mod.rs
|
/// A user-defined function is really a closure: it has a scope
/// where it is defined and a body of items.
UserDefined(ScopeRef, Vec<sass::Item>),
}
impl PartialOrd for FuncImpl {
fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
match (self, rhs) {
(&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None,
(&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => {
Some(cmp::Ordering::Less)
}
(&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => {
Some(cmp::Ordering::Greater)
}
(
&FuncImpl::UserDefined(ref _sa, ref a),
&FuncImpl::UserDefined(ref _sb, ref b),
) => a.partial_cmp(b),
}
}
}
impl cmp::PartialEq for FuncImpl {
fn eq(&self, rhs: &FuncImpl) -> bool {
match (self, rhs) {
(
&FuncImpl::UserDefined(ref sa, ref a),
&FuncImpl::UserDefined(ref sb, ref b),
) => ScopeRef::is_same(sa, sb) && a == b,
(&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => {
// Each builtin function is only created once, so this
// should be ok.
#[allow(clippy::vtable_address_comparisons)]
Arc::ptr_eq(a, b)
}
_ => false,
}
}
}
impl cmp::Eq for FuncImpl {}
impl fmt::Debug for FuncImpl {
fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
FuncImpl::Builtin(_) => write!(out, "(builtin function)"),
FuncImpl::UserDefined(..) => {
write!(out, "(user-defined function)")
}
}
}
}
trait Functions {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
);
}
impl Functions for Scope {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin(&self.get_name(), &name, args, body);
self.define_function(name, f);
}
}
impl Function {
/// Get a built-in function by name.
pub fn get_builtin(name: &Name) -> Option<&'static Function> {
FUNCTIONS.get(name)
}
    /// Create a new `Function` from a Rust implementation.
///
    /// Note: This does not expose the function in any scope; it just
/// creates it.
pub fn builtin(
module: &str,
name: &Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) -> Self {
let pos = SourcePos::mock_function(name, &args, module);
Function {
args,
pos,
body: FuncImpl::Builtin(body),
}
}
    /// Create a new `Function` from an SCSS implementation.
///
/// The scope is where the function is defined, used to bind any
/// non-parameter names in the body.
pub fn closure(
args: FormalArgs,
pos: SourcePos,
scope: ScopeRef,
body: Vec<sass::Item>,
) -> Self
|
/// Call the function from a given scope and with a given set of
/// arguments.
pub fn call(
&self,
callscope: ScopeRef,
args: CallArgs,
) -> Result<Value, Error> {
let cs = "%%CALLING_SCOPE%%";
match self.body {
FuncImpl::Builtin(ref body) => {
let s = self.do_eval_args(
ScopeRef::new_global(callscope.get_format()),
args,
)?;
s.define_module(cs.into(), callscope);
body(&s)
}
FuncImpl::UserDefined(ref defscope, ref body) => {
let s = self.do_eval_args(defscope.clone(), args)?;
s.define_module(cs.into(), callscope);
Ok(s.eval_body(body)?.unwrap_or(Value::Null))
}
}
.map(Value::into_calculated)
}
fn do_eval_args(
&self,
def: ScopeRef,
args: CallArgs,
) -> Result<ScopeRef, Error> {
self.args.eval(def, args).map_err(|e| match e {
sass::ArgsError::Eval(e) => e,
ae => Error::BadArguments(ae.to_string(), self.pos.clone()),
})
}
}
lazy_static! {
static ref MODULES: BTreeMap<&'static str, Scope> = {
let mut modules = BTreeMap::new();
modules.insert("sass:color", color::create_module());
modules.insert("sass:list", list::create_module());
modules.insert("sass:map", map::create_module());
modules.insert("sass:math", math::create_module());
modules.insert("sass:meta", meta::create_module());
modules.insert("sass:selector", selector::create_module());
modules.insert("sass:string", string::create_module());
modules
};
}
/// Get a global module (e.g. `sass:math`) by name.
pub fn get_global_module(name: &str) -> Option<ScopeRef> {
MODULES.get(name).map(ScopeRef::Builtin)
}
type FunctionMap = BTreeMap<Name, Function>;
impl Functions for FunctionMap {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin("", &name, args, body);
self.insert(name, f);
}
}
lazy_static! {
static ref FUNCTIONS: FunctionMap = {
let mut f = BTreeMap::new();
def!(f, if(condition, if_true, if_false), |s| {
if s.get("condition")?.is_true() {
Ok(s.get("if_true")?)
} else {
Ok(s.get("if_false")?)
}
});
color::expose(MODULES.get("sass:color").unwrap(), &mut f);
list::expose(MODULES.get("sass:list").unwrap(), &mut f);
map::expose(MODULES.get("sass:map").unwrap(), &mut f);
math::expose(MODULES.get("sass:math").unwrap(), &mut f);
meta::expose(MODULES.get("sass:meta").unwrap(), &mut f);
selector::expose(MODULES.get("sass:selector").unwrap(), &mut f);
string::expose(MODULES.get("sass:string").unwrap(), &mut f);
f
};
}
// argument helpers for the actual functions
trait CheckedArg<T> {
fn named(self, name: Name) -> Result<T, Error>;
}
impl<T> CheckedArg<T> for Result<T, String> {
fn named(self, name: Name) -> Result<T, Error> {
self.map_err(|e| Error::BadArgument(name, e))
}
}
fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error>
where
F: Fn(Value) -> Result<T, String>,
{
check(s.get(name.as_ref())?).named(name)
}
fn get_opt_check<T, F>(
s: &Scope,
name: Name,
check: F,
) -> Result<Option<T>, Error>
where
F: Fn(Value) -> Result<T, String>,
{
match s.get(name.as_ref())? {
Value::Null => Ok(None),
v => check(v).named(name).map(Some),
}
}
fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> {
get_checked(s, name.into(), check::numeric)
}
fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> {
get_checked(s, name, check::unitless_int)
}
fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> {
get_checked(s, name.into(), check::string)
}
fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> {
get_checked(s, name, check::va_list)
}
fn expected_to<'a, T>(value: &'a T, cond: &str) -> String
where
Formatted<'a, T>: std::fmt::Display,
{
format!(
"Expected {} to {}.",
Formatted {
value,
format: Format::introspect()
},
cond,
)
}
mod check {
use super::{expected_to, is_not};
use crate::css::{CssString, Value};
use crate::value::{ListSeparator, Number, Numeric};
pub fn numeric(v: Value) -> Result<Numeric, String> {
v.numeric_value().map_err(|v| is_not(&v, "a number"))
}
pub fn int(v: Value)
|
{
Function {
args,
pos,
body: FuncImpl::UserDefined(scope, body),
}
}
|
identifier_body
|
tic_tac_toe.py
|
8]]) == {player}
d2_win = set(self.data[[2, 4, 6]]) == {player}
if any([row_win, col_win, d1_win, d2_win]):
return ("win", player)
if self.counter["_"] == 0:
return ("tie", None)
else:
return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2")
def find_next_states(self):
"""Determine possible next moves. Returns a dict {index: new_state}"""
status, player = self.game_status
moves = {}
if status == "turn":
for idx in np.where(self.data == "_")[0]:
new_move = self.data.copy()
new_move[idx] = player
moves[idx] = "".join(new_move)
return moves
def printable_board(self, indent_char="\t", legend_hint=True, symbols=None):
|
)
else:
return "\n".join([indent_char + " ".join(row) for row in board_symbols])
def gen_game_tree(state_init):
"""Generate full game tree from initial state"""
current_path = [state_init]
game_tree = {}
while current_path:
cur_state = current_path[-1]
if cur_state not in game_tree:
ttt = TicTacToe(cur_state)
game_tree[cur_state] = {
"unexplored": ttt.next_states,
"explored": [],
"status": ttt.game_status,
}
if game_tree[cur_state]["unexplored"]:
current_path.append(game_tree[cur_state]["unexplored"].pop(0))
else:
explored = current_path.pop(-1)
if explored != state_init:
game_tree[current_path[-1]]["explored"].append(explored)
status, player = game_tree[cur_state]["status"]
if status == "tie":
value = 0
outcomes = {0: 1}
elif status == "win":
value = -1 if player == "1" else 1
outcomes = {value: 1}
else:
value = (min if player == "1" else max)(
[
game_tree[state]["value"]
for state in game_tree[cur_state]["explored"]
]
)
outcomes = {}
for state in game_tree[cur_state]["explored"]:
for res, res_ct in game_tree[state]["outcomes"].items():
outcomes[res] = outcomes.get(res, 0) + res_ct
game_tree[cur_state]["value"] = value
game_tree[cur_state]["outcomes"] = outcomes
return game_tree
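# --- Editor's sketch (not part of the original file): gen_game_tree propagates a
# minimax value bottom-up: -1 means player "1" wins, +1 means player "2" wins,
# 0 is a tie, and an interior node takes min (player "1" to move) or max
# (player "2" to move) over its children. Hypothetical child values only:
_child_values = [-1, 0, 0]
_player_to_move = "2"
_value = (min if _player_to_move == "1" else max)(_child_values)
# _value == 0: the best player "2" can force from this position is a tie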
def answer_exercise():
"""Function to answer exercise in Chapter 2 section III"""
state = "1212__21_"
ttt = TicTacToe(state)
game_tree = gen_game_tree(state)
print(
f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n"
f"is {game_tree[state]['value']}"
)
return game_tree[state]["value"]
def learn(state="_________"):
"""Build game tree and export given initial state"""
game_tree = gen_game_tree(state)
with open(GAME_TREE_FILE, "w") as gt_file:
json.dump(game_tree, gt_file, indent=4)
def human(gstate: TicTacToe, *args):
"""Function for a human player"""
return input_with_validation("Please enter move.", list(gstate.next_moves.keys()))
def ai_w_level(gstate: TicTacToe, game_tree, level=3):
"""AI with levels
Level Descriptions
* 0 = stupid
* 1 = easy
* 2 = medium
* 3 = hard
* 4 = unfair
"""
assert isinstance(level, int), "`level` must be `int`"
assert 0 <= level <= 4, "level values must be from 0 to 4"
seed = random.random()
logging.debug(f"seed value: {seed:.3f}")
if level == 0:
ai_func = ai_derp
elif level == 1:
ai_func = ai_derp if seed <= 0.3 else ai_strategy1
elif level == 2:
ai_func = ai_derp if seed <= 0.2 else ai_strategy2
elif level == 3:
ai_func = ai_derp if seed <= 0.1 else ai_strategy3
elif level == 4:
ai_func = ai_strategy3
return ai_func(gstate, game_tree)
def ai_derp(gstate: TicTacToe, *args):
"""AI that randomly picks the next move"""
return random.choice(list(gstate.next_moves.keys()))
def ai_strategy1(gstate: TicTacToe, game_tree):
"""Strategy assuming opponent plays optimally"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
mod = -1 if player == "1" else 1
next_move_vals = {
idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = [idx for idx, val in next_move_vals.items() if val == max_val]
logging.debug("moves: %s; value: %i", moves, max_val)
move = random.choice(moves)
return move
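# --- Editor's sketch (not part of the original file): ai_strategy1 flips the sign
# of the global tree value so that "bigger is better" for whichever player is
# about to move, then picks uniformly among the best-scoring moves.
# The move values below are hypothetical:
import random

_move_values = {3: -1, 5: 0, 8: -1}            # global convention: -1 favours "1"
_player = "1"
_mod = -1 if _player == "1" else 1             # flip sign for the mover
_scored = {idx: _mod * v for idx, v in _move_values.items()}
_best = max(_scored.values())                  # 1
_candidates = [i for i, v in _scored.items() if v == _best]   # [3, 8]
_choice = random.choice(_candidates)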
def ai_strategy2(gstate: TicTacToe, game_tree):
"""Strategy maximizing the number of paths to winning end states"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1" else (1, -1)
next_move_vals = {
idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = {
idx: (
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0)
)
for idx, val in next_move_vals.items() if val == max_val
}
win_ct = {idx: vals[0] for idx, vals in moves.items()}
win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()}
lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()}
wl_ratio = {idx: vals[0] / max(vals[2], 0.5)
for idx, vals in moves.items()}
# criteria, agg_func = lose_pct, min
# criteria, agg_func = win_pct, max
criteria, agg_func = wl_ratio, max
if max_val == 1 and 1 in win_ct.values():
move = [idx for idx, val in win_ct.items() if val == 1][0]
else:
move = random.choice(
[idx for idx, val in criteria.items() if val ==
agg_func(criteria.values())]
)
logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n",
move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves)
# trash talk
if max_val == 1:
print(
"*beep* *boop* *beep*"
" -=[ I calculate chances of winning to be 100% ]=- "
"*beep* *boop* *beep*"
)
return move
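# --- Editor's sketch (not part of the original file): among equally-valued moves,
# ai_strategy2 ranks candidates by a win/loss path ratio, with max(losses, 0.5)
# guarding against division by zero. Hypothetical (win, tie, lose) path counts:
_paths = {2: (30, 10, 5), 6: (12, 20, 1)}
_wl_ratio = {idx: w / max(l, 0.5) for idx, (w, t, l) in _paths.items()}
# move 6 scores 12 / 1 = 12, beating move 2's 30 / 5 = 6
_best_move = max(_wl_ratio, key=_wl_ratio.get)   # 6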
def ai_strategy3(gstate: TicTacToe, game_tree):
"""AI strategy that maximizes the opponent's losing moves in the next turn"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1
|
"""Returns a string representing the game board for printing"""
symbols = symbols or self.symbols
assert len(symbols) == 2, "`symbols` must have exactly 2 elements"
data_symbols = self.data.copy()
for orig, new in zip(("1", "2"), symbols):
data_symbols[data_symbols == orig] = new
board_symbols = data_symbols.reshape((3, 3))
if legend_hint:
legend_board = np.where(
self.data == "_", range(9), " ").reshape((3, 3))
return "\n".join(
[indent_char + "GAME | INDEX"]
+ [indent_char + "===== | ====="]
+ [
indent_char + " ".join(b_row) + " | " + " ".join(l_row)
for b_row, l_row in zip(board_symbols, legend_board)
]
|
identifier_body
|
tic_tac_toe.py
|
8]]) == {player}
d2_win = set(self.data[[2, 4, 6]]) == {player}
if any([row_win, col_win, d1_win, d2_win]):
return ("win", player)
if self.counter["_"] == 0:
return ("tie", None)
else:
return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2")
def
|
(self):
"""Determine possible next moves. Returns a dict {index: new_state}"""
status, player = self.game_status
moves = {}
if status == "turn":
for idx in np.where(self.data == "_")[0]:
new_move = self.data.copy()
new_move[idx] = player
moves[idx] = "".join(new_move)
return moves
def printable_board(self, indent_char="\t", legend_hint=True, symbols=None):
"""Returns a string representing the game board for printing"""
symbols = symbols or self.symbols
assert len(symbols) == 2, "`symbols` must have exactly 2 elements"
data_symbols = self.data.copy()
for orig, new in zip(("1", "2"), symbols):
data_symbols[data_symbols == orig] = new
board_symbols = data_symbols.reshape((3, 3))
if legend_hint:
legend_board = np.where(
self.data == "_", range(9), " ").reshape((3, 3))
return "\n".join(
[indent_char + "GAME | INDEX"]
+ [indent_char + "===== | ====="]
+ [
indent_char + " ".join(b_row) + " | " + " ".join(l_row)
for b_row, l_row in zip(board_symbols, legend_board)
]
)
else:
return "\n".join([indent_char + " ".join(row) for row in board_symbols])
def gen_game_tree(state_init):
"""Generate full game tree from initial state"""
current_path = [state_init]
game_tree = {}
while current_path:
cur_state = current_path[-1]
if cur_state not in game_tree:
ttt = TicTacToe(cur_state)
game_tree[cur_state] = {
"unexplored": ttt.next_states,
"explored": [],
"status": ttt.game_status,
}
if game_tree[cur_state]["unexplored"]:
current_path.append(game_tree[cur_state]["unexplored"].pop(0))
else:
explored = current_path.pop(-1)
if explored != state_init:
game_tree[current_path[-1]]["explored"].append(explored)
status, player = game_tree[cur_state]["status"]
if status == "tie":
value = 0
outcomes = {0: 1}
elif status == "win":
value = -1 if player == "1" else 1
outcomes = {value: 1}
else:
value = (min if player == "1" else max)(
[
game_tree[state]["value"]
for state in game_tree[cur_state]["explored"]
]
)
outcomes = {}
for state in game_tree[cur_state]["explored"]:
for res, res_ct in game_tree[state]["outcomes"].items():
outcomes[res] = outcomes.get(res, 0) + res_ct
game_tree[cur_state]["value"] = value
game_tree[cur_state]["outcomes"] = outcomes
return game_tree
def answer_exercise():
"""Function to answer exercise in Chapter 2 section III"""
state = "1212__21_"
ttt = TicTacToe(state)
game_tree = gen_game_tree(state)
print(
f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n"
f"is {game_tree[state]['value']}"
)
return game_tree[state]["value"]
def learn(state="_________"):
"""Build game tree and export given initial state"""
game_tree = gen_game_tree(state)
with open(GAME_TREE_FILE, "w") as gt_file:
json.dump(game_tree, gt_file, indent=4)
def human(gstate: TicTacToe, *args):
"""Function for a human player"""
return input_with_validation("Please enter move.", list(gstate.next_moves.keys()))
def ai_w_level(gstate: TicTacToe, game_tree, level=3):
"""AI with levels
Level Descriptions
* 0 = stupid
* 1 = easy
* 2 = medium
* 3 = hard
* 4 = unfair
"""
assert isinstance(level, int), "`level` must be `int`"
assert 0 <= level <= 4, "level values must be from 0 to 4"
seed = random.random()
logging.debug(f"seed value: {seed:.3f}")
if level == 0:
ai_func = ai_derp
elif level == 1:
ai_func = ai_derp if seed <= 0.3 else ai_strategy1
elif level == 2:
ai_func = ai_derp if seed <= 0.2 else ai_strategy2
elif level == 3:
ai_func = ai_derp if seed <= 0.1 else ai_strategy3
elif level == 4:
ai_func = ai_strategy3
return ai_func(gstate, game_tree)
def ai_derp(gstate: TicTacToe, *args):
"""AI that randomly picks the next move"""
return random.choice(list(gstate.next_moves.keys()))
def ai_strategy1(gstate: TicTacToe, game_tree):
"""Strategy assuming opponent plays optimally"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
mod = -1 if player == "1" else 1
next_move_vals = {
idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = [idx for idx, val in next_move_vals.items() if val == max_val]
logging.debug("moves: %s; value: %i", moves, max_val)
move = random.choice(moves)
return move
def ai_strategy2(gstate: TicTacToe, game_tree):
"""Strategy maximizing the number of paths to winning end states"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1" else (1, -1)
next_move_vals = {
idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = {
idx: (
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0)
)
for idx, val in next_move_vals.items() if val == max_val
}
win_ct = {idx: vals[0] for idx, vals in moves.items()}
win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()}
lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()}
wl_ratio = {idx: vals[0] / max(vals[2], 0.5)
for idx, vals in moves.items()}
# criteria, agg_func = lose_pct, min
# criteria, agg_func = win_pct, max
criteria, agg_func = wl_ratio, max
if max_val == 1 and 1 in win_ct.values():
move = [idx for idx, val in win_ct.items() if val == 1][0]
else:
move = random.choice(
[idx for idx, val in criteria.items() if val ==
agg_func(criteria.values())]
)
logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n",
move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves)
# trash talk
if max_val == 1:
print(
"*beep* *boop* *beep*"
" -=[ I calculate chances of winning to be 100% ]=- "
"*beep* *boop* *beep*"
)
return move
def ai_strategy3(gstate: TicTacToe, game_tree):
"""AI strategy that maximizes the opponent's losing moves in the next turn"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1
|
find_next_states
|
identifier_name
|
tic_tac_toe.py
|
8]]) == {player}
d2_win = set(self.data[[2, 4, 6]]) == {player}
if any([row_win, col_win, d1_win, d2_win]):
return ("win", player)
if self.counter["_"] == 0:
return ("tie", None)
else:
return ("turn", "1" if self.counter["1"] == self.counter["2"] else "2")
def find_next_states(self):
"""Determine possible next moves. Returns a dict {index: new_state}"""
status, player = self.game_status
moves = {}
if status == "turn":
for idx in np.where(self.data == "_")[0]:
new_move = self.data.copy()
new_move[idx] = player
moves[idx] = "".join(new_move)
return moves
def printable_board(self, indent_char="\t", legend_hint=True, symbols=None):
"""Returns a string representing the game board for printing"""
symbols = symbols or self.symbols
assert len(symbols) == 2, "`symbols` must have exactly 2 elements"
data_symbols = self.data.copy()
for orig, new in zip(("1", "2"), symbols):
|
board_symbols = data_symbols.reshape((3, 3))
if legend_hint:
legend_board = np.where(
self.data == "_", range(9), " ").reshape((3, 3))
return "\n".join(
[indent_char + "GAME | INDEX"]
+ [indent_char + "===== | ====="]
+ [
indent_char + " ".join(b_row) + " | " + " ".join(l_row)
for b_row, l_row in zip(board_symbols, legend_board)
]
)
else:
return "\n".join([indent_char + " ".join(row) for row in board_symbols])
def gen_game_tree(state_init):
"""Generate full game tree from initial state"""
current_path = [state_init]
game_tree = {}
while current_path:
cur_state = current_path[-1]
if cur_state not in game_tree:
ttt = TicTacToe(cur_state)
game_tree[cur_state] = {
"unexplored": ttt.next_states,
"explored": [],
"status": ttt.game_status,
}
if game_tree[cur_state]["unexplored"]:
current_path.append(game_tree[cur_state]["unexplored"].pop(0))
else:
explored = current_path.pop(-1)
if explored != state_init:
game_tree[current_path[-1]]["explored"].append(explored)
status, player = game_tree[cur_state]["status"]
if status == "tie":
value = 0
outcomes = {0: 1}
elif status == "win":
value = -1 if player == "1" else 1
outcomes = {value: 1}
else:
value = (min if player == "1" else max)(
[
game_tree[state]["value"]
for state in game_tree[cur_state]["explored"]
]
)
outcomes = {}
for state in game_tree[cur_state]["explored"]:
for res, res_ct in game_tree[state]["outcomes"].items():
outcomes[res] = outcomes.get(res, 0) + res_ct
game_tree[cur_state]["value"] = value
game_tree[cur_state]["outcomes"] = outcomes
return game_tree
def answer_exercise():
"""Function to answer exercise in Chapter 2 section III"""
state = "1212__21_"
ttt = TicTacToe(state)
game_tree = gen_game_tree(state)
print(
f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n"
f"is {game_tree[state]['value']}"
)
return game_tree[state]["value"]
def learn(state="_________"):
"""Build game tree and export given initial state"""
game_tree = gen_game_tree(state)
with open(GAME_TREE_FILE, "w") as gt_file:
json.dump(game_tree, gt_file, indent=4)
def human(gstate: TicTacToe, *args):
"""Function for a human player"""
return input_with_validation("Please enter move.", list(gstate.next_moves.keys()))
def ai_w_level(gstate: TicTacToe, game_tree, level=3):
"""AI with levels
Level Descriptions
* 0 = stupid
* 1 = easy
* 2 = medium
* 3 = hard
* 4 = unfair
"""
assert isinstance(level, int), "`level` must be `int`"
assert 0 <= level <= 4, "level values must be from 0 to 4"
seed = random.random()
logging.debug(f"seed value: {seed:.3f}")
if level == 0:
ai_func = ai_derp
elif level == 1:
ai_func = ai_derp if seed <= 0.3 else ai_strategy1
elif level == 2:
ai_func = ai_derp if seed <= 0.2 else ai_strategy2
elif level == 3:
ai_func = ai_derp if seed <= 0.1 else ai_strategy3
elif level == 4:
ai_func = ai_strategy3
return ai_func(gstate, game_tree)
def ai_derp(gstate: TicTacToe, *args):
"""AI that randomly picks the next move"""
return random.choice(list(gstate.next_moves.keys()))
def ai_strategy1(gstate: TicTacToe, game_tree):
"""Strategy assuming opponent plays optimally"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
mod = -1 if player == "1" else 1
next_move_vals = {
idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = [idx for idx, val in next_move_vals.items() if val == max_val]
logging.debug("moves: %s; value: %i", moves, max_val)
move = random.choice(moves)
return move
def ai_strategy2(gstate: TicTacToe, game_tree):
"""Strategy maximizing the number of paths to winning end states"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1" else (1, -1)
next_move_vals = {
idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = {
idx: (
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0)
)
for idx, val in next_move_vals.items() if val == max_val
}
win_ct = {idx: vals[0] for idx, vals in moves.items()}
win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()}
lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()}
wl_ratio = {idx: vals[0] / max(vals[2], 0.5)
for idx, vals in moves.items()}
# criteria, agg_func = lose_pct, min
# criteria, agg_func = win_pct, max
criteria, agg_func = wl_ratio, max
if max_val == 1 and 1 in win_ct.values():
move = [idx for idx, val in win_ct.items() if val == 1][0]
else:
move = random.choice(
[idx for idx, val in criteria.items() if val ==
agg_func(criteria.values())]
)
logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n",
move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves)
# trash talk
if max_val == 1:
print(
"*beep* *boop* *beep*"
" -=[ I calculate chances of winning to be 100% ]=- "
"*beep* *boop* *beep*"
)
return move
def ai_strategy3(gstate: TicTacToe, game_tree):
"""AI strategy that maximizes the opponent's losing moves in the next turn"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1"
|
data_symbols[data_symbols == orig] = new
|
random_line_split
|
tic_tac_toe.py
|
_char + " ".join(row) for row in board_symbols])
def gen_game_tree(state_init):
"""Generate full game tree from initial state"""
current_path = [state_init]
game_tree = {}
while current_path:
cur_state = current_path[-1]
if cur_state not in game_tree:
ttt = TicTacToe(cur_state)
game_tree[cur_state] = {
"unexplored": ttt.next_states,
"explored": [],
"status": ttt.game_status,
}
if game_tree[cur_state]["unexplored"]:
current_path.append(game_tree[cur_state]["unexplored"].pop(0))
else:
explored = current_path.pop(-1)
if explored != state_init:
game_tree[current_path[-1]]["explored"].append(explored)
status, player = game_tree[cur_state]["status"]
if status == "tie":
value = 0
outcomes = {0: 1}
elif status == "win":
value = -1 if player == "1" else 1
outcomes = {value: 1}
else:
value = (min if player == "1" else max)(
[
game_tree[state]["value"]
for state in game_tree[cur_state]["explored"]
]
)
outcomes = {}
for state in game_tree[cur_state]["explored"]:
for res, res_ct in game_tree[state]["outcomes"].items():
outcomes[res] = outcomes.get(res, 0) + res_ct
game_tree[cur_state]["value"] = value
game_tree[cur_state]["outcomes"] = outcomes
return game_tree
def answer_exercise():
"""Function to answer exercise in Chapter 2 section III"""
state = "1212__21_"
ttt = TicTacToe(state)
game_tree = gen_game_tree(state)
print(
f"The value of game state:\n{ttt.printable_board(' ', legend_hint=False)}\n"
f"is {game_tree[state]['value']}"
)
return game_tree[state]["value"]
def learn(state="_________"):
"""Build game tree and export given initial state"""
game_tree = gen_game_tree(state)
with open(GAME_TREE_FILE, "w") as gt_file:
json.dump(game_tree, gt_file, indent=4)
def human(gstate: TicTacToe, *args):
"""Function for a human player"""
return input_with_validation("Please enter move.", list(gstate.next_moves.keys()))
def ai_w_level(gstate: TicTacToe, game_tree, level=3):
"""AI with levels
Level Descriptions
* 0 = stupid
* 1 = easy
* 2 = medium
* 3 = hard
* 4 = unfair
"""
assert isinstance(level, int), "`level` must be `int`"
assert 0 <= level <= 4, "level values must be from 0 to 4"
seed = random.random()
logging.debug(f"seed value: {seed:.3f}")
if level == 0:
ai_func = ai_derp
elif level == 1:
ai_func = ai_derp if seed <= 0.3 else ai_strategy1
elif level == 2:
ai_func = ai_derp if seed <= 0.2 else ai_strategy2
elif level == 3:
ai_func = ai_derp if seed <= 0.1 else ai_strategy3
elif level == 4:
ai_func = ai_strategy3
return ai_func(gstate, game_tree)
def ai_derp(gstate: TicTacToe, *args):
"""AI that randomly picks the next move"""
return random.choice(list(gstate.next_moves.keys()))
def ai_strategy1(gstate: TicTacToe, game_tree):
"""Strategy assuming opponent plays optimally"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
mod = -1 if player == "1" else 1
next_move_vals = {
idx: mod * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = [idx for idx, val in next_move_vals.items() if val == max_val]
logging.debug("moves: %s; value: %i", moves, max_val)
move = random.choice(moves)
return move
def ai_strategy2(gstate: TicTacToe, game_tree):
"""Strategy maximizing the number of paths to winning end states"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1" else (1, -1)
next_move_vals = {
idx: win * game_tree[state]["value"] for idx, state in gstate.next_moves.items()
}
max_val = max(next_move_vals.values())
moves = {
idx: (
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(win), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(0), 0),
game_tree[gstate.next_moves[idx]]["outcomes"].get(str(lose), 0)
)
for idx, val in next_move_vals.items() if val == max_val
}
win_ct = {idx: vals[0] for idx, vals in moves.items()}
win_pct = {idx: vals[0] / sum(vals) for idx, vals in moves.items()}
lose_pct = {idx: vals[2] / sum(vals) for idx, vals in moves.items()}
wl_ratio = {idx: vals[0] / max(vals[2], 0.5)
for idx, vals in moves.items()}
# criteria, agg_func = lose_pct, min
# criteria, agg_func = win_pct, max
criteria, agg_func = wl_ratio, max
if max_val == 1 and 1 in win_ct.values():
move = [idx for idx, val in win_ct.items() if val == 1][0]
else:
move = random.choice(
[idx for idx, val in criteria.items() if val ==
agg_func(criteria.values())]
)
logging.debug("move: %i; value: %i, win paths %%: %.1f%%, lose paths %%: %.1f%%, moves: %s\n",
move, max_val, win_pct[move] * 100, lose_pct[move] * 100, moves)
# trash talk
if max_val == 1:
print(
"*beep* *boop* *beep*"
" -=[ I calculate chances of winning to be 100% ]=- "
"*beep* *boop* *beep*"
)
return move
def ai_strategy3(gstate: TicTacToe, game_tree):
"""AI strategy that maximizes the opponent's losing moves in the next turn"""
status, player = gstate.game_status
if status != "turn":
logging.warning("Game status = %s. No move needed.", status)
return None
win, lose = (-1, 1) if player == "1" else (1, -1)
move_vals = {
move: win * game_tree[state]["value"] for move, state in gstate.next_moves.items()
}
max_val = max(move_vals.values())
if max_val == 1:
# ai_strategy2 can handle "won" states
return ai_strategy2(gstate, game_tree)
else:
ok_moves = [move for move, val in move_vals.items() if val == max_val]
move_vals = {
move: collections.Counter(
[
game_tree[state2]["value"] for state2 in game_tree[gstate.next_moves[move]]["explored"]
]
)
for move in ok_moves
}
move_eval = {
move: val_ctr.get(win, 0) / val_ctr.get(lose, 0.5)
for move, val_ctr in move_vals.items()
}
max_win_pct = max(move_eval.values())
good_moves = [move for move, win_pct in move_eval.items() if win_pct ==
max_win_pct]
move = random.choice(good_moves)
all_ct = sum(move_vals[move].values())
win_ct = move_vals[move].get(win, 0)
logging.debug(
"move: %i; value: %i, win %%: %.1f%%, moves: %s\n",
move, win * game_tree[gstate.state]["value"],
win_ct / max(all_ct, 0.1) * 100, move_vals
)
return move
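# --- Editor's sketch (not part of the original file): for each surviving move,
# ai_strategy3 counts the values of the opponent's replies and prefers moves whose
# replies contain many losing positions for the opponent. Hypothetical reply
# values (same -1/0/+1 convention), assuming player "2" is to move:
import collections

_replies = {4: [1, 1, 0, -1], 7: [1, 1, 0, 0]}
_win, _lose = 1, -1
_ratio = {m: collections.Counter(v).get(_win, 0) / collections.Counter(v).get(_lose, 0.5)
          for m, v in _replies.items()}
# move 7 scores 2 / 0.5 = 4, beating move 4's 2 / 1 = 2
_pick = max(_ratio, key=_ratio.get)   # 7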
def input_with_validation(text, choices):
"""Take input with validation"""
choice_vals = set(map(str, choices))
while True:
val = input(f"{text} | choices={choices}: ")
if val in choice_vals:
return val
else:
|
print(f"{val} is not a valid value. Please choose from: {choices}")
|
conditional_block
|
|
web-trader - console.py
|
:
Current stock price for a symbol from Yahoo! Finance or
-1 if price could not be extracted.
'''
price = -1
url = 'https://finance.yahoo.com/quote/'+symbol
page = req.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
scripts = soup.findAll('script')
# Cycle through all script blocks to find the one with data
# Ideally, data in JSON format should be properly converted and read
# Here it is simply extracted with string functions
for s in scripts:
pos = s.text.find('currentPrice')
if pos>0:
sPrice = s.text[pos:s.text.find(',',pos)]
try:
price = float(sPrice[sPrice.rfind(':')+1:])
except ValueError:
return -1
break
return price
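# --- Editor's sketch (not part of the original file): the string slicing in
# getPrice can be exercised offline on a made-up snippet shaped like the script
# text the page embeds (the surrounding keys and the 187.44 value are illustrative):
_script_text = 'root.App.main = {"price":{"currentPrice":{"raw":187.44,"fmt":"187.44"}}};'
_pos = _script_text.find('currentPrice')
if _pos > 0:
    _s_price = _script_text[_pos:_script_text.find(',', _pos)]  # 'currentPrice":{"raw":187.44'
    _price = float(_s_price[_s_price.rfind(':') + 1:])          # 187.44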
def showBlotter():
'''Displays entire blotter'''
print('\nCURRENT BLOTTER')
print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format(
'Side','Ticker','Volume','Price','Date and Time','Cash')
)
for index, row in blotter.iterrows():
print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format(
row['Side'],
row['Ticker'],
row['Volume'],
row['Price'],
str(row['Date']),
row['Cash']
))
if blotter.empty:
print('[No Trades Recorded]')
print('')
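# --- Editor's sketch (not part of the original file): the fixed-width format spec
# used by showBlotter can be checked on a single illustrative row (ticker, price,
# timestamp and cash below are made up):
print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format(
    'Buy', 'AAPL', 100, 187.44, '2018-03-01 10:30:00', 9981256.00))
# prints one row whose columns line up under the header printed above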
# Using same function for calculating UPL and RPL since the formula is the same
def getPL(position, price, wap):
'''Calculates UPL or RPL based on position/volume, market/sell price and WAP.'''
return (position*(price-wap))
def updateWAP(currentWAP, currentPosition, price, volume):
'''Calculates new WAP based on previous WAP and new buy information.'''
return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume)
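# --- Editor's sketch (not part of the original file): the WAP and P/L arithmetic
# defined just above, worked through with made-up prices and volumes:
_wap = updateWAP(0, 0, 100.0, 10)       # first buy, 10 shares @ $100 -> WAP 100.0
_wap = updateWAP(_wap, 10, 120.0, 30)   # add 30 shares @ $120 -> (1000 + 3600) / 40 = 115.0
_rpl = getPL(40, 118.0, _wap)           # sell all 40 @ $118 -> 40 * (118 - 115) = 120.0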
# Display current P/L with refreshed market price
def
|
():
'''Displays current P/L with updated market price.'''
print('\nCURRENT P/L')
print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'Ticker','Position','Market','WAP','UPL','RPL')
)
for index, row in pl.iterrows():
price = getPrice(row['Ticker'])
print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format(
row['Ticker'],
row['Position'],
price,
row['WAP'],
getPL(row['Position'], price, row['WAP']),
row['RPL'])
)
if pl.empty:
print('[Holding no positions]')
print('Cash: ${:,.2f}\n'.format(cash))
def getShares(side):
'''Prompt for and return number of shares to buy or sell.
Argument is either "buy" or "sell".'''
shares = input('Enter number of shares to {:s}: '.format(side))
try:
numShares = int(shares)
except ValueError:
print ('Invalid number of shares.\n')
return -1
if numShares<0:
print ('Invalid number of shares. Must be positive.\n')
return -1
return numShares
def getSymbol(side):
'''Prompt for and return stock symbol to buy or sell.
Argument is either "buy" or "sell".'''
symbol = input('Enter stock symbol to {:s}: '.format(side)).upper()
if symbol not in symbols:
print ('Invalid symbol. Valid symbols:')
for s in symbols:
print(s, end=" ")
print('\n')
return ''
return symbol
def doBuy(symbol, volume):
'''
Buys given amount of selected stock.
Args:
symbol: Stock to purchase.
volume: Number of shares to purchase.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Buy unsuccessful. Could not get valid market price.\n')
return False
# Check that have enough cash
if (volume*price)>cash:
print ('Buy unsuccessful. Not enough cash.\n')
return False
# Perform buy - add to P/L and adjust cash position
if symbol in pl.index:
pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'],
pl.loc[symbol]['Position'],
price, volume)
pl.at[symbol,'Position'] += volume
else:
entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]],
columns=['Ticker','Position','WAP','RPL'],
index=[symbol])
pl = pl.append(entry)
savePL()
cash -= volume*price
saveCash()
# Add to blotter
entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def doSell(symbol, volume):
'''
Sells given amount of selected stock.
Args:
symbol: Stock to sell.
volume: Number of shares to sell.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Check that have any shares
if symbol not in pl.index:
print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol))
return False
# Check that have enough shares
if volume>pl.loc[symbol]['Position']:
print ('Sell unsuccessful. Not enough shares.\n')
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Sell unsuccessful. Could not get valid market price.\n')
return False
# Perform sell
pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP'])
pl.at[symbol,'Position'] -= volume
cash += volume*price
saveCash()
# Reset WAP if closing the position
if pl.loc[symbol]['Position']==0:
pl.at[symbol,'WAP']=0
savePL()
# Add to blotter
entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def showMenu():
'''Displays main menu and prompts for choice. Returns valid choice.'''
while True:
print(' - ', end='')
for i in menu:
print(i, end=' - ')
print('')
option = input('Select option: ').upper()
if option in ['1','2','3','4','5','B','S','P','R','Q','1929']:
return option
print('Invalid choice. Please try again.\n')
def connectDB():
'''Connects to database.'''
global db
client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin")
db = client.web_trader
def retrievePL():
'''Retrieves full P/L information from the database.'''
global pl
global db
if db.pl.count()==0:
initializePL()
else:
pl = pd.DataFrame(list(db.pl.find({}
|
showPL
|
identifier_name
|
web-trader - console.py
|
Returns:
Current stock price for a symbol from Yahoo! Finance or
-1 if price could not be extracted.
'''
price = -1
url = 'https://finance.yahoo.com/quote/'+symbol
page = req.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
scripts = soup.findAll('script')
# Cycle through all script blocks to find the one with data
# Ideally, data in JSON format should be properly converted and read
# Here it is simply extracted with string functions
for s in scripts:
pos = s.text.find('currentPrice')
if pos>0:
sPrice = s.text[pos:s.text.find(',',pos)]
try:
price = float(sPrice[sPrice.rfind(':')+1:])
except ValueError:
return -1
break
return price
def showBlotter():
'''Displays entire blotter'''
print('\nCURRENT BLOTTER')
print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format(
'Side','Ticker','Volume','Price','Date and Time','Cash')
)
for index, row in blotter.iterrows():
print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format(
row['Side'],
row['Ticker'],
row['Volume'],
row['Price'],
str(row['Date']),
row['Cash']
))
if blotter.empty:
print('[No Trades Recorded]')
print('')
# Using same function for calculating UPL and RPL since the formula is the same
def getPL(position, price, wap):
'''Calculates UPL or RPL based on position/volume, market/sell price and WAP.'''
return (position*(price-wap))
def updateWAP(currentWAP, currentPosition, price, volume):
'''Calculates new WAP based on previous WAP and new buy information.'''
return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume)
# Display current P/L with refreshed market price
def showPL():
'''Displays current P/L with updated market price.'''
print('\nCURRENT P/L')
print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'Ticker','Position','Market','WAP','UPL','RPL')
)
for index, row in pl.iterrows():
price = getPrice(row['Ticker'])
print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format(
row['Ticker'],
row['Position'],
price,
row['WAP'],
getPL(row['Position'], price, row['WAP']),
row['RPL'])
)
if pl.empty:
print('[Holding no positions]')
print('Cash: ${:,.2f}\n'.format(cash))
def getShares(side):
'''Prompt for and return number of shares to buy or sell.
Argument is either "buy" or "sell".'''
shares = input('Enter number of shares to {:s}: '.format(side))
try:
numShares = int(shares)
except ValueError:
print ('Invalid number of shares.\n')
return -1
if numShares<0:
print ('Invalid number of shares. Must be positive.\n')
return -1
return numShares
def getSymbol(side):
|
def doBuy(symbol, volume):
'''
Buys given amount of selected stock.
Args:
symbol: Stock to purchase.
volume: Number of shares to purchase.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Buy unsuccessful. Could not get valid market price.\n')
return False
# Check that have enough cash
if (volume*price)>cash:
print ('Buy unsuccessful. Not enough cash.\n')
return False
# Perform buy - add to P/L and adjust cash position
if symbol in pl.index:
pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'],
pl.loc[symbol]['Position'],
price, volume)
pl.at[symbol,'Position'] += volume
else:
entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]],
columns=['Ticker','Position','WAP','RPL'],
index=[symbol])
pl = pl.append(entry)
savePL()
cash -= volume*price
saveCash()
# Add to blotter
entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def doSell(symbol, volume):
'''
Sells given amount of selected stock.
Args:
symbol: Stock to sell.
volume: Number of shares to sell.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Check that have any shares
if symbol not in pl.index:
print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol))
return False
# Check that have enough shares
if volume>pl.loc[symbol]['Position']:
print ('Sell unsuccessful. Not enough shares.\n')
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Sell unsuccessful. Could not get valid market price.\n')
return False
# Perform sell
pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP'])
pl.at[symbol,'Position'] -= volume
cash += volume*price
saveCash()
# Reset WAP if closing the position
if pl.loc[symbol]['Position']==0:
pl.at[symbol,'WAP']=0
savePL()
# Add to blotter
entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def showMenu():
'''Displays main menu and prompts for choice. Returns valid choice.'''
while True:
print(' - ', end='')
for i in menu:
print(i, end=' - ')
print('')
option = input('Select option: ').upper()
if option in ['1','2','3','4','5','B','S','P','R','Q','1929']:
return option
print('Invalid choice. Please try again.\n')
def connectDB():
'''Connects to database.'''
global db
client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin")
db = client.web_trader
def retrievePL():
'''Retrieves full P/L information from the database.'''
global pl
global db
if db.pl.count()==0:
initializePL()
else:
pl = pd.DataFrame(list(db.pl.find({}
|
'''Prompt for and return stock symbol to buy or sell.
Argument is either "buy" or "sell".'''
symbol = input('Enter stock symbol to {:s}: '.format(side)).upper()
if symbol not in symbols:
print ('Invalid symbol. Valid symbols:')
for s in symbols:
print(s, end=" ")
print('\n')
return ''
return symbol
|
identifier_body
|
web-trader - console.py
|
# Global variables
symbols = ('AAPL','AMZN','MSFT','INTC','SNAP')
menu = ['[B]uy','[S]ell','Show [P]/L','Show Blotte[r]','[Q]uit']
initial_cash = 10000000.00
cash = 0.00
blotter = pd.DataFrame(columns=['Side','Ticker','Volume','Price','Date','Cash'])
pl = pd.DataFrame(columns=['Ticker','Position','WAP','RPL'])
# Stock price is extracted from Yahoo! Finance page.
# Rather than extracting display value from HTML,
# the price is extracted from data stored in JavaScript code.
def getPrice(symbol):
'''
Gets current market price.
Args:
symbol: Ticker symbol.
Returns:
Current stock price for a symbol from Yahoo! Finance or
-1 if price could not be extracted.
'''
price = -1
url = 'https://finance.yahoo.com/quote/'+symbol
page = req.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
scripts = soup.findAll('script')
# Cycle through all script blocks to find the one with data
# Ideally, data in JSON format should be properly converted and read
# Here it is simply extracted with string functions
for s in scripts:
pos = s.text.find('currentPrice')
if pos>0:
sPrice = s.text[pos:s.text.find(',',pos)]
try:
price = float(sPrice[sPrice.rfind(':')+1:])
except ValueError:
return -1
break
return price
def showBlotter():
'''Displays entire blotter'''
print('\nCURRENT BLOTTER')
print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format(
'Side','Ticker','Volume','Price','Date and Time','Cash')
)
for index, row in blotter.iterrows():
print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format(
row['Side'],
row['Ticker'],
row['Volume'],
row['Price'],
str(row['Date']),
row['Cash']
))
if blotter.empty:
print('[No Trades Recorded]')
print('')
# Using same function for calculating UPL and RPL since the formula is the same
def getPL(position, price, wap):
'''Calculates UPL or RPL based on position/volume, market/sell price and WAP.'''
return (position*(price-wap))
def updateWAP(currentWAP, currentPosition, price, volume):
'''Calculates new WAP based on previous WAP and new buy information.'''
return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume)
# Display current P/L with refreshed market price
def showPL():
'''Displays current P/L with updated market price.'''
print('\nCURRENT P/L')
print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'Ticker','Position','Market','WAP','UPL','RPL')
)
for index, row in pl.iterrows():
price = getPrice(row['Ticker'])
print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format(
row['Ticker'],
row['Position'],
price,
row['WAP'],
getPL(row['Position'], price, row['WAP']),
row['RPL'])
)
if pl.empty:
print('[Holding no positions]')
print('Cash: ${:,.2f}\n'.format(cash))
def getShares(side):
'''Prompt for and return number of shares to buy or sell.
Argument is either "buy" or "sell".'''
shares = input('Enter number of shares to {:s}: '.format(side))
try:
numShares = int(shares)
except ValueError:
print ('Invalid number of shares.\n')
return -1
if numShares<0:
print ('Invalid number of shares. Must be positive.\n')
return -1
return numShares
def getSymbol(side):
'''Prompt for and return stock symbol to buy or sell.
Argument is either "buy" or "sell".'''
symbol = input('Enter stock symbol to {:s}: '.format(side)).upper()
if symbol not in symbols:
print ('Invalid symbol. Valid symbols:')
for s in symbols:
print(s, end=" ")
print('\n')
return ''
return symbol
def doBuy(symbol, volume):
'''
Buys given amount of selected stock.
Args:
symbol: Stock to purchase.
volume: Number of shares to purchase.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Buy unsuccessful. Could not get valid market price.\n')
return False
# Check that have enough cash
if (volume*price)>cash:
print ('Buy unsuccessful. Not enough cash.\n')
return False
# Perform buy - add to P/L and adjust cash position
if symbol in pl.index:
pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'],
pl.loc[symbol]['Position'],
price, volume)
pl.at[symbol,'Position'] += volume
else:
entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]],
columns=['Ticker','Position','WAP','RPL'],
index=[symbol])
pl = pl.append(entry)
savePL()
cash -= volume*price
saveCash()
# Add to blotter
entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def doSell(symbol, volume):
'''
Sells given amount of selected stock.
Args:
symbol: Stock to sell.
volume: Number of shares to sell.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Check that have any shares
if symbol not in pl.index:
print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol))
return False
# Check that have enough shares
if volume>pl.loc[symbol]['Position']:
print ('Sell unsuccessful. Not enough shares.\n')
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Sell unsuccessful. Could not get valid market price.\n')
return False
# Perform sell
pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP'])
pl.at[symbol,'Position'] -= volume
cash += volume*price
saveCash()
# Reset WAP if closing the position
if pl.loc[symbol]['Position']==0:
pl.at[symbol,'WAP']=0
savePL()
# Add to blotter
entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def showMenu():
'''Displays main menu and prompts for choice. Returns valid choice.'''
while True:
print(' - ', end='')
for i in menu:
print(i, end=' - ')
print('')
option = input('Select option: ').upper()
if option in ['1','2','3','4','5','B','S','P','R','Q','1929']:
return option
print('Invalid choice. Please try again.\n')
|
from datetime import datetime
from pymongo import MongoClient
|
random_line_split
|
|
web-trader - console.py
|
Returns:
Current stock price for a symbol from Yahoo! Finance or
-1 if price could not be extracted.
'''
price = -1
url = 'https://finance.yahoo.com/quote/'+symbol
page = req.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
scripts = soup.findAll('script')
# Cycle through all script blocks to find the one with data
# Ideally, data in JSON format should be properly converted and read
# Here it is simply extracted with string functions
for s in scripts:
pos = s.text.find('currentPrice')
if pos>0:
sPrice = s.text[pos:s.text.find(',',pos)]
try:
price = float(sPrice[sPrice.rfind(':')+1:])
except ValueError:
return -1
break
return price
def showBlotter():
'''Displays entire blotter'''
print('\nCURRENT BLOTTER')
print('{:<5s} {:<7s} {:>8s} {:>10s} {:>30s} {:>15s}'.format(
'Side','Ticker','Volume','Price','Date and Time','Cash')
)
for index, row in blotter.iterrows():
print('{:<5s} {:<7s} {:>8d} {:>10.2f} {:>30s} {:>15.2f}'.format(
row['Side'],
row['Ticker'],
row['Volume'],
row['Price'],
str(row['Date']),
row['Cash']
))
if blotter.empty:
print('[No Trades Recorded]')
print('')
# Using same function for calculating UPL and RPL since the formula is the same
def getPL(position, price, wap):
'''Calculates UPL or RPL based on position/volume, market/sell price and WAP.'''
return (position*(price-wap))
def updateWAP(currentWAP, currentPosition, price, volume):
'''Calculates new WAP based on previous WAP and new buy information.'''
return ((currentWAP*currentPosition)+(price*volume))/(currentPosition+volume)
# Display current P/L with refreshed market price
def showPL():
'''Displays current P/L with updated market price.'''
print('\nCURRENT P/L')
print('{:<7s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'Ticker','Position','Market','WAP','UPL','RPL')
)
for index, row in pl.iterrows():
price = getPrice(row['Ticker'])
print('{:<7s} {:>10d} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'.format(
row['Ticker'],
row['Position'],
price,
row['WAP'],
getPL(row['Position'], price, row['WAP']),
row['RPL'])
)
if pl.empty:
print('[Holding no positions]')
print('Cash: ${:,.2f}\n'.format(cash))
def getShares(side):
'''Prompt for and return number of shares to buy or sell.
Argument is either "buy" or "sell".'''
shares = input('Enter number of shares to {:s}: '.format(side))
try:
numShares = int(shares)
except ValueError:
print ('Invalid number of shares.\n')
return -1
if numShares<0:
print ('Invalid number of shares. Must be positive.\n')
return -1
return numShares
def getSymbol(side):
'''Prompt for and return stock symbol to buy or sell.
Argument is either "buy" or "sell".'''
symbol = input('Enter stock symbol to {:s}: '.format(side)).upper()
if symbol not in symbols:
print ('Invalid symbol. Valid symbols:')
for s in symbols:
print(s, end=" ")
print('\n')
return ''
return symbol
def doBuy(symbol, volume):
'''
Buys given amount of selected stock.
Args:
symbol: Stock to purchase.
volume: Number of shares to purchase.
Returns:
TRUE if successful and FALSE otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Buy unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Buy unsuccessful. Could not get valid market price.\n')
return False
# Check that we have enough cash
if (volume*price)>cash:
print ('Buy unsuccessful. Not enough cash.\n')
return False
# Perform buy - add to P/L and adjust cash position
if symbol in pl.index:
pl.at[symbol,'WAP'] = updateWAP(pl.loc[symbol]['WAP'],
pl.loc[symbol]['Position'],
price, volume)
pl.at[symbol,'Position'] += volume
else:
entry = pd.DataFrame([[symbol,volume,updateWAP(0,0,price,volume),0]],
columns=['Ticker','Position','WAP','RPL'],
index=[symbol])
pl = pl.append(entry)
savePL()
cash -= volume*price
saveCash()
# Add to blotter
entry = pd.DataFrame([['Buy',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Buy successful. Purchased {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def doSell(symbol, volume):
'''
Sells given amount of selected stock.
Args:
symbol: Stock to sell.
volume: Number of shares to sell.
Returns:
True if successful and False otherwise.
'''
global cash
global blotter
global pl
global db
# Check that it's a valid symbol
if symbol not in symbols:
print ('Sell unsuccessful. Not a valid symbol ({:s}).\n'.format(symbol))
return False
# Check that we hold any shares
if symbol not in pl.index:
print ('Sell unsuccessful. Not holding ({:s}).\n'.format(symbol))
return False
# Check that we have enough shares
if volume>pl.loc[symbol]['Position']:
print ('Sell unsuccessful. Not enough shares.\n')
return False
# Refresh price to get most up to date information
price = getPrice(symbol)
if price<0:
print ('Sell unsuccessful. Could not get valid market price.\n')
return False
# Perform sell
pl.at[symbol,'RPL'] += getPL(volume, price, pl.loc[symbol]['WAP'])
pl.at[symbol,'Position'] -= volume
cash += volume*price
saveCash()
# Reset WAP if closing the position
if pl.loc[symbol]['Position']==0:
pl.at[symbol,'WAP']=0
savePL()
# Add to blotter
entry = pd.DataFrame([['Sell',symbol,volume,price,datetime.now(),cash]],
columns=['Side','Ticker','Volume','Price','Date','Cash'])
blotter = blotter.append(entry, ignore_index=True)
db.blotter.insert_one(entry.to_dict('records')[0])
# Output status
print ('Sell successful. Sold {:,d} shares of {:s} at ${:,.2f}.\n'.format(volume, symbol, price))
return True
def showMenu():
'''Displays main menu and prompts for choice. Returns valid choice.'''
while True:
print(' - ', end='')
for i in menu:
print(i, end=' - ')
print('')
option = input('Select option: ').upper()
if option in ['1','2','3','4','5','B','S','P','R','Q','1929']:
|
print('Invalid choice. Please try again.\n')
def connectDB():
'''Connects to database.'''
global db
client = MongoClient("mongodb://trader:traderpw@data602-shard-00-00-thsko.mongodb.net:27017,data602-shard-00-01-thsko.mongodb.net:27017,data602-shard-00-02-thsko.mongodb.net:27017/test?ssl=true&replicaSet=Data602-shard-0&authSource=admin")
db = client.web_trader
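# Editor's note: a minimal sketch (not part of the original) of reading the connection
# string from an environment variable instead of hard-coding credentials in the source.
# The variable name MONGODB_URI is an assumption chosen for this example.
import os
from pymongo import MongoClient

def connectDBFromEnv():
    '''Connects to the database using a URI taken from the MONGODB_URI environment variable.'''
    global db
    uri = os.environ.get('MONGODB_URI')
    if not uri:
        raise RuntimeError('MONGODB_URI is not set')
    db = MongoClient(uri).web_trader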
def retrievePL():
'''Retrieves full P/L information from the database.'''
global pl
global db
if db.pl.count()==0:
initializePL()
else:
pl = pd.DataFrame(list(db.pl.find({}
|
return option
|
conditional_block
|
provider_test.go
|
Mxnhfag5qlrObQW3K2miTpGYkl
CIRRNFMIvwJBAPtatE1evu25R3NSTU2YwQgkEymh40PW+lncYge6ZqZGfK7J5JBK
wr1ug7KjTJgIfY2Sg2VHn56HAdA4RUl2xOcCQQDZqnTxpQ6DHYSFqwg04cHhYP8H
QOF0Z8WnEX4g8Em/N2X26BK+wKXig2d6fIhghu/fLaNKZJK8FOK8CE1GDuWPAkEA
wrP6Ysx3vZH+JPil5Ovk6zd2mJNMhmpqt10dmrrrdPW483R01sjynOaUobYZSNOa
3iWWHsgifxw5bV+JXGTiFQJBAKwh6Hvli5hcfoepPMz2RQnmU1NM8hJOHHeZh+eT
z6hlMpOS9rSjABcBdXxXjFXtIEjWUG5Tj8yOYd735zY8Ny8=
-----END RSA PRIVATE KEY-----`
pemToCertificate := func(bytes []byte) x509.Certificate {
pem, _ := pem.Decode(bytes)
if pem == nil {
panic("failed decoding PEM")
}
certificate, err := x509.ParseCertificate(pem.Bytes)
if err != nil {
panic(err)
}
return *certificate
}
BeforeEach(func() {
fs = *boshsysfakes.NewFakeFileSystem()
fs.WriteFileString("/path/ca1/certificate", ca1crtStr)
fs.WriteFileString("/path/ca1/private_key", ca1keyStr)
fs.WriteFileString("/path/ca2/certificate", "broken")
fs.WriteFileString("/path/ca2/private_key", "broken")
logger, _ = logrustest.NewNullLogger()
})
Describe("SignCertificate", func() {
var testKey *rsa.PrivateKey
var template x509.Certificate
BeforeEach(func() {
var err error
testKey, err = rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
Fail("generating private key")
}
template = x509.Certificate{
SerialNumber: big.NewInt(12345),
Subject: pkix.Name{
CommonName: "ssoca-fake1",
},
}
})
It("signs certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/private_key",
},
&fs,
logger,
)
bytes, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).ToNot(HaveOccurred())
Expect(len(bytes)).To(BeNumerically(">", 0))
certificate := pemToCertificate(bytes)
Expect(certificate.SerialNumber).To(BeEquivalentTo(big.NewInt(12345)))
Expect(certificate.Subject.CommonName).To(Equal("ssoca-fake1"))
caCertificate := pemToCertificate([]byte(ca1crtStr))
err = certificate.CheckSignatureFrom(&caCertificate)
Expect(err).ToNot(HaveOccurred())
})
Context("certificate/key errors", func() {
It("errors on missing certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca0/certificate",
PrivateKeyPath: "/path/ca1/private_key",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA certificate"))
})
It("errors on missing private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca0/private_key",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
It("errors on misconfigured private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/certificate",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
It("errors on invalid private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca2/certificate",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
})
})
Describe("SignSSHCertificate", func() {
var testKey *rsa.PrivateKey
var cert ssh.Certificate
BeforeEach(func() {
var err error
testKey, err = rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
Fail("generating private key")
}
publicKey, err := ssh.NewPublicKey(&testKey.PublicKey)
if err != nil {
Fail("parsing to public key")
}
cert = ssh.Certificate{
Nonce: []byte("ssoca-fake1"),
Key: publicKey,
CertType: ssh.UserCert,
}
})
It("signs certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/private_key",
},
&fs,
logger,
)
Expect(cert.Signature).To(BeNil())
err := subject.SignSSHCertificate(&cert, logrus.Fields{})
Expect(err).ToNot(HaveOccurred())
// @todo use Verify instead
Expect(cert.Signature).ToNot(BeNil())
})
Context("certificate/key errors", func() {
It("errors on missing private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca0/private_key",
},
&fs,
logger,
)
err := subject.SignSSHCertificate(&cert, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
It("errors on misconfigured private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/certificate",
},
&fs,
logger,
)
err := subject.SignSSHCertificate(&cert, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
It("errors on invalid private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca2/certificate",
},
&fs,
logger,
)
err := subject.SignSSHCertificate(&cert, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
})
})
Describe("GetCertificate", func() {
It("provides certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/private_key",
},
&fs,
logger,
)
crt, err := subject.GetCertificate()
Expect(err).ToNot(HaveOccurred())
Expect(crt).To(BeAssignableToTypeOf(&x509.Certificate{}))
Expect(crt.IsCA).To(BeTrue())
Expect(crt.Subject.CommonName).To(Equal("ssoca-test"))
})
Context("filesystem errors", func() {
It("errors", func() {
subject = NewProvider(
|
"name1",
|
random_line_split
|
|
provider_test.go
|
RM9KAlUipIFkCAwEAAaNFMEMwDgYDVR0PAQH/BAQD
AgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFP8lIbNl3zZPEHF17cFU
NFsK/0/oMA0GCSqGSIb3DQEBCwUAA4GBADMCd4nzc19voa60lNknhsihcfyNUeUt
EEsLCceK+9F1u2Xdj+mTNOh3MI+5m7wmFLiHuUtQovHMJ4xUpoHa6Iznc+QCbow4
SMO3sf1847tASv3eUFwEUt9vv39vtey6C6ftiUUImzZYfx6FO/A62uGEg2w3IOJ+
3cCXYiulfsyv
-----END CERTIFICATE-----`
var ca1keyStr = `-----BEGIN RSA PRIVATE KEY-----
MIICXwIBAAKBgQDpN3e+wD9/2UdA94jMkH0nUlNdoNV+qgKVTZlGW5rsGaV9c5Ce
Cj/U6Z607dMcSv6bEUwaB8lnoK6MFF3cDv/eH4jDvaMoYFqDiIQEj24FzJ5GBZ3N
xXuXt3NBPTRcIGeQklElXiOgMii+q4DPqIp/gCXjDJDmTVEz0oCVSKkgWQIDAQAB
AoGBANC3T3drXmjw74/4+Hj7Jsa2Kt20Pt1pEX7FP9Nz0CZUnYK0lkyaJ55IpjyO
S00a4NmulUkGhv0zFINRBt8WnW1bjBxNmqyBYh2diO3vA/gk8U1gcifW1LQt8WmE
ietvN3OFXI1a7FipchCZYQn5Rr8O3a/tjwohtWIDdaDltw+xAkEA7Ybxu8OXQnvy
Y+fDISRGG5vDFGnNGe9KcREIxSF6LWJ7+ap5LmMxnhfag5qlrObQW3K2miTpGYkl
CIRRNFMIvwJBAPtatE1evu25R3NSTU2YwQgkEymh40PW+lncYge6ZqZGfK7J5JBK
wr1ug7KjTJgIfY2Sg2VHn56HAdA4RUl2xOcCQQDZqnTxpQ6DHYSFqwg04cHhYP8H
QOF0Z8WnEX4g8Em/N2X26BK+wKXig2d6fIhghu/fLaNKZJK8FOK8CE1GDuWPAkEA
wrP6Ysx3vZH+JPil5Ovk6zd2mJNMhmpqt10dmrrrdPW483R01sjynOaUobYZSNOa
3iWWHsgifxw5bV+JXGTiFQJBAKwh6Hvli5hcfoepPMz2RQnmU1NM8hJOHHeZh+eT
z6hlMpOS9rSjABcBdXxXjFXtIEjWUG5Tj8yOYd735zY8Ny8=
-----END RSA PRIVATE KEY-----`
pemToCertificate := func(bytes []byte) x509.Certificate {
pem, _ := pem.Decode(bytes)
if pem == nil {
panic("failed decoding PEM")
}
certificate, err := x509.ParseCertificate(pem.Bytes)
if err != nil {
panic(err)
}
return *certificate
}
BeforeEach(func() {
fs = *boshsysfakes.NewFakeFileSystem()
fs.WriteFileString("/path/ca1/certificate", ca1crtStr)
fs.WriteFileString("/path/ca1/private_key", ca1keyStr)
fs.WriteFileString("/path/ca2/certificate", "broken")
fs.WriteFileString("/path/ca2/private_key", "broken")
logger, _ = logrustest.NewNullLogger()
})
Describe("SignCertificate", func() {
var testKey *rsa.PrivateKey
var template x509.Certificate
BeforeEach(func() {
var err error
testKey, err = rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
Fail("generating private key")
}
template = x509.Certificate{
SerialNumber: big.NewInt(12345),
Subject: pkix.Name{
CommonName: "ssoca-fake1",
},
}
})
It("signs certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/private_key",
},
&fs,
logger,
)
bytes, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).ToNot(HaveOccurred())
Expect(len(bytes)).To(BeNumerically(">", 0))
certificate := pemToCertificate(bytes)
Expect(certificate.SerialNumber).To(BeEquivalentTo(big.NewInt(12345)))
Expect(certificate.Subject.CommonName).To(Equal("ssoca-fake1"))
caCertificate := pemToCertificate([]byte(ca1crtStr))
err = certificate.CheckSignatureFrom(&caCertificate)
Expect(err).ToNot(HaveOccurred())
})
Context("certificate/key errors", func() {
It("errors on missing certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca0/certificate",
PrivateKeyPath: "/path/ca1/private_key",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA certificate"))
})
It("errors on missing private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca0/private_key",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
It("errors on misconfigured private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/certificate",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
It("errors on invalid private key", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca2/certificate",
},
&fs,
logger,
)
_, err := subject.SignCertificate(&template, &testKey.PublicKey, logrus.Fields{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("getting CA private key"))
})
})
})
Describe("SignSSHCertificate", func() {
var testKey *rsa.PrivateKey
var cert ssh.Certificate
BeforeEach(func() {
var err error
testKey, err = rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
Fail("generating private key")
}
publicKey, err := ssh.NewPublicKey(&testKey.PublicKey)
if err != nil
|
cert = ssh.Certificate{
Nonce: []byte("ssoca-fake1"),
Key: publicKey,
CertType: ssh.UserCert,
}
})
It("signs certificate", func() {
subject = NewProvider(
"name1",
Config{
CertificatePath: "/path/ca1/certificate",
PrivateKeyPath: "/path/ca1/private_key",
|
{
Fail("parsing to public key")
}
|
conditional_block
|
MessageSigner.go
|
dsa.PublicKey) error {
var err error
message := payload
// first sign, then encrypt as per RFC
if signer.signMessages {
message, _ = CreateJWSSignature(string(payload), signer.privateKey)
}
emessage, err := EncryptMessage(message, publicKey)
err = signer.messenger.Publish(address, retained, emessage)
return err
}
// PublishSigned signs the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishSigned(
address string, retained bool, payload string) error {
var err error
// default is unsigned
message := payload
if signer.signMessages {
message, err = CreateJWSSignature(string(payload), signer.privateKey)
if err != nil {
logrus.Errorf("Publisher.publishMessage: Error signing message for address %s: %s", address, err)
}
}
err = signer.messenger.Publish(address, retained, message)
return err
}
// NewMessageSigner creates a new instance for signing and verifying published messages
// If getPublicKey is not provided, verification of signature is skipped
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
signer := &MessageSigner{
GetPublicKey: getPublicKey,
messenger: messenger,
signMessages: true,
privateKey: signingKey, // private key for signing
}
return signer
}
/*
* Helper Functions for signing and verification
*/
// CreateEcdsaSignature creates an ECDSA256 signature from the payload using the provided private key
// This returns a base64url encoded signature
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
if privateKey == nil {
return ""
}
hashed := sha256.Sum256(payload)
r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
if err != nil {
return ""
}
sig, err := asn1.Marshal(ECDSASignature{r, s})
return base64.URLEncoding.EncodeToString(sig)
}
// SignIdentity updates the base64URL encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
identCopy := *publicIdent
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
sigStr := CreateEcdsaSignature(payload, privKey)
publicIdent.IdentitySignature = sigStr
}
// CreateJWSSignature signs the payload using JWS ES256 and returns the JWS compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
if err != nil {
return "", err
}
signedObject, err := joseSigner.Sign([]byte(payload))
if err != nil {
return "", err
}
// serialized := signedObject.FullSerialize()
serialized, err := signedObject.CompactSerialize()
return serialized, err
}
// DecryptMessage deserializes and decrypts the message using JWE
// This returns the decrypted message, or the input message if the message was not encrypted
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
message = serialized
decrypter, err := jose.ParseEncrypted(serialized)
if err == nil {
dmessage, err := decrypter.Decrypt(privateKey)
message = string(dmessage)
return message, true, err
}
return message, false, err
}
// EncryptMessage encrypts and serializes the message using JWE
func EncryptMessage(message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
var jwe *jose.JSONWebEncryption
recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
if encrypter != nil {
jwe, err = encrypter.Encrypt([]byte(message))
}
if err != nil {
return message, err
}
serialized, _ = jwe.CompactSerialize()
return serialized, err
}
// VerifyIdentitySignature verifies a base64URL encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
// the signing took place with the signature field empty
identCopy := *ident
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
err := VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
// signingKey := jose.SigningKey{Algorithm: jose.ES256, Key: privKey}
// joseSigner, _ := jose.NewSigner(signingKey, nil)
// jwsObject, _ := joseSigner.Verify(payload)
// sig := jwsObject.Signatures[0].Signature
// sigStr := base64.URLEncoding.EncodeToString(sig)
// return sigStr
return err
}
// VerifyEcdsaSignature verifies the payload against the base64url encoded signature using the public key
// payload is any raw data
// signatureB64urlEncoded is the ecdsa 256 URL encoded signature
// Intended for signing an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
var rs ECDSASignature
if publicKey == nil {
return errors.New("VerifyEcdsaSignature: publicKey is nil")
}
signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
if err != nil {
return errors.New("VerifyEcdsaSignature: Invalid signature")
}
if _, err = asn1.Unmarshal(signature, &rs); err != nil {
return errors.New("VerifyEcdsaSignature: Payload is not ASN")
}
hashed := sha256.Sum256(payload)
verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
if !verified {
return errors.New("VerifyEcdsaSignature: Signature does not match payload")
}
return nil
}
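Editor's note: the signature format used by CreateEcdsaSignature and VerifyEcdsaSignature above (SHA-256 over the payload, ASN.1/DER-encoded (r, s), then base64url) is not Go-specific. The sketch below reproduces the same scheme in Python for illustration only; it assumes the third-party "cryptography" package and uses invented function names, so it is not part of this module.
# Illustrative Python sketch of the ECDSA-over-SHA256 + DER + base64url scheme
# (assumes the third-party "cryptography" package; names are invented for the example).
import base64
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.exceptions import InvalidSignature

def create_ecdsa_signature(payload: bytes, private_key) -> str:
    # sign() hashes the payload with SHA-256 and returns a DER-encoded (r, s) pair
    der_sig = private_key.sign(payload, ec.ECDSA(hashes.SHA256()))
    return base64.urlsafe_b64encode(der_sig).decode('ascii')

def verify_ecdsa_signature(payload: bytes, signature_b64url: str, public_key) -> bool:
    try:
        der_sig = base64.urlsafe_b64decode(signature_b64url)
        public_key.verify(der_sig, payload, ec.ECDSA(hashes.SHA256()))
        return True
    except (ValueError, InvalidSignature):
        return False

# usage: key = ec.generate_private_key(ec.SECP256R1())
#        sig = create_ecdsa_signature(b"payload", key)
#        assert verify_ecdsa_signature(b"payload", sig, key.public_key())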
// VerifyJWSMessage verifies a signed message and returns its payload
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
// Intended for testing, as the application uses VerifySenderJWSSignature instead.
func VerifyJWSMessage(message string, publicKey *ecdsa.PublicKey) (payload string, err error) {
if publicKey == nil {
err := errors.New("VerifyJWSMessage: public key is nil")
return "", err
}
jwsSignature, err := jose.ParseSigned(message)
if err != nil {
return "", err
}
payloadB, err := jwsSignature.Verify(publicKey)
return string(payloadB), err
}
// VerifySenderJWSSignature verifies if a message is JWS signed. If signed then the signature is verified
// using the 'Sender' or 'Address' attributes to determine the public key to verify with.
// To verify correctly, the sender has to be a known publisher and verified with the DSS.
// object MUST be a pointer to the type otherwise unmarshal fails.
//
// getPublicKey is a lookup function for providing the public key from the given sender address.
// it should only provide a public key if the publisher is known and verified by the DSS, or
// if this zone does not use a DSS (publisher are protected through message bus ACLs)
// If not provided then signature verification will succeed.
//
// The rawMessage is json unmarshalled into the given object.
//
// This returns a flag if the message was signed and if so, an error if the verification failed
func VerifySenderJWSSignature(rawMessage string, object interface{}, getPublicKey func(address string) *ecdsa.PublicKey) (isSigned bool, err error) {
jwsSignature, err := jose.ParseSigned(rawMessage)
if err != nil {
// message is (probably) not signed, try to unmarshal it directly
err = json.Unmarshal([]byte(rawMessage), object)
return false, err
}
payload := jwsSignature.UnsafePayloadWithoutVerification()
err = json.Unmarshal([]byte(payload), object)
if err != nil {
// message doesn't have a json payload
errTxt := fmt.Sprintf("VerifySenderSignature: Signature okay but message unmarshal failed: %s", err)
return true, errors.New(errTxt)
}
// determine who the sender is
reflObject := reflect.ValueOf(object).Elem()
reflSender := reflObject.FieldByName("Sender")
if !reflSender.IsValid()
|
{
reflSender = reflObject.FieldByName("Address")
if !reflSender.IsValid() {
err = errors.New("VerifySenderJWSSignature: object doesn't have a Sender or Address field")
return true, err
}
}
|
conditional_block
|
|
MessageSigner.go
|
ignature(dmessage, object, signer.GetPublicKey)
return isEncrypted, isSigned, err
}
// SignMessages returns whether messages MUST be signed on sending or receiving
func (signer *MessageSigner) SignMessages() bool {
return signer.signMessages
}
// VerifySignedMessage parses and verifies the message signature
// As per the standard, the sender (and signer) of the message is given in the message's 'Sender' field.
// If the Sender field is missing then the 'Address' field identifies the publisher.
func (signer *MessageSigner) VerifySignedMessage(rawMessage string, object interface{}) (isSigned bool, err error) {
isSigned, err = VerifySenderJWSSignature(rawMessage, object, signer.GetPublicKey)
return isSigned, err
}
// PublishObject encapsulates the message object in a payload, signs the message, and sends it.
// If an encryption key is provided then the signed message will be encrypted.
// The object to publish will be marshalled to JSON and signed by this publisher
func (signer *MessageSigner) PublishObject(address string, retained bool, object interface{}, encryptionKey *ecdsa.PublicKey) error {
// payload, err := json.Marshal(object)
payload, err := json.MarshalIndent(object, " ", " ")
if err != nil || object == nil {
errText := fmt.Sprintf("Publisher.publishMessage: Error marshalling message for address %s: %s", address, err)
return errors.New(errText)
}
if encryptionKey != nil {
err = signer.PublishEncrypted(address, retained, string(payload), encryptionKey)
} else {
err = signer.PublishSigned(address, retained, string(payload))
}
return err
}
// SetSignMessages enables or disables message signing. Intended for testing.
func (signer *MessageSigner) SetSignMessages(sign bool) {
signer.signMessages = sign
}
// Subscribe to messages on the given address
func (signer *MessageSigner) Subscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Subscribe(address, handler)
}
// Unsubscribe from messages on the given address
func (signer *MessageSigner) Unsubscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Unsubscribe(address, handler)
|
func (signer *MessageSigner) PublishEncrypted(
address string, retained bool, payload string, publicKey *ecdsa.PublicKey) error {
var err error
message := payload
// first sign, then encrypt as per RFC
if signer.signMessages {
message, _ = CreateJWSSignature(string(payload), signer.privateKey)
}
emessage, err := EncryptMessage(message, publicKey)
err = signer.messenger.Publish(address, retained, emessage)
return err
}
// PublishSigned signs the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishSigned(
address string, retained bool, payload string) error {
var err error
// default is unsigned
message := payload
if signer.signMessages {
message, err = CreateJWSSignature(string(payload), signer.privateKey)
if err != nil {
logrus.Errorf("Publisher.publishMessage: Error signing message for address %s: %s", address, err)
}
}
err = signer.messenger.Publish(address, retained, message)
return err
}
// NewMessageSigner creates a new instance for signing and verifying published messages
// If getPublicKey is not provided, verification of signature is skipped
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
signer := &MessageSigner{
GetPublicKey: getPublicKey,
messenger: messenger,
signMessages: true,
privateKey: signingKey, // private key for signing
}
return signer
}
/*
* Helper Functions for signing and verification
*/
// CreateEcdsaSignature creates an ECDSA256 signature from the payload using the provided private key
// This returns a base64url encoded signature
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
if privateKey == nil {
return ""
}
hashed := sha256.Sum256(payload)
r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
if err != nil {
return ""
}
sig, err := asn1.Marshal(ECDSASignature{r, s})
return base64.URLEncoding.EncodeToString(sig)
}
// SignIdentity updates the base64URL encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
identCopy := *publicIdent
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
sigStr := CreateEcdsaSignature(payload, privKey)
publicIdent.IdentitySignature = sigStr
}
// CreateJWSSignature signs the payload using JWS ES256 and returns the JWS compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
if err != nil {
return "", err
}
signedObject, err := joseSigner.Sign([]byte(payload))
if err != nil {
return "", err
}
// serialized := signedObject.FullSerialize()
serialized, err := signedObject.CompactSerialize()
return serialized, err
}
// DecryptMessage deserializes and decrypts the message using JWE
// This returns the decrypted message, or the input message if the message was not encrypted
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
message = serialized
decrypter, err := jose.ParseEncrypted(serialized)
if err == nil {
dmessage, err := decrypter.Decrypt(privateKey)
message = string(dmessage)
return message, true, err
}
return message, false, err
}
// EncryptMessage encrypts and serializes the message using JWE
func EncryptMessage(message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
var jwe *jose.JSONWebEncryption
recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
if encrypter != nil {
jwe, err = encrypter.Encrypt([]byte(message))
}
if err != nil {
return message, err
}
serialized, _ = jwe.CompactSerialize()
return serialized, err
}
// VerifyIdentitySignature verifies a base64URL encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
// the signing took place with the signature field empty
identCopy := *ident
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
err := VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
// signingKey := jose.SigningKey{Algorithm: jose.ES256, Key: privKey}
// joseSigner, _ := jose.NewSigner(signingKey, nil)
// jwsObject, _ := joseSigner.Verify(payload)
// sig := jwsObject.Signatures[0].Signature
// sigStr := base64.URLEncoding.EncodeToString(sig)
// return sigStr
return err
}
// VerifyEcdsaSignature verifies the payload against the base64url encoded signature using the public key
// payload is any raw data
// signatureB64urlEncoded is the ecdsa 256 URL encoded signature
// Intended for signing an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
var rs ECDSASignature
if publicKey == nil {
return errors.New("VerifyEcdsaSignature: publicKey is nil")
}
signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
if err != nil {
return errors.New("VerifyEcdsaSignature: Invalid signature")
}
if _, err = asn1.Unmarshal(signature, &rs); err != nil {
return errors.New("VerifyEcdsaSignature: Payload is not ASN")
}
hashed := sha256.Sum256(payload)
verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
if !verified {
return errors.New("VerifyEcdsaSignature: Signature does not match payload")
}
return nil
}
// VerifyJWSMessage verifies a signed message and returns its payload
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
// Int
|
}
// PublishEncrypted signs and encrypts the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
|
random_line_split
|
MessageSigner.go
|
ature(dmessage, object, signer.GetPublicKey)
return isEncrypted, isSigned, err
}
// SignMessages returns whether messages MUST be signed on sending or receiving
func (signer *MessageSigner) SignMessages() bool {
return signer.signMessages
}
// VerifySignedMessage parses and verifies the message signature
// As per the standard, the sender (and signer) of the message is given in the message's 'Sender' field.
// If the Sender field is missing then the 'Address' field identifies the publisher.
func (signer *MessageSigner) VerifySignedMessage(rawMessage string, object interface{}) (isSigned bool, err error) {
isSigned, err = VerifySenderJWSSignature(rawMessage, object, signer.GetPublicKey)
return isSigned, err
}
// PublishObject encapsulates the message object in a payload, signs the message, and sends it.
// If an encryption key is provided then the signed message will be encrypted.
// The object to publish will be marshalled to JSON and signed by this publisher
func (signer *MessageSigner) PublishObject(address string, retained bool, object interface{}, encryptionKey *ecdsa.PublicKey) error {
// payload, err := json.Marshal(object)
payload, err := json.MarshalIndent(object, " ", " ")
if err != nil || object == nil {
errText := fmt.Sprintf("Publisher.publishMessage: Error marshalling message for address %s: %s", address, err)
return errors.New(errText)
}
if encryptionKey != nil {
err = signer.PublishEncrypted(address, retained, string(payload), encryptionKey)
} else {
err = signer.PublishSigned(address, retained, string(payload))
}
return err
}
// SetSignMessages enables or disables message signing. Intended for testing.
func (signer *MessageSigner) SetSignMessages(sign bool) {
signer.signMessages = sign
}
// Subscribe to messages on the given address
func (signer *MessageSigner) Subscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Subscribe(address, handler)
}
// Unsubscribe from messages on the given address
func (signer *MessageSigner) Unsubscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Unsubscribe(address, handler)
}
// PublishEncrypted signs and encrypts the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishEncrypted(
address string, retained bool, payload string, publicKey *ecdsa.PublicKey) error {
var err error
message := payload
// first sign, then encrypt as per RFC
if signer.signMessages {
message, _ = CreateJWSSignature(string(payload), signer.privateKey)
}
emessage, err := EncryptMessage(message, publicKey)
err = signer.messenger.Publish(address, retained, emessage)
return err
}
// PublishSigned signs the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishSigned(
address string, retained bool, payload string) error {
var err error
// default is unsigned
message := payload
if signer.signMessages {
message, err = CreateJWSSignature(string(payload), signer.privateKey)
if err != nil {
logrus.Errorf("Publisher.publishMessage: Error signing message for address %s: %s", address, err)
}
}
err = signer.messenger.Publish(address, retained, message)
return err
}
// NewMessageSigner creates a new instance for signing and verifying published messages
// If getPublicKey is not provided, verification of signature is skipped
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
signer := &MessageSigner{
GetPublicKey: getPublicKey,
messenger: messenger,
signMessages: true,
privateKey: signingKey, // private key for signing
}
return signer
}
/*
* Helper Functions for signing and verification
*/
// CreateEcdsaSignature creates an ECDSA256 signature from the payload using the provided private key
// This returns a base64url encoded signature
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
if privateKey == nil {
return ""
}
hashed := sha256.Sum256(payload)
r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
if err != nil {
return ""
}
sig, err := asn1.Marshal(ECDSASignature{r, s})
return base64.URLEncoding.EncodeToString(sig)
}
// SignIdentity updates the base64URL encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
identCopy := *publicIdent
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
sigStr := CreateEcdsaSignature(payload, privKey)
publicIdent.IdentitySignature = sigStr
}
// CreateJWSSignature signs the payload using JWS ES256 and returns the JWS compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
if err != nil {
return "", err
}
signedObject, err := joseSigner.Sign([]byte(payload))
if err != nil {
return "", err
}
// serialized := signedObject.FullSerialize()
serialized, err := signedObject.CompactSerialize()
return serialized, err
}
// DecryptMessage deserializes and decrypts the message using JWE
// This returns the decrypted message, or the input message if the message was not encrypted
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
message = serialized
decrypter, err := jose.ParseEncrypted(serialized)
if err == nil {
dmessage, err := decrypter.Decrypt(privateKey)
message = string(dmessage)
return message, true, err
}
return message, false, err
}
// EncryptMessage encrypts and serializes the message using JWE
func
|
(message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
var jwe *jose.JSONWebEncryption
recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
if encrypter != nil {
jwe, err = encrypter.Encrypt([]byte(message))
}
if err != nil {
return message, err
}
serialized, _ = jwe.CompactSerialize()
return serialized, err
}
// VerifyIdentitySignature verifies a base64URL encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
// the signing took place with the signature field empty
identCopy := *ident
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
err := VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
// signingKey := jose.SigningKey{Algorithm: jose.ES256, Key: privKey}
// joseSigner, _ := jose.NewSigner(signingKey, nil)
// jwsObject, _ := joseSigner.Verify(payload)
// sig := jwsObject.Signatures[0].Signature
// sigStr := base64.URLEncoding.EncodeToString(sig)
// return sigStr
return err
}
// VerifyEcdsaSignature verifies the payload against the base64url encoded signature using the public key
// payload is any raw data
// signatureB64urlEncoded is the ecdsa 256 URL encoded signature
// Intended for signing an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
var rs ECDSASignature
if publicKey == nil {
return errors.New("VerifyEcdsaSignature: publicKey is nil")
}
signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
if err != nil {
return errors.New("VerifyEcdsaSignature: Invalid signature")
}
if _, err = asn1.Unmarshal(signature, &rs); err != nil {
return errors.New("VerifyEcdsaSignature: Payload is not ASN")
}
hashed := sha256.Sum256(payload)
verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
if !verified {
return errors.New("VerifyEcdsaSignature: Signature does not match payload")
}
return nil
}
// VerifyJWSMessage verifies a signed message and returns its payload
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
//
|
EncryptMessage
|
identifier_name
|