file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
binder.go
|
Type(models[0])
if pkgName == "" {
return nil, fmt.Errorf("missing package name for %s", name)
}
obj, err := b.FindObject(pkgName, typeName)
if err != nil {
return nil, err
}
return obj.Type(), nil
}
func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
if pkgName == "" {
return nil, fmt.Errorf("package cannot be nil")
}
pkg := b.pkgs.LoadWithTypes(pkgName)
if pkg == nil {
err := b.pkgs.Errors()
if err != nil {
return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
}
return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
}
if b.objectCache == nil {
b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
}
defsIndex, ok := b.objectCache[pkgName]
if !ok {
defsIndex = indexDefs(pkg)
b.objectCache[pkgName] = defsIndex
}
// function based marshalers take precedence
if val, ok := defsIndex["Marshal"+typeName]; ok {
return val, nil
}
if val, ok := defsIndex[typeName]; ok {
return val, nil
}
return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}
func indexDefs(pkg *packages.Package) map[string]types.Object {
res := make(map[string]types.Object)
scope := pkg.Types.Scope()
for astNode, def := range pkg.TypesInfo.Defs {
// only look at defs in the top scope
if def == nil {
continue
}
parent := def.Parent()
if parent == nil || parent != scope {
continue
}
if _, ok := res[astNode.Name]; !ok {
// The above check may not be really needed, it is only here to have a consistent behavior with
// previous implementation of FindObject() function which only honored the first inclusion of a def.
// If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
res[astNode.Name] = def
}
}
return res
}
func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
newRef := *ref
newRef.GO = types.NewPointer(ref.GO)
b.References = append(b.References, &newRef)
return &newRef
}
// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
Definition *ast.Definition
GQL *ast.Type
GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
CastType types.Type // Before calling marshalling functions cast from/to this base type
Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
IsOmittable bool // Is the type wrapped with Omittable
IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
PointersInUmarshalInput bool // Inverse values and pointers in return.
}
func (ref *TypeReference) Elem() *TypeReference {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
newRef := *ref
newRef.GO = p.Elem()
return &newRef
}
if ref.IsSlice() {
newRef := *ref
newRef.GO = ref.GO.(*types.Slice).Elem()
newRef.GQL = ref.GQL.Elem
return &newRef
}
return nil
}
func (ref *TypeReference) IsPtr() bool {
_, isPtr := ref.GO.(*types.Pointer)
return isPtr
}
// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
_, isPtr := p.Elem().(*types.Pointer)
return isPtr
}
return false
}
func (ref *TypeReference) IsNilable() bool {
return IsNilable(ref.GO)
}
func (ref *TypeReference) IsSlice() bool {
_, isSlice := ref.GO.(*types.Slice)
return ref.GQL.Elem != nil && isSlice
}
func (ref *TypeReference) IsPtrToSlice() bool {
if ref.IsPtr() {
_, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
return isPointerToSlice
}
return false
}
func (ref *TypeReference) IsPtrToIntf() bool {
if ref.IsPtr() {
_, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
return isPointerToInterface
}
return false
}
func (ref *TypeReference) IsNamed() bool {
_, isSlice := ref.GO.(*types.Named)
return isSlice
}
func (ref *TypeReference) IsStruct() bool {
_, isStruct := ref.GO.Underlying().(*types.Struct)
return isStruct
}
func (ref *TypeReference) IsScalar() bool {
return ref.Definition.Kind == ast.Scalar
}
func (ref *TypeReference) UniquenessKey() string {
nullability := "O"
if ref.GQL.NonNull {
nullability = "N"
}
elemNullability := ""
if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
// Fix for #896
elemNullability = "ᚄ"
}
return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}
func (ref *TypeReference) MarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if ref.Definition.Kind == ast.InputObject {
return ""
}
return "marshal" + ref.UniquenessKey()
}
func (ref *TypeReference) UnmarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if !ref.Definition.IsInputType() {
return ""
}
return "unmarshal" + ref.UniquenessKey()
}
func (ref *TypeReference) IsTargetNilable() bool {
return IsNilable(ref.Target)
}
func (b *Binder) PushRef(ret *TypeReference) {
b.References = append(b.References, ret)
}
func isMap(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Map)
return ok
}
func isIntf(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Interface)
return ok
}
func unwrapOmittable(t types.Type) (types.Type, bool) {
if t == nil {
return t, false
}
named, ok := t.(*types.Named)
if !ok {
return t, false
}
if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
return t, false
}
return named.TypeArgs().At(0), true
}
func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
if innerType, ok := unwrapOmittable(bindTarget); ok {
if schemaType.NonNull {
return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
}
ref, err := b.TypeReference(schemaType, innerType)
if err != nil {
return nil, err
}
ref.IsOmittable = true
return ref, err
}
if !isValid(bindTarget) {
b.SawInvalid = true
return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
}
var pkgName, typeName string
def := b.schema.Types[schemaType.Name()]
defer func() {
if err == nil && ret != nil {
b.PushRef(ret)
}
}()
if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
return nil, fmt.Errorf("%s was not found", schemaType.Name())
}
for _, model := range b.cfg.Models[schemaType.Name()].Model {
if model == "map[string]interface{}" {
if !isMap(bindTarget) {
continue
}
return &TypeReference{
Definition: def,
GQL: schemaType,
GO: MapType,
}, nil
}
|
if model == "interface{}" {
if !isIntf(bindTarget) {
|
random_line_split
|
|
binder.go
|
) (types.Type, error) {
models := b.cfg.Models[name].Model
if len(models) == 0 {
return nil, fmt.Errorf(name + " not found in typemap")
}
if models[0] == "map[string]interface{}" {
return MapType, nil
}
if models[0] == "interface{}" {
return InterfaceType, nil
}
pkgName, typeName := code.PkgAndType(models[0])
if pkgName == "" {
return nil, fmt.Errorf("missing package name for %s", name)
}
obj, err := b.FindObject(pkgName, typeName)
if err != nil {
return nil, err
}
return obj.Type(), nil
}
func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
if pkgName == "" {
return nil, fmt.Errorf("package cannot be nil")
}
pkg := b.pkgs.LoadWithTypes(pkgName)
if pkg == nil {
err := b.pkgs.Errors()
if err != nil {
return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
}
return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
}
if b.objectCache == nil {
b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
}
defsIndex, ok := b.objectCache[pkgName]
if !ok {
defsIndex = indexDefs(pkg)
b.objectCache[pkgName] = defsIndex
}
// function based marshalers take precedence
if val, ok := defsIndex["Marshal"+typeName]; ok {
return val, nil
}
if val, ok := defsIndex[typeName]; ok {
return val, nil
}
return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}
func indexDefs(pkg *packages.Package) map[string]types.Object {
res := make(map[string]types.Object)
scope := pkg.Types.Scope()
for astNode, def := range pkg.TypesInfo.Defs {
// only look at defs in the top scope
if def == nil {
continue
}
parent := def.Parent()
if parent == nil || parent != scope {
continue
}
if _, ok := res[astNode.Name]; !ok {
// The above check may not be really needed, it is only here to have a consistent behavior with
// previous implementation of FindObject() function which only honored the first inclusion of a def.
// If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
res[astNode.Name] = def
}
}
return res
}
func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
newRef := *ref
newRef.GO = types.NewPointer(ref.GO)
b.References = append(b.References, &newRef)
return &newRef
}
// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
Definition *ast.Definition
GQL *ast.Type
GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
CastType types.Type // Before calling marshalling functions cast from/to this base type
Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
IsOmittable bool // Is the type wrapped with Omittable
IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
PointersInUmarshalInput bool // Inverse values and pointers in return.
}
func (ref *TypeReference) Elem() *TypeReference {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
newRef := *ref
newRef.GO = p.Elem()
return &newRef
}
if ref.IsSlice() {
newRef := *ref
newRef.GO = ref.GO.(*types.Slice).Elem()
newRef.GQL = ref.GQL.Elem
return &newRef
}
return nil
}
func (ref *TypeReference) IsPtr() bool {
_, isPtr := ref.GO.(*types.Pointer)
return isPtr
}
// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
_, isPtr := p.Elem().(*types.Pointer)
return isPtr
}
return false
}
func (ref *TypeReference) IsNilable() bool {
return IsNilable(ref.GO)
}
func (ref *TypeReference) IsSlice() bool {
_, isSlice := ref.GO.(*types.Slice)
return ref.GQL.Elem != nil && isSlice
}
func (ref *TypeReference) IsPtrToSlice() bool {
if ref.IsPtr() {
_, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
return isPointerToSlice
}
return false
}
func (ref *TypeReference) IsPtrToIntf() bool {
if ref.IsPtr() {
_, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
return isPointerToInterface
}
return false
}
func (ref *TypeReference) IsNamed() bool {
_, isSlice := ref.GO.(*types.Named)
return isSlice
}
func (ref *TypeReference) IsStruct() bool {
_, isStruct := ref.GO.Underlying().(*types.Struct)
return isStruct
}
func (ref *TypeReference) IsScalar() bool {
return ref.Definition.Kind == ast.Scalar
}
func (ref *TypeReference) UniquenessKey() string {
nullability := "O"
if ref.GQL.NonNull {
nullability = "N"
}
elemNullability := ""
if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
// Fix for #896
elemNullability = "ᚄ"
}
return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}
func (ref *TypeReference) MarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if ref.Definition.Kind == ast.InputObject {
return ""
}
return "marshal" + ref.UniquenessKey()
}
func (ref *TypeReference) UnmarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if !ref.Definition.IsInputType() {
return ""
}
return "unmarshal" + ref.UniquenessKey()
}
func (ref *TypeReference) IsTargetNilable() bool {
return IsNilable(ref.Target)
}
func (b *Binder) PushRef(ret *TypeReference) {
b.References = append(b.References, ret)
}
func isMap(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Map)
return ok
}
func isIntf(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Interface)
return ok
}
func unwrapOmittable(t types.Type) (types.Type, bool) {
if t == nil {
|
named, ok := t.(*types.Named)
if !ok {
return t, false
}
if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
return t, false
}
return named.TypeArgs().At(0), true
}
func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
if innerType, ok := unwrapOmittable(bindTarget); ok {
if schemaType.NonNull {
return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
}
ref, err := b.TypeReference(schemaType, innerType)
if err != nil {
return nil, err
}
ref.IsOmittable = true
return ref, err
}
if !isValid(bindTarget) {
b.SawInvalid = true
return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
}
var pkgName, typeName string
def := b.schema.Types[schemaType.Name()]
defer func() {
if err == nil && ret != nil {
b.PushRef(ret)
}
}()
if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
return nil, fmt.Errorf("%s was not found", schemaType.Name())
}
for
|
return t, false
}
|
conditional_block
|
tools.py
|
)
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
import torch.nn.functional as F
from tqdm import tqdm
class PyTorchClassifier(object):
def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
cudaEfficient=False):
# fix seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.inputdim = inputdim
self.nclasses = nclasses
self.l2reg = l2reg
self.batch_size = batch_size
self.cudaEfficient = cudaEfficient
def prepare_data(self, args, features):
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index, all_segment_ids, all_label_ids)
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
return eval_dataloader, eval_sampler
def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y, validation_split=None,
early_stop=True):
self.nepoch = 0
bestaccuracy = -1
stop_train = False
early_stop_count = 0
# Preparing validation data
train_dataloader, train_sampler = self.prepare_data(args, train_x)
# Training
while not stop_train and self.nepoch <= self.max_epoch:
self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size)
accuracy = self.score(args, model, tokenizer, dev_x)
if accuracy > bestaccuracy:
bestaccuracy = accuracy
bestmodel = copy.deepcopy(self.model)
elif early_stop:
if early_stop_count >= self.tenacity:
stop_train = True
early_stop_count += 1
self.model = bestmodel
return bestaccuracy
def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step = 50):
all_costs = []
for _ in range(self.nepoch, self.nepoch + epoch_size):
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
self.model.train()
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.data.item())
# backward
self.optimizer.zero_grad()
loss.backward()
# Update parameters
self.optimizer.step()
self.nepoch += epoch_size
def score(self, args, model, tokenizer, dev_x):
dev_dataloader, dev_sampler = self.prepare_data(args, dev_x)
self.model.eval()
correct = 0
all = 0
with torch.no_grad():
for step, batch in enumerate(dev_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
output_pred = output.cpu().data.tolist()
pred = []
for p in output_pred:
pred.append(0 if p[0] > p[1] else 1)
yb = ybatch.data.tolist()
for p, g in zip(pred, yb):
all += 1
if p == g:
correct += 1
accuracy = 1.0 * correct / all
return accuracy
def predict(self, devX):
self.model.eval()
devX = torch.FloatTensor(devX).cuda()
yhat = np.array([])
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
output = self.model(Xbatch)
yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
yhat = np.vstack(yhat)
return yhat
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
vals = F.softmax(self.model(Xbatch).data.cpu().numpy())
if not probas:
|
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
if params["nhid"] == 0:
self.model = nn.Sequential(
nn.Linear(self.inputdim, self.nclasses),
).cuda()
else:
self.model = nn.Sequential(
nn.Linear(self.inputdim, params["nhid"]),
nn.Dropout(p=self.dropout),
nn.Sigmoid(),
nn.Linear(params["nhid"], self.nclasses),
).cuda()
self.loss_fn = nn.CrossEntropyLoss().cuda()
self.loss_fn.size_average = False
optim_fn, optim_params = utils.get_optimizer(self.optim)
self.optimizer = optim_fn(self.model.parameters(), **optim_params)
self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
class self_attn_mlp(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
|
probas = vals
else:
probas = np.concatenate(probas, vals, axis=0)
return probas
|
random_line_split
|
tools.py
|
)
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
import torch.nn.functional as F
from tqdm import tqdm
class PyTorchClassifier(object):
def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
cudaEfficient=False):
# fix seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.inputdim = inputdim
self.nclasses = nclasses
self.l2reg = l2reg
self.batch_size = batch_size
self.cudaEfficient = cudaEfficient
def prepare_data(self, args, features):
|
def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y, validation_split=None,
early_stop=True):
self.nepoch = 0
bestaccuracy = -1
stop_train = False
early_stop_count = 0
# Preparing validation data
train_dataloader, train_sampler = self.prepare_data(args, train_x)
# Training
while not stop_train and self.nepoch <= self.max_epoch:
self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size)
accuracy = self.score(args, model, tokenizer, dev_x)
if accuracy > bestaccuracy:
bestaccuracy = accuracy
bestmodel = copy.deepcopy(self.model)
elif early_stop:
if early_stop_count >= self.tenacity:
stop_train = True
early_stop_count += 1
self.model = bestmodel
return bestaccuracy
def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step = 50):
all_costs = []
for _ in range(self.nepoch, self.nepoch + epoch_size):
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
self.model.train()
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.data.item())
# backward
self.optimizer.zero_grad()
loss.backward()
# Update parameters
self.optimizer.step()
self.nepoch += epoch_size
def score(self, args, model, tokenizer, dev_x):
dev_dataloader, dev_sampler = self.prepare_data(args, dev_x)
self.model.eval()
correct = 0
all = 0
with torch.no_grad():
for step, batch in enumerate(dev_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
output_pred = output.cpu().data.tolist()
pred = []
for p in output_pred:
pred.append(0 if p[0] > p[1] else 1)
yb = ybatch.data.tolist()
for p, g in zip(pred, yb):
all += 1
if p == g:
correct += 1
accuracy = 1.0 * correct / all
return accuracy
def predict(self, devX):
self.model.eval()
devX = torch.FloatTensor(devX).cuda()
yhat = np.array([])
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
output = self.model(Xbatch)
yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
yhat = np.vstack(yhat)
return yhat
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
vals = F.softmax(self.model(Xbatch).data.cpu().numpy())
if not probas:
probas = vals
else:
probas = np.concatenate(probas, vals, axis=0)
return probas
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
if params["nhid"] == 0:
self.model = nn.Sequential(
nn.Linear(self.inputdim, self.nclasses),
).cuda()
else:
self.model = nn.Sequential(
nn.Linear(self.inputdim, params["nhid"]),
nn.Dropout(p=self.dropout),
nn.Sigmoid(),
nn.Linear(params["nhid"], self.nclasses),
).cuda()
self.loss_fn = nn.CrossEntropyLoss().cuda()
self.loss_fn.size_average = False
optim_fn, optim_params = utils.get_optimizer(self.optim)
self.optimizer = optim_fn(self.model.parameters(), **optim_params)
self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
class self_attn_mlp(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
self
|
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index, all_segment_ids, all_label_ids)
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
return eval_dataloader, eval_sampler
|
identifier_body
|
tools.py
|
)
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
import torch.nn.functional as F
from tqdm import tqdm
class PyTorchClassifier(object):
def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
cudaEfficient=False):
# fix seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.inputdim = inputdim
self.nclasses = nclasses
self.l2reg = l2reg
self.batch_size = batch_size
self.cudaEfficient = cudaEfficient
def prepare_data(self, args, features):
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index, all_segment_ids, all_label_ids)
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
return eval_dataloader, eval_sampler
def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y, validation_split=None,
early_stop=True):
self.nepoch = 0
bestaccuracy = -1
stop_train = False
early_stop_count = 0
# Preparing validation data
train_dataloader, train_sampler = self.prepare_data(args, train_x)
# Training
while not stop_train and self.nepoch <= self.max_epoch:
self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size)
accuracy = self.score(args, model, tokenizer, dev_x)
if accuracy > bestaccuracy:
bestaccuracy = accuracy
bestmodel = copy.deepcopy(self.model)
elif early_stop:
if early_stop_count >= self.tenacity:
stop_train = True
early_stop_count += 1
self.model = bestmodel
return bestaccuracy
def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step = 50):
all_costs = []
for _ in range(self.nepoch, self.nepoch + epoch_size):
|
self.nepoch += epoch_size
def score(self, args, model, tokenizer, dev_x):
dev_dataloader, dev_sampler = self.prepare_data(args, dev_x)
self.model.eval()
correct = 0
all = 0
with torch.no_grad():
for step, batch in enumerate(dev_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
output_pred = output.cpu().data.tolist()
pred = []
for p in output_pred:
pred.append(0 if p[0] > p[1] else 1)
yb = ybatch.data.tolist()
for p, g in zip(pred, yb):
all += 1
if p == g:
correct += 1
accuracy = 1.0 * correct / all
return accuracy
def predict(self, devX):
self.model.eval()
devX = torch.FloatTensor(devX).cuda()
yhat = np.array([])
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
output = self.model(Xbatch)
yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
yhat = np.vstack(yhat)
return yhat
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
vals = F.softmax(self.model(Xbatch).data.cpu().numpy())
if not probas:
probas = vals
else:
probas = np.concatenate(probas, vals, axis=0)
return probas
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
if params["nhid"] == 0:
self.model = nn.Sequential(
nn.Linear(self.inputdim, self.nclasses),
).cuda()
else:
self.model = nn.Sequential(
nn.Linear(self.inputdim, params["nhid"]),
nn.Dropout(p=self.dropout),
nn.Sigmoid(),
nn.Linear(params["nhid"], self.nclasses),
).cuda()
self.loss_fn = nn.CrossEntropyLoss().cuda()
self.loss_fn.size_average = False
optim_fn, optim_params = utils.get_optimizer(self.optim)
self.optimizer = optim_fn(self.model.parameters(), **optim_params)
self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
class self_attn_mlp(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
|
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
self.model.train()
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.data.item())
# backward
self.optimizer.zero_grad()
loss.backward()
# Update parameters
self.optimizer.step()
|
conditional_block
|
tools.py
|
)
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
import torch.nn.functional as F
from tqdm import tqdm
class PyTorchClassifier(object):
def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
cudaEfficient=False):
# fix seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.inputdim = inputdim
self.nclasses = nclasses
self.l2reg = l2reg
self.batch_size = batch_size
self.cudaEfficient = cudaEfficient
def
|
(self, args, features):
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index, all_segment_ids, all_label_ids)
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
return eval_dataloader, eval_sampler
def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y, validation_split=None,
early_stop=True):
self.nepoch = 0
bestaccuracy = -1
stop_train = False
early_stop_count = 0
# Preparing validation data
train_dataloader, train_sampler = self.prepare_data(args, train_x)
# Training
while not stop_train and self.nepoch <= self.max_epoch:
self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size)
accuracy = self.score(args, model, tokenizer, dev_x)
if accuracy > bestaccuracy:
bestaccuracy = accuracy
bestmodel = copy.deepcopy(self.model)
elif early_stop:
if early_stop_count >= self.tenacity:
stop_train = True
early_stop_count += 1
self.model = bestmodel
return bestaccuracy
def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step = 50):
all_costs = []
for _ in range(self.nepoch, self.nepoch + epoch_size):
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
self.model.train()
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.data.item())
# backward
self.optimizer.zero_grad()
loss.backward()
# Update parameters
self.optimizer.step()
self.nepoch += epoch_size
def score(self, args, model, tokenizer, dev_x):
dev_dataloader, dev_sampler = self.prepare_data(args, dev_x)
self.model.eval()
correct = 0
all = 0
with torch.no_grad():
for step, batch in enumerate(dev_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[3]}
ybatch = batch[4]
with torch.no_grad():
_, _, all_encoder_layers = model(**inputs)
layer_output = all_encoder_layers[args.layer]
output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
output_pred = output.cpu().data.tolist()
pred = []
for p in output_pred:
pred.append(0 if p[0] > p[1] else 1)
yb = ybatch.data.tolist()
for p, g in zip(pred, yb):
all += 1
if p == g:
correct += 1
accuracy = 1.0 * correct / all
return accuracy
def predict(self, devX):
self.model.eval()
devX = torch.FloatTensor(devX).cuda()
yhat = np.array([])
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
output = self.model(Xbatch)
yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
yhat = np.vstack(yhat)
return yhat
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
vals = F.softmax(self.model(Xbatch).data.cpu().numpy())
if not probas:
probas = vals
else:
probas = np.concatenate(probas, vals, axis=0)
return probas
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
if params["nhid"] == 0:
self.model = nn.Sequential(
nn.Linear(self.inputdim, self.nclasses),
).cuda()
else:
self.model = nn.Sequential(
nn.Linear(self.inputdim, params["nhid"]),
nn.Dropout(p=self.dropout),
nn.Sigmoid(),
nn.Linear(params["nhid"], self.nclasses),
).cuda()
self.loss_fn = nn.CrossEntropyLoss().cuda()
self.loss_fn.size_average = False
optim_fn, optim_params = utils.get_optimizer(self.optim)
self.optimizer = optim_fn(self.model.parameters(), **optim_params)
self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
class self_attn_mlp(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
|
prepare_data
|
identifier_name
|
google_drive_data.py
|
False:
status, done = downloader.next_chunk()
with io.open("." + "/" + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
def is_duplicate(img1,img2):
response=False
image1 = cv2.imread(img1)
image2 = cv2.imread(img2)
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) #if difference is all zeros it will return False
if result is True:
response=True
#duplicate_image.append(list[i])
#print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1)))
except:
i=0
return response
def check_duplicate_image_new(items):
print("Images is loading to memory..")
#"""given items returned by Google Drive API, prints them in a tabular way"""
map= {}
list=[]
message= set()
duplicate_image=[]
final_result={}
if not items:
print('No files found.')
else:
for item in items:
if item["mimeType"] == "image/jpeg":
list.append(item["name"])
#Creating Map
value=[]
value.append(item["name"])
value.append(item["webViewLink"])
if item["name"] in map:
val=set()
val.add(item["webViewLink"])
map[item["name"]]=item["webViewLink"]
else:
map[item["name"]]=item["webViewLink"]
#Dowloading Image
downloadFile(item["id"],item["name"])
match=[]
flag=False
for i in range(len(list)-1):
temp=[]
dp_count=0
flag=False
if list[i] not in match :
flag=True
for j in range(i+1,len(list)):
istrue=is_duplicate(list[i],list[j])
if istrue==True:
dp_count=dp_count+1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match)==0:
match.append(list[i])
match.append(list[j])
if flag==True and dp_count !=0:
#print(list[i]," - ",dp_count)
final_result[list[i]]=temp
m={}
tdct=0
for x, y in final_result.items():
res=y
tdct=tdct+len(res)
s=set()
for i in res:
#s=set()
for item in items:
if item["mimeType"] == "image/jpeg":
if item["name"]==i:
s.add(item["webViewLink"])
m[x]=s
return m,tdct
def duplicate_image_list(imagelist):
#print(len(imagelist))
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count=0
l=[]
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) # if difference is all zeros it will return False
if result is True:
#print(imagelist[i],"Matching with ",imagelist[j])
l.append(imagelist[j])
count=count+1
dup_list.append(imagelist[i])
except:
i = 0
return dup_list
csv_map = {}
def check_duplicate_image(items):
# """given items returned by Google Drive API, prints them in a tabular way"""
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item["mimeType"]
if file_type == "image/jpeg":
image_name_list.append(item["name"])
#append url or
# Creating Map
value = []
value.append(item["name"])
value.append(item["webViewLink"])
map[item["id"]] = value
csv_map[item["name"]] = item["webViewLink"]
# Dowloading Image
downloadFile(item["id"], item["name"])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service,items, newName):
|
def count_image(id):
imageList = []
service = get_gdrive_service()
results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
items = results.get('files', [])
for item in items:
mime_Type = item["mimeType"]
if mime_Type == "image/jpeg":
imageList.append(item["name"])
if mime_Type == "application/vnd.google-apps.folder":
imageList.extend(count_image(item["id"]))
return imageList
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name=[]
rows = []
overview_map = {}
img_nm=0
for item in items:
name = item["name"]
mime_type = item["mimeType"]
if name == 'Test Techm':
testtechm_id = item['parents'][0]
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
if mime_type == "application/vnd.google-apps.folder":
folder_count = folder_count + 1
if mime_type == "image/jpeg":
# renameFile(item["id"],"rajj_img"+str(image_count))
image_count = image_count + 1
if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
list_all_folder_name.append(item["name"])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item["name"]] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
#duplicate_count = len(check_duplicate_image(items))
lt,duplicate_ct=check_duplicate_image_new(items)
duplicateImagehtml(folder_count, image_count, duplicate_ct,items)
# overview chart report page
draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ["Image Name", 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
#print("Device's adding into csv: " + str(count))
csvFile.close()
#print('Device CSV File creation is Done file name is ', fileName)
def duplicateImagehtml(folder_count, image_count, duplicate_ct,items):
uri = []
map1,count=check_duplicate_image_new(items)
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization
|
count=1
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
file = service.files().get(fileId=id).execute()
del file['id']
if "jpeg" in mime_type:
file['name'] = newName+str(count)+ ".jpg";
if "png" in mime_type:
file['name'] = newName+str(count)+ ".png";
updated_file = service.files().update(fileId=id, body=file).execute()
count=count+1
|
identifier_body
|
google_drive_data.py
|
:
status, done = downloader.next_chunk()
with io.open("." + "/" + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
def is_duplicate(img1,img2):
response=False
image1 = cv2.imread(img1)
image2 = cv2.imread(img2)
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) #if difference is all zeros it will return False
if result is True:
response=True
#duplicate_image.append(list[i])
#print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1)))
except:
i=0
return response
def check_duplicate_image_new(items):
print("Images is loading to memory..")
#"""given items returned by Google Drive API, prints them in a tabular way"""
map= {}
list=[]
message= set()
duplicate_image=[]
final_result={}
if not items:
print('No files found.')
else:
for item in items:
if item["mimeType"] == "image/jpeg":
list.append(item["name"])
#Creating Map
value=[]
value.append(item["name"])
value.append(item["webViewLink"])
if item["name"] in map:
val=set()
val.add(item["webViewLink"])
map[item["name"]]=item["webViewLink"]
else:
map[item["name"]]=item["webViewLink"]
#Dowloading Image
downloadFile(item["id"],item["name"])
match=[]
flag=False
for i in range(len(list)-1):
temp=[]
dp_count=0
flag=False
if list[i] not in match :
flag=True
for j in range(i+1,len(list)):
istrue=is_duplicate(list[i],list[j])
if istrue==True:
dp_count=dp_count+1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match)==0:
match.append(list[i])
match.append(list[j])
if flag==True and dp_count !=0:
#print(list[i]," - ",dp_count)
final_result[list[i]]=temp
m={}
tdct=0
for x, y in final_result.items():
res=y
tdct=tdct+len(res)
s=set()
for i in res:
#s=set()
for item in items:
if item["mimeType"] == "image/jpeg":
if item["name"]==i:
s.add(item["webViewLink"])
m[x]=s
return m,tdct
def duplicate_image_list(imagelist):
#print(len(imagelist))
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count=0
l=[]
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) # if difference is all zeros it will return False
if result is True:
#print(imagelist[i],"Matching with ",imagelist[j])
l.append(imagelist[j])
count=count+1
dup_list.append(imagelist[i])
except:
i = 0
return dup_list
csv_map = {}
def check_duplicate_image(items):
# """given items returned by Google Drive API, prints them in a tabular way"""
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item["mimeType"]
if file_type == "image/jpeg":
image_name_list.append(item["name"])
#append url or
# Creating Map
value = []
value.append(item["name"])
value.append(item["webViewLink"])
map[item["id"]] = value
csv_map[item["name"]] = item["webViewLink"]
# Dowloading Image
downloadFile(item["id"], item["name"])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service,items, newName):
count=1
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
file = service.files().get(fileId=id).execute()
del file['id']
if "jpeg" in mime_type:
file['name'] = newName+str(count)+ ".jpg";
if "png" in mime_type:
file['name'] = newName+str(count)+ ".png";
updated_file = service.files().update(fileId=id, body=file).execute()
count=count+1
def count_image(id):
imageList = []
service = get_gdrive_service()
results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
items = results.get('files', [])
for item in items:
mime_Type = item["mimeType"]
if mime_Type == "image/jpeg":
imageList.append(item["name"])
if mime_Type == "application/vnd.google-apps.folder":
imageList.extend(count_image(item["id"]))
return imageList
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name=[]
rows = []
overview_map = {}
img_nm=0
for item in items:
|
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
if mime_type == "application/vnd.google-apps.folder":
folder_count = folder_count + 1
if mime_type == "image/jpeg":
# renameFile(item["id"],"rajj_img"+str(image_count))
image_count = image_count + 1
if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
list_all_folder_name.append(item["name"])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item["name"]] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
#duplicate_count = len(check_duplicate_image(items))
lt,duplicate_ct=check_duplicate_image_new(items)
duplicateImagehtml(folder_count, image_count, duplicate_ct,items)
# overview chart report page
draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ["Image Name", 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
#print("Device's adding into csv: " + str(count))
csvFile.close()
#print('Device CSV File creation is Done file name is ', fileName)
def duplicateImagehtml(folder_count, image_count, duplicate_ct,items):
uri = []
map1,count=check_duplicate_image_new(items)
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization
|
name = item["name"]
mime_type = item["mimeType"]
if name == 'Test Techm':
testtechm_id = item['parents'][0]
|
conditional_block
|
google_drive_data.py
|
:
status, done = downloader.next_chunk()
with io.open("." + "/" + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
def is_duplicate(img1,img2):
response=False
image1 = cv2.imread(img1)
image2 = cv2.imread(img2)
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) #if difference is all zeros it will return False
if result is True:
response=True
#duplicate_image.append(list[i])
#print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1)))
except:
i=0
return response
def check_duplicate_image_new(items):
print("Images is loading to memory..")
#"""given items returned by Google Drive API, prints them in a tabular way"""
map= {}
list=[]
message= set()
duplicate_image=[]
final_result={}
if not items:
print('No files found.')
else:
for item in items:
if item["mimeType"] == "image/jpeg":
list.append(item["name"])
#Creating Map
value=[]
value.append(item["name"])
value.append(item["webViewLink"])
if item["name"] in map:
val=set()
val.add(item["webViewLink"])
map[item["name"]]=item["webViewLink"]
else:
map[item["name"]]=item["webViewLink"]
#Dowloading Image
downloadFile(item["id"],item["name"])
match=[]
flag=False
for i in range(len(list)-1):
temp=[]
dp_count=0
flag=False
if list[i] not in match :
flag=True
for j in range(i+1,len(list)):
istrue=is_duplicate(list[i],list[j])
if istrue==True:
dp_count=dp_count+1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match)==0:
match.append(list[i])
match.append(list[j])
if flag==True and dp_count !=0:
#print(list[i]," - ",dp_count)
final_result[list[i]]=temp
m={}
tdct=0
for x, y in final_result.items():
res=y
tdct=tdct+len(res)
s=set()
for i in res:
#s=set()
for item in items:
if item["mimeType"] == "image/jpeg":
if item["name"]==i:
s.add(item["webViewLink"])
m[x]=s
return m,tdct
|
def duplicate_image_list(imagelist):
#print(len(imagelist))
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count=0
l=[]
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) # if difference is all zeros it will return False
if result is True:
#print(imagelist[i],"Matching with ",imagelist[j])
l.append(imagelist[j])
count=count+1
dup_list.append(imagelist[i])
except:
i = 0
return dup_list
csv_map = {}
def check_duplicate_image(items):
# """given items returned by Google Drive API, prints them in a tabular way"""
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item["mimeType"]
if file_type == "image/jpeg":
image_name_list.append(item["name"])
#append url or
# Creating Map
value = []
value.append(item["name"])
value.append(item["webViewLink"])
map[item["id"]] = value
csv_map[item["name"]] = item["webViewLink"]
# Dowloading Image
downloadFile(item["id"], item["name"])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service,items, newName):
count=1
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
file = service.files().get(fileId=id).execute()
del file['id']
if "jpeg" in mime_type:
file['name'] = newName+str(count)+ ".jpg";
if "png" in mime_type:
file['name'] = newName+str(count)+ ".png";
updated_file = service.files().update(fileId=id, body=file).execute()
count=count+1
def count_image(id):
imageList = []
service = get_gdrive_service()
results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
items = results.get('files', [])
for item in items:
mime_Type = item["mimeType"]
if mime_Type == "image/jpeg":
imageList.append(item["name"])
if mime_Type == "application/vnd.google-apps.folder":
imageList.extend(count_image(item["id"]))
return imageList
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name=[]
rows = []
overview_map = {}
img_nm=0
for item in items:
name = item["name"]
mime_type = item["mimeType"]
if name == 'Test Techm':
testtechm_id = item['parents'][0]
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
if mime_type == "application/vnd.google-apps.folder":
folder_count = folder_count + 1
if mime_type == "image/jpeg":
# renameFile(item["id"],"rajj_img"+str(image_count))
image_count = image_count + 1
if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
list_all_folder_name.append(item["name"])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item["name"]] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
#duplicate_count = len(check_duplicate_image(items))
lt,duplicate_ct=check_duplicate_image_new(items)
duplicateImagehtml(folder_count, image_count, duplicate_ct,items)
# overview chart report page
draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ["Image Name", 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
#print("Device's adding into csv: " + str(count))
csvFile.close()
#print('Device CSV File creation is Done file name is ', fileName)
def duplicateImagehtml(folder_count, image_count, duplicate_ct,items):
uri = []
map1,count=check_duplicate_image_new(items)
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google
|
random_line_split
|
|
google_drive_data.py
|
(service):
file_metadata = {
'name': 'Test Techm',
'mimeType': 'application/vnd.google-apps.folder'
}
file = service.files().create(body=file_metadata,
fields='id').execute()
print('Folder ID: %s' % file.get('id'))
def get_gdrive_service():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
# return Google Drive API service
return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
service = get_gdrive_service()
request = service.files().get_media(fileId=id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
with io.open("." + "/" + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
def is_duplicate(img1,img2):
response=False
image1 = cv2.imread(img1)
image2 = cv2.imread(img2)
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) #if difference is all zeros it will return False
if result is True:
response=True
#duplicate_image.append(list[i])
#print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1)))
except:
i=0
return response
def check_duplicate_image_new(items):
print("Images is loading to memory..")
#"""given items returned by Google Drive API, prints them in a tabular way"""
map= {}
list=[]
message= set()
duplicate_image=[]
final_result={}
if not items:
print('No files found.')
else:
for item in items:
if item["mimeType"] == "image/jpeg":
list.append(item["name"])
#Creating Map
value=[]
value.append(item["name"])
value.append(item["webViewLink"])
if item["name"] in map:
val=set()
val.add(item["webViewLink"])
map[item["name"]]=item["webViewLink"]
else:
map[item["name"]]=item["webViewLink"]
#Dowloading Image
downloadFile(item["id"],item["name"])
match=[]
flag=False
for i in range(len(list)-1):
temp=[]
dp_count=0
flag=False
if list[i] not in match :
flag=True
for j in range(i+1,len(list)):
istrue=is_duplicate(list[i],list[j])
if istrue==True:
dp_count=dp_count+1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match)==0:
match.append(list[i])
match.append(list[j])
if flag==True and dp_count !=0:
#print(list[i]," - ",dp_count)
final_result[list[i]]=temp
m={}
tdct=0
for x, y in final_result.items():
res=y
tdct=tdct+len(res)
s=set()
for i in res:
#s=set()
for item in items:
if item["mimeType"] == "image/jpeg":
if item["name"]==i:
s.add(item["webViewLink"])
m[x]=s
return m,tdct
def duplicate_image_list(imagelist):
#print(len(imagelist))
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count=0
l=[]
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference) # if difference is all zeros it will return False
if result is True:
#print(imagelist[i],"Matching with ",imagelist[j])
l.append(imagelist[j])
count=count+1
dup_list.append(imagelist[i])
except:
i = 0
return dup_list
csv_map = {}
def check_duplicate_image(items):
# """given items returned by Google Drive API, prints them in a tabular way"""
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item["mimeType"]
if file_type == "image/jpeg":
image_name_list.append(item["name"])
#append url or
# Creating Map
value = []
value.append(item["name"])
value.append(item["webViewLink"])
map[item["id"]] = value
csv_map[item["name"]] = item["webViewLink"]
# Dowloading Image
downloadFile(item["id"], item["name"])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service,items, newName):
count=1
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
file = service.files().get(fileId=id).execute()
del file['id']
if "jpeg" in mime_type:
file['name'] = newName+str(count)+ ".jpg";
if "png" in mime_type:
file['name'] = newName+str(count)+ ".png";
updated_file = service.files().update(fileId=id, body=file).execute()
count=count+1
def count_image(id):
imageList = []
service = get_gdrive_service()
results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
items = results.get('files', [])
for item in items:
mime_Type = item["mimeType"]
if mime_Type == "image/jpeg":
imageList.append(item["name"])
if mime_Type == "application/vnd.google-apps.folder":
imageList.extend(count_image(item["id"]))
return imageList
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name=[]
rows = []
overview_map = {}
img_nm=0
for item in items:
name = item["name"]
mime_type = item["mimeType"]
if name == 'Test Techm':
testtechm_id = item['parents'][0]
for item in items:
id = item["id"]
name = item["name"]
mime_type = item["mimeType"]
if mime_type == "application/vnd.google-apps.folder":
folder_count = folder_count + 1
if mime_type == "image/jpeg":
# renameFile(item["id"],"rajj_img"+str(image_count))
image_count = image_count + 1
if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
list_all_folder_name.append(item["name"])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item["name"]] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
#duplicate_count = len(check_duplicate_image(items))
lt,duplicate_ct=check_duplicate_image_new(items)
duplicateImagehtml(folder_count, image_count, duplicate_ct,items)
# overview chart report page
draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ["Image Name", 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
#print("Device's adding into csv: " + str(count))
csvFile.close()
#print('Device CSV File creation is Done file name is ', fileName)
def duplicateImagehtml(folder_count, image_count, duplicate_ct,items):
uri
|
create_folder
|
identifier_name
|
|
mod.rs
|
items: &[(&[u8], &[u8])], // &[(key, value)]
) -> Result<()> {
let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?;
for (k, v) in items {
let actual_value = checker
.read_value(k)?
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
if actual_value.as_slice() != *v {
return Err(anyhow::Error::msg(Error::StorageValueMismatch));
}
}
Ok(())
}
}
#[derive(Debug)]
pub enum Error {
// InvalidStorageProof,
// StorageRootMismatch,
StorageValueUnavailable,
// InvalidValidatorSetProof,
ValidatorSetMismatch,
InvalidAncestryProof,
NoSuchBridgeExists,
InvalidFinalityProof,
// UnknownClientError,
// HeaderAncestryMismatch,
UnexpectedValidatorSetId,
StorageValueMismatch,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
// Error::StorageRootMismatch => write!(f, "storage root mismatch"),
Error::StorageValueUnavailable => write!(f, "storage value unavailable"),
Error::ValidatorSetMismatch => write!(f, "validator set mismatch"),
Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"),
Error::NoSuchBridgeExists => write!(f, "no such bridge exists"),
Error::InvalidFinalityProof => write!(f, "invalid finality proof"),
// Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"),
Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"),
Error::StorageValueMismatch => write!(f, "storage value mismatch"),
}
}
}
impl From<JustificationError> for Error {
fn from(e: JustificationError) -> Self {
match e {
JustificationError::BadJustification(msg) => {
error!("InvalidFinalityProof(BadJustification({}))", msg);
Error::InvalidFinalityProof
}
JustificationError::JustificationDecode => {
error!("InvalidFinalityProof(JustificationDecode)");
Error::InvalidFinalityProof
}
}
}
}
impl<T: Config> LightValidation<T>
where
NumberFor<T::Block>: AsPrimitive<usize>,
{
fn check_validator_set_proof(
state_root: &T::Hash,
proof: StorageProof,
validator_set: &[(AuthorityId, AuthorityWeight)],
_set_id: SetId,
) -> Result<()> {
let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?;
// By encoding the given set we should have an easy way to compare
// with the stuff we get out of storage via `read_value`
let mut encoded_validator_set = validator_set.encode();
encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERISON == 1
let actual_validator_set = checker
.read_value(b":grandpa_authorities")?
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
// TODO: check set_id
// checker.read_value(grandpa::CurrentSetId.key())
if encoded_validator_set == actual_validator_set {
Ok(())
} else {
Err(anyhow::Error::msg(Error::ValidatorSetMismatch))
}
}
}
// A naive way to check whether a `child` header is a decendent
// of an `ancestor` header. For this it requires a proof which
// is a chain of headers between (but not including) the `child`
// and `ancestor`. This could be updated to use something like
// Log2 Ancestors (#2053) in the future.
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()>
where
H: Header<Hash = H256>,
{
{
info!("ancestor_hash: {}", ancestor_hash);
for h in proof.iter() {
info!(
"block {:?} - hash: {} parent: {}",
h.number(),
h.hash(),
h.parent_hash()
);
}
info!(
"child block {:?} - hash: {} parent: {}",
child.number(),
child.hash(),
child.parent_hash()
);
}
let mut parent_hash = child.parent_hash();
if *parent_hash == ancestor_hash {
return Ok(());
}
// If we find that the header's parent hash matches our ancestor's hash we're done
for header in proof.iter() {
// Need to check that blocks are actually related
if header.hash() != *parent_hash {
break;
}
parent_hash = header.parent_hash();
if *parent_hash == ancestor_hash {
return Ok(());
}
}
Err(anyhow::Error::msg(Error::InvalidAncestryProof))
}
fn verify_grandpa_proof<B>(
justification: EncodedJustification,
hash: B::Hash,
number: NumberFor<B>,
set_id: u64,
voters: &VoterSet<AuthorityId>,
) -> Result<()>
where
B: BlockT<Hash = H256>,
NumberFor<B>: finality_grandpa::BlockNumberOps,
{
// We don't really care about the justification, as long as it's valid
let _ = GrandpaJustification::<B>::decode_and_verify_finalizes(
&justification,
(hash, number),
set_id,
voters,
)
.map_err(anyhow::Error::msg)?;
Ok(())
}
impl<T: Config> fmt::Debug for LightValidation<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"LightValidationTest {{ num_bridges: {}, tracked_bridges: {:?} }}",
self.num_bridges, self.tracked_bridges
)
}
}
impl<T: Config> fmt::Debug for BridgeInfo<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}",
self.last_finalized_block_header, self.current_set.list, self.current_set.id)
}
}
pub mod utils {
use parity_scale_codec::Encode;
/// Gets the prefix of a storage item
pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> {
let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec();
bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]);
bytes
}
/// Calculates the Substrate storage key prefix for a StorageMap
pub fn storage_map_prefix_twox_64_concat(
module: &[u8],
storage_item: &[u8],
key: &(impl Encode + ?Sized),
) -> Vec<u8> {
let mut bytes = sp_core::twox_128(module).to_vec();
bytes.extend(&sp_core::twox_128(storage_item)[..]);
let encoded = key.encode();
bytes.extend(sp_core::twox_64(&encoded));
bytes.extend(&encoded);
bytes
}
#[test]
#[ignore = "for debug"]
fn show_keys() {
let modules = [
"System",
"Timestamp",
"RandomnessCollectiveFlip",
"Utility",
"Multisig",
"Proxy",
"Vesting",
"Scheduler",
"Preimage",
"ParachainInfo",
"ParachainSystem",
"XcmpQueue",
"CumulusXcm",
"DmpQueue",
"PolkadotXcm",
"Balances",
"TransactionPayment",
"Authorship",
"CollatorSelection",
"Session",
"Aura",
"AuraExt",
"Identity",
"Democracy",
"Council",
"Treasury",
"Bounties",
"Lottery",
"TechnicalCommittee",
"TechnicalMembership",
"PhragmenElection",
"Tips",
"ChildBounties",
"ChainBridge",
"XcmBridge",
"XTransfer",
"PhalaMq",
"PhalaRegistry",
"PhalaComputation",
"PhalaStakePool",
"Assets",
"AssetsRegistry",
"PhalaStakePoolv2",
"PhalaVault",
"PhalaWrappedBalances",
"PhalaBasePool",
"Uniques",
"RmrkCore",
"RmrkEquip",
"RmrkMarket",
"PWNftSale",
"PWIncubation",
];
for module in modules.iter() {
let key = storage_prefix(module, "");
println!("{module}: 0x{}", hex::encode(key));
}
let storage_keys = [
"Collections",
"Nfts",
"Priorities",
"Children",
"Resources",
"EquippableBases",
"EquippableSlots",
"Properties",
"Lock",
"DummyStorage",
|
proof: StorageProof,
|
random_line_split
|
|
mod.rs
|
}
type BridgeId = u64;
pub trait Config: frame_system::Config<Hash = H256> {
type Block: BlockT<Hash = H256, Header = Self::Header>;
}
impl Config for chain::Runtime {
type Block = chain::Block;
}
#[derive(Encode, Decode, Clone, Serialize, Deserialize)]
pub struct LightValidation<T: Config> {
num_bridges: BridgeId,
#[serde(bound(
serialize = "T::Header: ::serde::Serialize",
deserialize = "T::Header: ::serde::de::DeserializeOwned"
))]
tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>,
}
impl<T: Config> LightValidation<T>
where
NumberFor<T::Block>: AsPrimitive<usize>,
{
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
LightValidation {
num_bridges: 0,
tracked_bridges: BTreeMap::new(),
}
}
pub fn initialize_bridge(
&mut self,
block_header: T::Header,
validator_set: AuthoritySet,
proof: StorageProof,
) -> Result<BridgeId> {
let state_root = block_header.state_root();
Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id)
.map_err(anyhow::Error::msg)?;
let bridge_info = BridgeInfo::new(block_header, validator_set);
let new_bridge_id = self.num_bridges + 1;
self.tracked_bridges.insert(new_bridge_id, bridge_info);
self.num_bridges = new_bridge_id;
Ok(new_bridge_id)
}
/// Submits a sequence of block headers to the light client to validate
///
/// The light client accepts a sequence of block headers, optionally with an authority set change
/// in the last block. Without the authority set change, it assumes the authority set and the set
/// id remains the same after submitting the blocks. One submission can have at most one authortiy
/// set change (change.set_id == last_set_id + 1).
pub fn submit_finalized_headers(
&mut self,
bridge_id: BridgeId,
header: T::Header,
ancestry_proof: Vec<T::Header>,
grandpa_proof: EncodedJustification,
auhtority_set_change: Option<AuthoritySetChange>,
) -> Result<()> {
let bridge = self
.tracked_bridges
.get(&bridge_id)
.ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?;
// Check that the new header is a decendent of the old header
let last_header = &bridge.last_finalized_block_header;
verify_ancestry(ancestry_proof, last_header.hash(), &header)?;
let block_hash = header.hash();
let block_num = *header.number();
// Check that the header has been finalized
let voters = &bridge.current_set;
let voter_set = VoterSet::new(voters.list.clone()).unwrap();
let voter_set_id = voters.id;
verify_grandpa_proof::<T::Block>(
grandpa_proof,
block_hash,
block_num,
voter_set_id,
&voter_set,
)?;
match self.tracked_bridges.get_mut(&bridge_id) {
Some(bridge_info) => {
bridge_info.last_finalized_block_header = header;
if let Some(change) = auhtority_set_change {
// Check the validator set increment
if change.authority_set.id != voter_set_id + 1 {
return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId));
}
// Check validator set change proof
let state_root = bridge_info.last_finalized_block_header.state_root();
Self::check_validator_set_proof(
state_root,
change.authority_proof,
&change.authority_set.list,
change.authority_set.id,
)?;
// Commit
bridge_info.current_set = AuthoritySet {
list: change.authority_set.list,
id: change.authority_set.id,
}
}
}
_ => panic!("We succesfully got this bridge earlier, therefore it exists; qed"),
};
Ok(())
}
pub fn validate_storage_proof(
&self,
state_root: T::Hash,
proof: StorageProof,
items: &[(&[u8], &[u8])], // &[(key, value)]
) -> Result<()> {
let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?;
for (k, v) in items {
let actual_value = checker
.read_value(k)?
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
if actual_value.as_slice() != *v {
return Err(anyhow::Error::msg(Error::StorageValueMismatch));
}
}
Ok(())
}
}
#[derive(Debug)]
pub enum Error {
// InvalidStorageProof,
// StorageRootMismatch,
StorageValueUnavailable,
// InvalidValidatorSetProof,
ValidatorSetMismatch,
InvalidAncestryProof,
NoSuchBridgeExists,
InvalidFinalityProof,
// UnknownClientError,
// HeaderAncestryMismatch,
UnexpectedValidatorSetId,
StorageValueMismatch,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
// Error::StorageRootMismatch => write!(f, "storage root mismatch"),
Error::StorageValueUnavailable => write!(f, "storage value unavailable"),
Error::ValidatorSetMismatch => write!(f, "validator set mismatch"),
Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"),
Error::NoSuchBridgeExists => write!(f, "no such bridge exists"),
Error::InvalidFinalityProof => write!(f, "invalid finality proof"),
// Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"),
Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"),
Error::StorageValueMismatch => write!(f, "storage value mismatch"),
}
}
}
impl From<JustificationError> for Error {
fn from(e: JustificationError) -> Self {
match e {
JustificationError::BadJustification(msg) => {
error!("InvalidFinalityProof(BadJustification({}))", msg);
Error::InvalidFinalityProof
}
JustificationError::JustificationDecode => {
error!("InvalidFinalityProof(JustificationDecode)");
Error::InvalidFinalityProof
}
}
}
}
impl<T: Config> LightValidation<T>
where
NumberFor<T::Block>: AsPrimitive<usize>,
{
fn check_validator_set_proof(
state_root: &T::Hash,
proof: StorageProof,
validator_set: &[(AuthorityId, AuthorityWeight)],
_set_id: SetId,
) -> Result<()> {
let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?;
// By encoding the given set we should have an easy way to compare
// with the stuff we get out of storage via `read_value`
let mut encoded_validator_set = validator_set.encode();
encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERISON == 1
let actual_validator_set = checker
.read_value(b":grandpa_authorities")?
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
// TODO: check set_id
// checker.read_value(grandpa::CurrentSetId.key())
if encoded_validator_set == actual_validator_set {
Ok(())
} else {
Err(anyhow::Error::msg(Error::ValidatorSetMismatch))
}
}
}
// A naive way to check whether a `child` header is a decendent
// of an `ancestor` header. For this it requires a proof which
// is a chain of headers between (but not including) the `child`
// and `ancestor`. This could be updated to use something like
// Log2 Ancestors (#2053) in the future.
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()>
where
H: Header<Hash = H256>,
{
{
info!("ancestor_hash: {}", ancestor_hash);
for h in proof.iter() {
info!(
"block {:?} - hash: {} parent: {}",
h.number(),
h.hash(),
h.parent_hash()
);
}
info!(
"child block {:?} - hash: {} parent: {}",
child.number(),
child.hash(),
child.parent_hash()
);
}
let mut parent_hash = child.parent_hash();
if *parent_hash == ancestor_hash {
return Ok(());
}
// If we find that the header's parent hash matches our ancestor's hash we're done
for header in proof.iter() {
// Need to check that blocks are actually related
if header.hash() != *parent_hash {
break;
}
parent_hash = header.parent_hash();
if *parent_hash == ancestor_hash {
return Ok(());
}
}
Err(anyhow::Error::msg(Error
|
{
BridgeInfo {
last_finalized_block_header: block_header,
current_set: validator_set,
}
}
|
identifier_body
|
|
mod.rs
|
(
&mut self,
block_header: T::Header,
validator_set: AuthoritySet,
proof: StorageProof,
) -> Result<BridgeId> {
let state_root = block_header.state_root();
Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id)
.map_err(anyhow::Error::msg)?;
let bridge_info = BridgeInfo::new(block_header, validator_set);
let new_bridge_id = self.num_bridges + 1;
self.tracked_bridges.insert(new_bridge_id, bridge_info);
self.num_bridges = new_bridge_id;
Ok(new_bridge_id)
}
/// Submits a sequence of block headers to the light client to validate
///
/// The light client accepts a sequence of block headers, optionally with an authority set change
/// in the last block. Without the authority set change, it assumes the authority set and the set
/// id remains the same after submitting the blocks. One submission can have at most one authortiy
/// set change (change.set_id == last_set_id + 1).
pub fn submit_finalized_headers(
&mut self,
bridge_id: BridgeId,
header: T::Header,
ancestry_proof: Vec<T::Header>,
grandpa_proof: EncodedJustification,
auhtority_set_change: Option<AuthoritySetChange>,
) -> Result<()> {
let bridge = self
.tracked_bridges
.get(&bridge_id)
.ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?;
// Check that the new header is a decendent of the old header
let last_header = &bridge.last_finalized_block_header;
verify_ancestry(ancestry_proof, last_header.hash(), &header)?;
let block_hash = header.hash();
let block_num = *header.number();
// Check that the header has been finalized
let voters = &bridge.current_set;
let voter_set = VoterSet::new(voters.list.clone()).unwrap();
let voter_set_id = voters.id;
verify_grandpa_proof::<T::Block>(
grandpa_proof,
block_hash,
block_num,
voter_set_id,
&voter_set,
)?;
match self.tracked_bridges.get_mut(&bridge_id) {
Some(bridge_info) => {
bridge_info.last_finalized_block_header = header;
if let Some(change) = auhtority_set_change {
// Check the validator set increment
if change.authority_set.id != voter_set_id + 1 {
return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId));
}
// Check validator set change proof
let state_root = bridge_info.last_finalized_block_header.state_root();
Self::check_validator_set_proof(
state_root,
change.authority_proof,
&change.authority_set.list,
change.authority_set.id,
)?;
// Commit
bridge_info.current_set = AuthoritySet {
list: change.authority_set.list,
id: change.authority_set.id,
}
}
}
_ => panic!("We succesfully got this bridge earlier, therefore it exists; qed"),
};
Ok(())
}
pub fn validate_storage_proof(
&self,
state_root: T::Hash,
proof: StorageProof,
items: &[(&[u8], &[u8])], // &[(key, value)]
) -> Result<()> {
let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?;
for (k, v) in items {
let actual_value = checker
.read_value(k)?
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
if actual_value.as_slice() != *v {
return Err(anyhow::Error::msg(Error::StorageValueMismatch));
}
}
Ok(())
}
}
#[derive(Debug)]
pub enum Error {
// InvalidStorageProof,
// StorageRootMismatch,
StorageValueUnavailable,
// InvalidValidatorSetProof,
ValidatorSetMismatch,
InvalidAncestryProof,
NoSuchBridgeExists,
InvalidFinalityProof,
// UnknownClientError,
// HeaderAncestryMismatch,
UnexpectedValidatorSetId,
StorageValueMismatch,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
// Error::StorageRootMismatch => write!(f, "storage root mismatch"),
Error::StorageValueUnavailable => write!(f, "storage value unavailable"),
Error::ValidatorSetMismatch => write!(f, "validator set mismatch"),
Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"),
Error::NoSuchBridgeExists => write!(f, "no such bridge exists"),
Error::InvalidFinalityProof => write!(f, "invalid finality proof"),
// Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"),
Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"),
Error::StorageValueMismatch => write!(f, "storage value mismatch"),
}
}
}
impl From<JustificationError> for Error {
fn from(e: JustificationError) -> Self {
match e {
JustificationError::BadJustification(msg) => {
error!("InvalidFinalityProof(BadJustification({}))", msg);
Error::InvalidFinalityProof
}
JustificationError::JustificationDecode => {
error!("InvalidFinalityProof(JustificationDecode)");
Error::InvalidFinalityProof
}
}
}
}
impl<T: Config> LightValidation<T>
where
NumberFor<T::Block>: AsPrimitive<usize>,
{
fn check_validator_set_proof(
state_root: &T::Hash,
proof: StorageProof,
validator_set: &[(AuthorityId, AuthorityWeight)],
_set_id: SetId,
) -> Result<()> {
let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?;
// By encoding the given set we should have an easy way to compare
// with the stuff we get out of storage via `read_value`
let mut encoded_validator_set = validator_set.encode();
encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERISON == 1
let actual_validator_set = checker
.read_value(b":grandpa_authorities")?
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
// TODO: check set_id
// checker.read_value(grandpa::CurrentSetId.key())
if encoded_validator_set == actual_validator_set {
Ok(())
} else {
Err(anyhow::Error::msg(Error::ValidatorSetMismatch))
}
}
}
// A naive way to check whether a `child` header is a decendent
// of an `ancestor` header. For this it requires a proof which
// is a chain of headers between (but not including) the `child`
// and `ancestor`. This could be updated to use something like
// Log2 Ancestors (#2053) in the future.
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()>
where
H: Header<Hash = H256>,
{
{
info!("ancestor_hash: {}", ancestor_hash);
for h in proof.iter() {
info!(
"block {:?} - hash: {} parent: {}",
h.number(),
h.hash(),
h.parent_hash()
);
}
info!(
"child block {:?} - hash: {} parent: {}",
child.number(),
child.hash(),
child.parent_hash()
);
}
let mut parent_hash = child.parent_hash();
if *parent_hash == ancestor_hash {
return Ok(());
}
// If we find that the header's parent hash matches our ancestor's hash we're done
for header in proof.iter() {
// Need to check that blocks are actually related
if header.hash() != *parent_hash {
break;
}
parent_hash = header.parent_hash();
if *parent_hash == ancestor_hash {
return Ok(());
}
}
Err(anyhow::Error::msg(Error::InvalidAncestryProof))
}
fn verify_grandpa_proof<B>(
justification: EncodedJustification,
hash: B::Hash,
number: NumberFor<B>,
set_id: u64,
voters: &VoterSet<AuthorityId>,
) -> Result<()>
where
B: BlockT<Hash = H256>,
NumberFor<B>: finality_grandpa::BlockNumberOps,
{
// We don't really care about the justification, as long as it's valid
let _ = GrandpaJustification::<B>::decode_and_verify_finalizes(
&justification,
(hash, number),
set_id,
voters,
)
.map_err(anyhow::Error::msg)?;
Ok(())
}
impl<T: Config> fmt::Debug for LightValidation<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"LightValidationTest {{ num_bridges: {}, tracked_bridges: {:?} }}",
self.num_bridges, self.tracked_bridges
)
}
}
impl<T: Config>
|
initialize_bridge
|
identifier_name
|
|
autodetect.js
|
("Missing target type in SCConfig: " + filepath);
readConfig[opts.target] = opts;
},
});
vm.runInContext(scconfig,ctx,filepath);
return readConfig;
};
var testJSON = function(text){
var ret;
try {
ret = JSON.parse(text);
return ret;
}
catch(e){
return undefined;
}
};
var namedHashToArray = function(hash){
if(SC.typeOf(hash) === SC.T_ARRAY) return hash;
else {
return Object.keys(hash).map(function(k){
if(!hash.name) hash.name = k;
return hash;
});
}
};
var resolveReference = function(ref,context){
// 1. "sproutcore": depending on the context this is either an app, a framework or a module in the root of the project
// 2. "sproutcore:desktop": this is the subframework desktop inside the sproutcore framework
// 3. "sproutcore/lib/index.html": this is a reference to the file lib/index.html inside the sproutcore framework
// 4. "http://my.host.ext": a url, is taken literally
//context is one of "app","framework","module"
var prefix, p;
if(context === "app"){
prefix = "apps";
} else if(context === "framework"){
prefix = "frameworks";
} else if(context === "module"){
prefix = "modules";
}
if(ref.indexOf("http") > -1){
return ref; // don't do anything
}
if(ref.indexOf(":") > -1){
p = ref.replace(/\:/g,"/frameworks/");
return path.join(prefix,p);
}
return path.join(prefix,ref);
};
var projectPath;
// we save all config files, and generate a complex of hashes for every object to create
// in order for the actual creation process of all the levels to be easy and straightforward
// meaning that the apps will be a hash where the name of the app is a key,
// and the value an array of configurations. The same goes for the frameworks inside the apps,
// except that the frameworks will be an array of arrays, where each array will contain the different
// configuration for each level.
var allConfigs = {
project: null,
apps: []
};
var resultConfig;
var cb;
var AppConfigParser = SC.Object.extend({
content: null,
done: false, // observable to indicate ready with parsing
init: function(){
// take the content, and start working
if(!this.content) throw new Error("AppConfigParser: no content!");
this._fwconfigs = [];
},
start: function(){ // we create a separate function in order to allow attaching observers before starting the procedure
// content should be a hash or array
if(!this.content.frameworks){
this.set('done',true); // nothing to do
}
else {
this.content.frameworks = namedHashToArray(this.content.frameworks); // will convert if necessary
if(this.content.frameworks.length === 0){
this.set('done',true); // nothing to do
}
else {
this._currFWIndex = 0;
this.takeNext();
}
}
},
_currFWIndex: null,
takeNext: function(){
if(this._currFWIndex >= this.content.frameworks.length){
this.finish();
return; // done with parsing
}
// if not done with parsing
var curFW = this.content.frameworks[this._currFWIndex];
var pOne,pTwo;
if(SC.typeOf(curFW) === SC.T_STRING){
p = resolveReference(curFW,"framework");
}
else { // hash
if(!fw.path){
p = resolveReference(fw.name,"framework");
}
else p = fw.path;
}
pOne = path.join(p,'sc_config.json');
pTwo = path.join(p,'sc_config');
async.exec(fs.readFile,pOne, { encoding: "utf8"}).notify(this,'nextJSONDidRead', pOne, fw);
async.exec(fs.readFile,pTwo, { encoding: "utf8"}).notify(this,'nextJSDidRead', pTwo, fw);
},
nextJSONDidRead: function(result, args){
//args[0] == path, args[1] == fw
var data, ret;
if(SC.ok(result)){
// there is a config file, check dependencies
data = testJSON(result.get('result'));
ret = jsonValidate(data, SCHEMAS.FRAMEWORK);
if(ret && !ret.isValid){
throw new Error("Found syntax error in " + args[0]);
}
// valid json, now check deps
if(data.dependencies && data.dependencies.length > 0){
data.dependencies.forEach(function(dep){
if(this.content.frameworks.indexOf(dep) === -1){
this.content.frameworks.push(dep);
}
});
}
this._jsonReturn = data;
}
this._jsonDidReturn = true;
this.proceedToNext(args[0],args[1]);
},
nextJSDidRead: function(result, args){
if(SC.ok(result)){
// parse the config in result...
// problem is that this might need another async call to figure out whether
// target is an app or a framework...
}
this._jsDidReturn = true;
this.proceedToNext(args[0],args[1]);
},
// slots to store any return values on. If there is nothing to read, the value is true
_jsonDidReturn: false,
_jsDidReturn: false,
_jsonReturn: false,
_jsReturn: false,
proceedToNext: function(fwpath, fw){
var ret;
if(this._jsDidReturn && this._jsonDidReturn){
this._jsDidReturn = false;
this._jsonDidReturn = false;
// next take the data
var data = this._jsReturn? this._jsReturn : this._jsonReturn? this._jsonReturn: { path: fw };
if(SC.typeOf(fw) === SC.T_HASH){
this._fwconfigs.push([args[1], data]);
}
else {
this._fwconfigs.push(data);
}
this._jsReturn = this._jsonReturn = null; // reset return values
this._currFWIndex += 1;
this.takeNext();
}
},
finish: function(){
// nextDidRead stores the temporary fw configs in this._fwconfigs, we need to replace the
// original frameworks with a reversed _fwconfigs, and set done
this.content.frameworks = this._fwconfigs.reverse();
this.set('done',true);
}
});
var AutoDetection = SC.Statechart.create({
rootState: SC.State.design({
initialSubstate: 'PROJECTCONFIG',
PROJECTCONFIG: SC.State.design({
// detect project config file and load if necessary (the process can be a reload)
enterState: function(){
async.exec(fs.readFile,path.join(projectPath,'sc_config.json'), {encoding: 'utf8'}).notify(AutoDetection,'readFileDidRespond');
},
readFileDidRespond: function(result){
if(SC.ok(result)){
//file exists
var data = testJSON(result.get('result'));
var ret = jsonValidate(data, SCHEMAS.PROJECT);
if(ret && !ret.valid){
// error
util.log('invalid project config: ' + util.inspect(ret.errors));
}
else {
util.log('valid project config, continuing...');
allConfigs.set('project',data);
}
}
else {
// file doesn't exist
util.log('no project config file found');
}
this.gotoState('APPS');
}
}),
APPS: SC.State.design({
// detect apps folder, if exists
enterState: function(){
async.exec(fs.readdir, path.join(projectPath,'apps')).notify(AutoDetection, 'detectAppsDidRespond');
},
appnames: null,
detectAppsDidRespond: function(result){
if(SC.ok(result)){
// apps folder does exist, now check the contents
var list = result.get('result');
if(list.length > 0){
this.appnames = list;
list.forEach(function(app){
async.exec(fs.readFile,path.join(projectPath,'apps',app,'sc_config.json'))
.notify(AutoDetection, 'readAppConfigDidRespond',app);
});
}
else {
// apps folder does exist, but is empty
throw new Error("Did you create any application yet?");
}
}
else { // apps folder doesn't exist... not good
throw new Error("We seem not to be in a project folder...");
}
},
readAppConfigDidRespond: function(result,appname){
var app = appname[0];
if(SC.ok(result)){ // app config exists
var data = testJSON(result.get('result'));
var ret = jsonValidate(data, SCHEMAS.APP);
if(ret && !ret.valid){
// error
util.log('invalid app config: ' + util.inspect(ret.errors));
}
else {
util.log('valid project config, continuing...');
|
allConfigs.apps.push(data);
}
}
else {
util.log('app found with name ' + app + " but no config file detected.");
all
|
if(!data.name) data.name = app;
|
random_line_split
|
product.ts
|
Client": this.values.customerName,
"productUi": this.product.product.id,
"productName": this.product.product.name,
"date": year+'/'+month+'/'+day,
"hour": this.hourInit,
"lat":this.lat,
"lng":this.long,
"onesignal":this.values.userId,
"location" : this.addressesCustomer
});
this.service.sendNotification({
"title":"Nueva solicitud",
"content":`Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
"onesignalid":this.providerOneSignal
})
this.disableSubmit = true
this.BookNow = 'PleaseWait'
// var date = new Date(this.selectedTime);
this.product_slot.map(result => {
if(this.product.product.id == result.product_id)
{
var date = new Date(new Date(result.date))
var year = date.getFullYear()
var month = date.getMonth() + 1
var day = date.getDate()
this.service
.addToCart(
resource_id,
month,
day,
year,
result.date,
this.product.product,
)
.then(results => {
console.log(results)
})
this.values.count += parseInt(this.quantity)
}
})
this.disableSubmit = false
this.BookNow = 'BookNow'
this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
this.returnHome()
// }
}
showAlert(title, text) {
let alert = this.alert.create({
title: title,
subTitle: text,
buttons: ['OK'],
});
alert.present();
}
setVariations() {
this.product.product.attributes.forEach(item => {
if (item.selected) {
this.options['variation[attribute_pa_' + item.name + ']'] =
item.selected
}
})
for (var i = 0; i < this.product.product.attributes.length; i++) {
console.log(this.product.product.attributes[i].name)
if (
this.product.product.attributes[i].variation &&
this.product.product.attributes[i].selected == undefined
) {
this.functions.showAlert(
'Options',
'Please Select Product ' +
this.product.product.attributes[i].name +
' Option',
)
return false
}
}
return true
}
onSelect($event, id) {
let date = new Date($event.time)
console.log({ date })
this.month = date.getUTCMonth() + 1 //months from 1-12
this.day = date.getUTCDate()
this.year = date.getUTCFullYear()
//si cambiamos la fecha reseteamos los horarios
this.schedule = null
this.selectedTime = null
this.disableSubmit = true
if (
this.product.product.resources_full &&
this.product.product.resources_full.length > 0 &&
!this.selectedService
) {
this.functions.showAlert('error', this.lan.pleaseSelect)
return
}
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService.resource_id
: null
// if (this.values.isLoggedIn) {
this.service
.getBlocks(this.day, this.month, this.year, id, resource_id)
.then(results => {
let res = results as string
let find = '<li class="block"'
let regex = new RegExp(find, 'g')
res = res.replace(
regex,
'<li class="block" ng-click="selectSchedule()" ',
)
console.log('schedule', res)
var match = res.match(/data-value="(.*?)"/gi)
if (!match) {
this.schedule = null
return
}
match.forEach((el, i, arr) => {
arr[i] = el.replace('data-value=', '').replace(/"/g, '')
})
this.schedule = match
})
}
update_blocks(a) {
if (a.success == 'Success') {
//this.functions.showAlert(a.success, a.message);
this.values.blockslistId[this.product.product.id] = true
} else {
this.functions.showAlert('error', 'error')
}
}
updateCart(a) {
console.log('a:', a)
this.disableSubmit = false
this.values.count += parseInt(this.quantity)
this.BookNow = 'BookNow'
this.returnHome()
// this.getCart()
}
returnHome(){
this.nav.push(ProductsListPage);
}
getCart() {
this.nav.parent.select(2);
}
mySlideOptions = {
initialSlide: 1,
loop: true,
autoplay: 5800,
pager: true,
}
getReviews() {
this.service.getReviews(this.id).then(results => this.handleReview(results))
}
handleReview(a) {
this.reviews = a
for (let item in this.reviews.product_reviews) {
this.reviews.product_reviews[item].avatar = md5(
this.reviews.product_reviews[item].reviewer_email,
)
}
}
addToWishlist(id) {
if (this.values.isLoggedIn) {
this.service.addToWishlist(id).then(results => this.update(results))
} else {
this.functions.showAlert(
'Warning',
'Debe iniciar sesión para agregar un servicio a la lista de deseos',
)
}
}
update(a) {
if (a.success == 'Success') {
//this.functions.showAlert(a.success, a.message);
this.values.wishlistId[this.product.product.id] = true
} else {
this.functions.showAlert('error', 'error')
}
}
removeFromWishlist(id) {
this.values.wishlistId[id] = false
this.service.deleteItem(id).then(results => this.updateWish(results, id))
}
updateWish(results, id) {
if (results.status == 'success') {
this.values.wishlistId[id] = false
}
}
chooseVariationOne(){
this.chooseVariation(this.optionss);
}
chooseVariation(option) {
console.log(option);
console.log(this.selectedService);
if (this.selectedService) {
this.selectedService = null
this.product.product.price = this.product.product.minPrice
}
this.product.product.resources_full.forEach(item => {
if (item.resource_id == option.resource_id) {
this.selectedService = option
this.product.product.price = this.selectedService.price
this.disableSubmit =
(this.product.product.resources_full.length > 0 &&
!this.selectedService) ||
!this.selectedTime
}
})
// this.product.product.variations.forEach(variation => {
// var test = new Array(this.usedVariationAttributes.length)
// test.fill(false)
// this.usedVariationAttributes.forEach(attribute => {
// if (variation.attributes.length == 0) {
// this.options.variation_id = variation.id
// this.product.product.in_stock = variation.in_stock
// this.product.product.price = variation.price
// this.product.product.sale_price = variation.sale_price
// this.product.product.regular_price = variation.regular_price
// } else {
// variation.attributes.forEach((item, index) => {
// if (
// attribute.selected &&
// item.name.toUpperCase() == attribute.name.toUpperCase() &&
// item.option.toUpperCase() == attribute.selected.toUpperCase()
// ) {
// test[index] = true
// }
// })
// if (test.every(v => v == true)) {
// this.options.variation_id = variation.id
// this.product.product.in_stock = variation.in_stock
// this.product.product.price = variation.price
// this.product.product.sale_price = variation.sale_price
// this.product.product.regular_price = variation.regular_price
// test.fill(false)
// }
// }
// })
// })
}
selectTime(time) {
this.selectedTime = time
this.disableSubmit =
(this.product.product.resources_full.length > 0 &&
!this.selectedService) ||
!this.selectedTime
}
getTime(item) {
return moment(item).format('hh:mm a')
}
ngOnInit() {
this.translate.get(['Please select a service']).subscribe(translations => {
this.lan.pleaseSelect = translations['Please select a service'];
});
}
getAddressFromCoords() {
console.log("getAddressFromCoords "+this.miLatitude+" "+this.miLongitude);
let options: NativeGeocoderOptions = {
useLocale: true,
maxResults: 5
};
this.nativeGeocoder.reverseGeocode(this.miLatitude, this.miLongitude, options)
.then((result: NativeGeocoderReverseResult[]) => {
console.log(JSON.stringify(result[0]))
this.autocomplete.input = result[0].locality+', '+ result[0].administrativeArea+', '+ result[0].countryName;
}
)
.catch((error: any) =>{
this.address = "Address Not Available!";
console.log(error)
});
this.lat = this.miLatitude.toString();
this.long = this.miLongitude.toString();
}
getDate(date){
|
this.processDate = date
}
|
identifier_body
|
|
product.ts
|
addressesCustomer: any;
constructor(
public alert:AlertController,
public translate: TranslateService,
public nav: NavController,
public service: ProductService,
public servi:Service,
public otherservice: Service,
params: NavParams,
public functions: Functions,
public values: Values,
private platform: Platform,
private geolocation: Geolocation,
private nativeGeocoder: NativeGeocoder,
) {
console.log("prueba id onesignal", this.values.userId);
this.lat = '';
this.long = '';
this.options = []
this.optionss = []
this.quantity = '1'
this.BookNow = 'BookNow'
this.otherservice.getCustomer()
.then((results) => this.handleCustomer(results));
this.otherservice.getAddress()
.then((resultsAddresses) => this.handleAddress(resultsAddresses));
if (params.data.id) {
this.selectedService = null
this.product.product = params.data.id
this.id = params.data.id
this.product_slot = params.data.product_sl;
this.date = params.data.date;
this.hourInit = params.data.hourInit;
this.hourEnd = params.data.hourEnd;
// this.selectedTime = this.date+'T'+this.hourInit
this.options.product_id = this.id
this.usedVariationAttributes = (this.product.product
.resources_full as Array<any>).map(item => item)
console.log('usedVariationAttributes:', this.usedVariationAttributes)
this.loadDataProduct();
} else {
// this.options.product_id = this.id
this.service
.getProduct(params.data.id)
.then(results => this.handleProductResults(results))
}
this.getReviews()
platform.ready().then(() => {
const subscription = this.geolocation.watchPosition()
.filter((p) => p.coords !== undefined) //Filter Out Errors
.subscribe(position => {
this.miLatitude = position.coords.latitude;
this.miLongitude = position.coords.longitude;
// console.log("locomiLocation=" + position.coords.latitude + ' ' + position.coords.longitude);
});
});
this.servi.getHomerOneSignal(this.product.product.id).then((result:any) => this.providerOneSignal = result.providers[0].onesignal);
//con esto antes obtenia el providerOneSignal
// for (let i = 0; i < this.values.homerOneSignal.length; i++) {
// if(this.values.homerOneSignal[i].product == this.product.product.id){
// this.providerOneSignal = this.values.homerOneSignal[i].providerOneSignal
// }
// }
}
loadDataProduct(){
//según el horario, deshabilitamos los dias de la semana que no están definidos en el Available
this.disableWeekDays = [0, 1, 2, 3, 4, 5, 6]
this.product.product.availability.forEach(element => {
let day = Number((element.type as string).split(':')[1])
console.log({ day })
const index = this.disableWeekDays.indexOf(day)
if (index > -1) {
this.disableWeekDays.splice(index, 1)
}
})
console.log('this.daysConfig', this.daysConfig)
console.log('this.disableWeekDays', this.disableWeekDays)
//Ponemos los dias as marked (para que aparezcan de un color azul) 6 meses hacia adelante
for (let index = 0; index < 180; index++) {
let cur_day = moment()
.add(index, 'days')
.toDate()
.getDay()
const index_cur_day = this.disableWeekDays.indexOf(cur_day)
if (index_cur_day > -1) {
this.daysConfig.push({
date: moment()
.add(index, 'days')
.toDate(),
disable: true,
})
}
this.daysConfig.push({
date: moment()
.add(index, 'days')
.toDate(),
marked: true,
})
}
//Por defecto iniciamos con el booking deshabilitado
this.disableSubmit = true
}
handleAddress(result){
this.addresses = result
this.addressesCustomer = this.addresses.customer.billing_address.address_1
console.log(this.addressesCustomer)
}
handleCustomer(result){
this.customers = result
}
handleProductResults(results) {
this.selectedService = null
this.product.product = results
this.id = results.id
console.log('producto', this.product.product)
this.options.product_id = this.id
console.log('Product: ', this.product.product.resources_full)
this.usedVariationAttributes = (this.product.product
.resources_full as Array<any>).map(item => item)
console.log('usedVariationAttributes:', this.usedVariationAttributes)
this.loadDataProduct();
// this.product = results
// this.usedVariationAttributes = this.product.product.attributes.filter(
// function(attribute) {
// return attribute.variation == true
// },
// )
}
getProduct(id) {
this.nav.push(ProductPage, id)
console.log(id)
}
addToCart() {
// if (!this.values.isLoggedIn) {
// this.functions.showAlert(
// 'Options',
// 'Please login or create an account to continue',
// )
// this.nav.push(AccountLogin)
// }
//Validamos se el producto contiene resources
// if (
// this.product.product.resources_full.length > 0 &&
// !this.selectedService
// ) {
// this.functions.showAlert(
// 'Options',
// 'Select a service and booking information',
// )
// return
// }
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService.resource_id
: null
this.getAddressFromCoords();
var date = moment(this.date)
var year = date.year()
var month = date.month()
var day = date.day()
this.service.addOrders({
"clientUi": this.values.customerId,
"nameClient": this.values.customerName,
"productUi": this.product.product.id,
"productName": this.product.product.name,
"date": year+'/'+month+'/'+day,
"hour": this.hourInit,
"lat":this.lat,
"lng":this.long,
"onesignal":this.values.userId,
"location" : this.addressesCustomer
});
this.service.sendNotification({
"title":"Nueva solicitud",
"content":`Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
"onesignalid":this.providerOneSignal
})
this.disableSubmit = true
this.BookNow = 'PleaseWait'
// var date = new Date(this.selectedTime);
this.product_slot.map(result => {
if(this.product.product.id == result.product_id)
{
var date = new Date(new Date(result.date))
var year = date.getFullYear()
var month = date.getMonth() + 1
var day = date.getDate()
this.service
.addToCart(
resource_id,
month,
day,
year,
result.date,
this.product.product,
)
.then(results => {
console.log(results)
})
this.values.count += parseInt(this.quantity)
}
})
this.disableSubmit = false
this.BookNow = 'BookNow'
this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
this.returnHome()
// }
}
showAlert(title, text) {
let alert = this.alert.create({
title: title,
subTitle: text,
buttons: ['OK'],
});
alert.present();
}
setVariations() {
this.product.product.attributes.forEach(item => {
if (item.selected) {
this.options['variation[attribute_pa_' + item.name + ']'] =
item.selected
}
})
for (var i = 0; i < this.product.product.attributes.length; i++) {
console.log(this.product.product.attributes[i].name)
if (
this.product.product.attributes[i].variation &&
this.product.product.attributes[i].selected == undefined
) {
this.functions.showAlert(
'Options',
'Please Select Product ' +
this.product.product.attributes[i].name +
' Option',
)
return false
}
}
return true
}
onSelect($event, id) {
let date = new Date($event.time)
console.log({ date })
this.month = date.getUTCMonth() + 1 //months from 1-12
this.day = date.getUTCDate()
this.year = date.getUTCFullYear()
//si cambiamos la fecha reseteamos los horarios
this.schedule = null
this.selectedTime = null
this.disableSubmit = true
if (
this.product.product.resources_full &&
this.product.product.resources_full.length > 0 &&
!this.selectedService
) {
this.functions.showAlert('error', this.lan.pleaseSelect)
return
}
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService
|
customers: any;
addresses: any;
|
random_line_split
|
|
product.ts
|
: TranslateService,
public nav: NavController,
public service: ProductService,
public servi:Service,
public otherservice: Service,
params: NavParams,
public functions: Functions,
public values: Values,
private platform: Platform,
private geolocation: Geolocation,
private nativeGeocoder: NativeGeocoder,
) {
console.log("prueba id onesignal", this.values.userId);
this.lat = '';
this.long = '';
this.options = []
this.optionss = []
this.quantity = '1'
this.BookNow = 'BookNow'
this.otherservice.getCustomer()
.then((results) => this.handleCustomer(results));
this.otherservice.getAddress()
.then((resultsAddresses) => this.handleAddress(resultsAddresses));
if (params.data.id) {
this.selectedService = null
this.product.product = params.data.id
this.id = params.data.id
this.product_slot = params.data.product_sl;
this.date = params.data.date;
this.hourInit = params.data.hourInit;
this.hourEnd = params.data.hourEnd;
// this.selectedTime = this.date+'T'+this.hourInit
this.options.product_id = this.id
this.usedVariationAttributes = (this.product.product
.resources_full as Array<any>).map(item => item)
console.log('usedVariationAttributes:', this.usedVariationAttributes)
this.loadDataProduct();
} else {
// this.options.product_id = this.id
this.service
.getProduct(params.data.id)
.then(results => this.handleProductResults(results))
}
this.getReviews()
platform.ready().then(() => {
const subscription = this.geolocation.watchPosition()
.filter((p) => p.coords !== undefined) //Filter Out Errors
.subscribe(position => {
this.miLatitude = position.coords.latitude;
this.miLongitude = position.coords.longitude;
// console.log("locomiLocation=" + position.coords.latitude + ' ' + position.coords.longitude);
});
});
this.servi.getHomerOneSignal(this.product.product.id).then((result:any) => this.providerOneSignal = result.providers[0].onesignal);
//con esto antes obtenia el providerOneSignal
// for (let i = 0; i < this.values.homerOneSignal.length; i++) {
// if(this.values.homerOneSignal[i].product == this.product.product.id){
// this.providerOneSignal = this.values.homerOneSignal[i].providerOneSignal
// }
// }
}
loadDataProduct(){
//según el horario, deshabilitamos los dias de la semana que no están definidos en el Available
this.disableWeekDays = [0, 1, 2, 3, 4, 5, 6]
this.product.product.availability.forEach(element => {
let day = Number((element.type as string).split(':')[1])
console.log({ day })
const index = this.disableWeekDays.indexOf(day)
if (index > -1) {
this.disableWeekDays.splice(index, 1)
}
})
console.log('this.daysConfig', this.daysConfig)
console.log('this.disableWeekDays', this.disableWeekDays)
//Ponemos los dias as marked (para que aparezcan de un color azul) 6 meses hacia adelante
for (let index = 0; index < 180; index++) {
let cur_day = moment()
.add(index, 'days')
.toDate()
.getDay()
const index_cur_day = this.disableWeekDays.indexOf(cur_day)
if (index_cur_day > -1) {
|
this.daysConfig.push({
date: moment()
.add(index, 'days')
.toDate(),
marked: true,
})
}
//Por defecto iniciamos con el booking deshabilitado
this.disableSubmit = true
}
handleAddress(result){
this.addresses = result
this.addressesCustomer = this.addresses.customer.billing_address.address_1
console.log(this.addressesCustomer)
}
handleCustomer(result){
this.customers = result
}
handleProductResults(results) {
this.selectedService = null
this.product.product = results
this.id = results.id
console.log('producto', this.product.product)
this.options.product_id = this.id
console.log('Product: ', this.product.product.resources_full)
this.usedVariationAttributes = (this.product.product
.resources_full as Array<any>).map(item => item)
console.log('usedVariationAttributes:', this.usedVariationAttributes)
this.loadDataProduct();
// this.product = results
// this.usedVariationAttributes = this.product.product.attributes.filter(
// function(attribute) {
// return attribute.variation == true
// },
// )
}
getProduct(id) {
this.nav.push(ProductPage, id)
console.log(id)
}
addToCart() {
// if (!this.values.isLoggedIn) {
// this.functions.showAlert(
// 'Options',
// 'Please login or create an account to continue',
// )
// this.nav.push(AccountLogin)
// }
//Validamos se el producto contiene resources
// if (
// this.product.product.resources_full.length > 0 &&
// !this.selectedService
// ) {
// this.functions.showAlert(
// 'Options',
// 'Select a service and booking information',
// )
// return
// }
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService.resource_id
: null
this.getAddressFromCoords();
var date = moment(this.date)
var year = date.year()
var month = date.month()
var day = date.day()
this.service.addOrders({
"clientUi": this.values.customerId,
"nameClient": this.values.customerName,
"productUi": this.product.product.id,
"productName": this.product.product.name,
"date": year+'/'+month+'/'+day,
"hour": this.hourInit,
"lat":this.lat,
"lng":this.long,
"onesignal":this.values.userId,
"location" : this.addressesCustomer
});
this.service.sendNotification({
"title":"Nueva solicitud",
"content":`Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
"onesignalid":this.providerOneSignal
})
this.disableSubmit = true
this.BookNow = 'PleaseWait'
// var date = new Date(this.selectedTime);
this.product_slot.map(result => {
if(this.product.product.id == result.product_id)
{
var date = new Date(new Date(result.date))
var year = date.getFullYear()
var month = date.getMonth() + 1
var day = date.getDate()
this.service
.addToCart(
resource_id,
month,
day,
year,
result.date,
this.product.product,
)
.then(results => {
console.log(results)
})
this.values.count += parseInt(this.quantity)
}
})
this.disableSubmit = false
this.BookNow = 'BookNow'
this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
this.returnHome()
// }
}
showAlert(title, text) {
let alert = this.alert.create({
title: title,
subTitle: text,
buttons: ['OK'],
});
alert.present();
}
setVariations() {
this.product.product.attributes.forEach(item => {
if (item.selected) {
this.options['variation[attribute_pa_' + item.name + ']'] =
item.selected
}
})
for (var i = 0; i < this.product.product.attributes.length; i++) {
console.log(this.product.product.attributes[i].name)
if (
this.product.product.attributes[i].variation &&
this.product.product.attributes[i].selected == undefined
) {
this.functions.showAlert(
'Options',
'Please Select Product ' +
this.product.product.attributes[i].name +
' Option',
)
return false
}
}
return true
}
onSelect($event, id) {
let date = new Date($event.time)
console.log({ date })
this.month = date.getUTCMonth() + 1 //months from 1-12
this.day = date.getUTCDate()
this.year = date.getUTCFullYear()
//si cambiamos la fecha reseteamos los horarios
this.schedule = null
this.selectedTime = null
this.disableSubmit = true
if (
this.product.product.resources_full &&
this.product.product.resources_full.length > 0 &&
!this.selectedService
) {
this.functions.showAlert('error', this.lan.pleaseSelect)
return
}
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService.resource_id
: null
// if (this.values.isLoggedIn) {
this.service
.getBlocks(this.day, this.month
|
this.daysConfig.push({
date: moment()
.add(index, 'days')
.toDate(),
disable: true,
})
}
|
conditional_block
|
product.ts
|
.availability.forEach(element => {
let day = Number((element.type as string).split(':')[1])
console.log({ day })
const index = this.disableWeekDays.indexOf(day)
if (index > -1) {
this.disableWeekDays.splice(index, 1)
}
})
console.log('this.daysConfig', this.daysConfig)
console.log('this.disableWeekDays', this.disableWeekDays)
//Ponemos los dias as marked (para que aparezcan de un color azul) 6 meses hacia adelante
for (let index = 0; index < 180; index++) {
let cur_day = moment()
.add(index, 'days')
.toDate()
.getDay()
const index_cur_day = this.disableWeekDays.indexOf(cur_day)
if (index_cur_day > -1) {
this.daysConfig.push({
date: moment()
.add(index, 'days')
.toDate(),
disable: true,
})
}
this.daysConfig.push({
date: moment()
.add(index, 'days')
.toDate(),
marked: true,
})
}
//Por defecto iniciamos con el booking deshabilitado
this.disableSubmit = true
}
handleAddress(result){
this.addresses = result
this.addressesCustomer = this.addresses.customer.billing_address.address_1
console.log(this.addressesCustomer)
}
handleCustomer(result){
this.customers = result
}
handleProductResults(results) {
this.selectedService = null
this.product.product = results
this.id = results.id
console.log('producto', this.product.product)
this.options.product_id = this.id
console.log('Product: ', this.product.product.resources_full)
this.usedVariationAttributes = (this.product.product
.resources_full as Array<any>).map(item => item)
console.log('usedVariationAttributes:', this.usedVariationAttributes)
this.loadDataProduct();
// this.product = results
// this.usedVariationAttributes = this.product.product.attributes.filter(
// function(attribute) {
// return attribute.variation == true
// },
// )
}
getProduct(id) {
this.nav.push(ProductPage, id)
console.log(id)
}
addToCart() {
// if (!this.values.isLoggedIn) {
// this.functions.showAlert(
// 'Options',
// 'Please login or create an account to continue',
// )
// this.nav.push(AccountLogin)
// }
//Validamos se el producto contiene resources
// if (
// this.product.product.resources_full.length > 0 &&
// !this.selectedService
// ) {
// this.functions.showAlert(
// 'Options',
// 'Select a service and booking information',
// )
// return
// }
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService.resource_id
: null
this.getAddressFromCoords();
var date = moment(this.date)
var year = date.year()
var month = date.month()
var day = date.day()
this.service.addOrders({
"clientUi": this.values.customerId,
"nameClient": this.values.customerName,
"productUi": this.product.product.id,
"productName": this.product.product.name,
"date": year+'/'+month+'/'+day,
"hour": this.hourInit,
"lat":this.lat,
"lng":this.long,
"onesignal":this.values.userId,
"location" : this.addressesCustomer
});
this.service.sendNotification({
"title":"Nueva solicitud",
"content":`Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
"onesignalid":this.providerOneSignal
})
this.disableSubmit = true
this.BookNow = 'PleaseWait'
// var date = new Date(this.selectedTime);
this.product_slot.map(result => {
if(this.product.product.id == result.product_id)
{
var date = new Date(new Date(result.date))
var year = date.getFullYear()
var month = date.getMonth() + 1
var day = date.getDate()
this.service
.addToCart(
resource_id,
month,
day,
year,
result.date,
this.product.product,
)
.then(results => {
console.log(results)
})
this.values.count += parseInt(this.quantity)
}
})
this.disableSubmit = false
this.BookNow = 'BookNow'
this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
this.returnHome()
// }
}
showAlert(title, text) {
let alert = this.alert.create({
title: title,
subTitle: text,
buttons: ['OK'],
});
alert.present();
}
setVariations() {
this.product.product.attributes.forEach(item => {
if (item.selected) {
this.options['variation[attribute_pa_' + item.name + ']'] =
item.selected
}
})
for (var i = 0; i < this.product.product.attributes.length; i++) {
console.log(this.product.product.attributes[i].name)
if (
this.product.product.attributes[i].variation &&
this.product.product.attributes[i].selected == undefined
) {
this.functions.showAlert(
'Options',
'Please Select Product ' +
this.product.product.attributes[i].name +
' Option',
)
return false
}
}
return true
}
onSelect($event, id) {
let date = new Date($event.time)
console.log({ date })
this.month = date.getUTCMonth() + 1 //months from 1-12
this.day = date.getUTCDate()
this.year = date.getUTCFullYear()
//si cambiamos la fecha reseteamos los horarios
this.schedule = null
this.selectedTime = null
this.disableSubmit = true
if (
this.product.product.resources_full &&
this.product.product.resources_full.length > 0 &&
!this.selectedService
) {
this.functions.showAlert('error', this.lan.pleaseSelect)
return
}
var resource_id = !this.selectedService
? null
: this.selectedService.resource_id
? this.selectedService.resource_id
: null
// if (this.values.isLoggedIn) {
this.service
.getBlocks(this.day, this.month, this.year, id, resource_id)
.then(results => {
let res = results as string
let find = '<li class="block"'
let regex = new RegExp(find, 'g')
res = res.replace(
regex,
'<li class="block" ng-click="selectSchedule()" ',
)
console.log('schedule', res)
var match = res.match(/data-value="(.*?)"/gi)
if (!match) {
this.schedule = null
return
}
match.forEach((el, i, arr) => {
arr[i] = el.replace('data-value=', '').replace(/"/g, '')
})
this.schedule = match
})
}
update_blocks(a) {
if (a.success == 'Success') {
//this.functions.showAlert(a.success, a.message);
this.values.blockslistId[this.product.product.id] = true
} else {
this.functions.showAlert('error', 'error')
}
}
updateCart(a) {
console.log('a:', a)
this.disableSubmit = false
this.values.count += parseInt(this.quantity)
this.BookNow = 'BookNow'
this.returnHome()
// this.getCart()
}
returnHome(){
this.nav.push(ProductsListPage);
}
getCart() {
this.nav.parent.select(2);
}
mySlideOptions = {
initialSlide: 1,
loop: true,
autoplay: 5800,
pager: true,
}
getReviews() {
this.service.getReviews(this.id).then(results => this.handleReview(results))
}
handleReview(a) {
this.reviews = a
for (let item in this.reviews.product_reviews) {
this.reviews.product_reviews[item].avatar = md5(
this.reviews.product_reviews[item].reviewer_email,
)
}
}
addToWishlist(id) {
if (this.values.isLoggedIn) {
this.service.addToWishlist(id).then(results => this.update(results))
} else {
this.functions.showAlert(
'Warning',
'Debe iniciar sesión para agregar un servicio a la lista de deseos',
)
}
}
update(a) {
if (a.success == 'Success') {
//this.functions.showAlert(a.success, a.message);
this.values.wishlistId[this.product.product.id] = true
} else {
this.functions.showAlert('error', 'error')
}
}
removeFromWishlist(id) {
this.values.wishlistId[id] = false
this.service.deleteItem(id).then(results => this.updateWish(results, id))
}
updateWish(results, id) {
if (results.status == 'success') {
this.values.wishlistId[id] = false
}
}
cho
|
oseVariationOne(){
|
identifier_name
|
|
codegen.rs
|
candidate_k_axes
.iter()
.filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim())
.collect::<TVec<_>>();
let k_axis = if non_trivial_k_axis.len() > 1 {
// TODO: handle case where multiple consecutive k in the same order in both input.
bail!("Multiple k-axis candidate found");
} else {
non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied()
};
let Some(k_axis) = k_axis else {
return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?));
};
let m_axis = op
.axes
.iter_all_axes()
.filter(|a| {
a.inputs[0].len() == 1
&& (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one())
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(m_axis) = m_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?));
};
let n_axis = op
.axes
.iter_all_axes()
.filter(|a| {
(a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one())
&& a.inputs[1].len() == 1
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(n_axis) = n_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(
op,
model,
node,
true,
&[k_axis, m_axis],
)?));
};
for axis in op.axes.iter_all_axes() {
let one = TDim::one();
let in_left =
axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one;
let in_right =
axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one;
let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one;
if (in_left ^ in_right) && !in_out {
return Ok(AxesOrPatch::NotAMatMul(axis));
}
}
Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis))
}
pub(super) fn inject_k_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
) -> TractResult<TypedModelPatch> {
let mut new_axes = op.axes.clone();
let name = &node.name;
let mut patch = TypedModelPatch::new("inject k axis");
let mut wire = patch.taps(model, &node.inputs)?;
let repr = new_axes.available_label();
new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency(
repr,
InOut::In(1),
0,
)?;
wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0];
wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
pub(super) fn inject_m_or_n_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
is_n: bool,
exclude: &[&Axis],
) -> TractResult<TypedModelPatch> {
let input_to_fix = is_n as usize;
let label = if is_n { "n" } else { "m" };
let input_facts = model.node_input_facts(node.id)?;
let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| {
(a.inputs[1 - input_to_fix].len() == 0
|| input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one())
&& (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1)
});
let name = &node.name;
let mut patch = TypedModelPatch::new("Injecting m or n axis");
let mut wire = patch.taps(model, &node.inputs)?;
if let Some(axis) = quasi_m_or_n_axis {
if axis.inputs[input_to_fix].len() == 1 {
let new_axes =
op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?;
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
} else {
let new_axes = op
.axes
.clone()
.with_extra_axis('$', InOut::In(input_to_fix), 0)?
.linking(axis.repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
}
} else {
let repr = op.axes.available_label();
let new_axes = op
.axes
.clone()
.with_extra_axis(repr, InOut::In(input_to_fix), 0)?
.with_extra_axis('$', InOut::Out(0), 0)?
.linking(repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
}
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
fn wire_axes_fix(
patch: &mut TypedModelPatch,
name: &str,
var: &str,
mapping: &AxesMapping,
mut outlet: TVec<OutletId>,
) -> TractResult<TVec<OutletId>> {
for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() {
outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?;
}
Ok(outlet)
}
fn
|
(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
(_, k_axis, _): (&Axis, &Axis, &Axis),
) -> TractResult<Option<TypedModelPatch>> {
let name = &node.name;
let mut patch = TypedModelPatch::new("Dequantizing einsum");
let taps = patch.taps(model, &node.inputs)?;
let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else {
bail!("Expect exactly 9 inputs")
};
if !patch.outlet_fact(a_scale)?.shape.volume().is_one() {
let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0];
let output_rank = node.outputs[0].fact.rank();
for i in 1..(output_rank - q_axis_in_output) {
a_scale = patch.wire_node(
format!("{name}.a_scale_axis_fix_{i}"),
AxisOp::Add(i),
&[a_scale],
)?[0];
}
}
let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?;
let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?;
let mut output = patch.wire_node(
&node.name,
EinSum {
q_params: None,
axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?,
operating_dt: op.operating_dt,
},
&[a
|
dequant
|
identifier_name
|
codegen.rs
|
candidate_k_axes
.iter()
.filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim())
.collect::<TVec<_>>();
let k_axis = if non_trivial_k_axis.len() > 1
|
else {
non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied()
};
let Some(k_axis) = k_axis else {
return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?));
};
let m_axis = op
.axes
.iter_all_axes()
.filter(|a| {
a.inputs[0].len() == 1
&& (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one())
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(m_axis) = m_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?));
};
let n_axis = op
.axes
.iter_all_axes()
.filter(|a| {
(a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one())
&& a.inputs[1].len() == 1
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(n_axis) = n_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(
op,
model,
node,
true,
&[k_axis, m_axis],
)?));
};
for axis in op.axes.iter_all_axes() {
let one = TDim::one();
let in_left =
axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one;
let in_right =
axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one;
let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one;
if (in_left ^ in_right) && !in_out {
return Ok(AxesOrPatch::NotAMatMul(axis));
}
}
Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis))
}
pub(super) fn inject_k_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
) -> TractResult<TypedModelPatch> {
let mut new_axes = op.axes.clone();
let name = &node.name;
let mut patch = TypedModelPatch::new("inject k axis");
let mut wire = patch.taps(model, &node.inputs)?;
let repr = new_axes.available_label();
new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency(
repr,
InOut::In(1),
0,
)?;
wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0];
wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
pub(super) fn inject_m_or_n_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
is_n: bool,
exclude: &[&Axis],
) -> TractResult<TypedModelPatch> {
let input_to_fix = is_n as usize;
let label = if is_n { "n" } else { "m" };
let input_facts = model.node_input_facts(node.id)?;
let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| {
(a.inputs[1 - input_to_fix].len() == 0
|| input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one())
&& (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1)
});
let name = &node.name;
let mut patch = TypedModelPatch::new("Injecting m or n axis");
let mut wire = patch.taps(model, &node.inputs)?;
if let Some(axis) = quasi_m_or_n_axis {
if axis.inputs[input_to_fix].len() == 1 {
let new_axes =
op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?;
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
} else {
let new_axes = op
.axes
.clone()
.with_extra_axis('$', InOut::In(input_to_fix), 0)?
.linking(axis.repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
}
} else {
let repr = op.axes.available_label();
let new_axes = op
.axes
.clone()
.with_extra_axis(repr, InOut::In(input_to_fix), 0)?
.with_extra_axis('$', InOut::Out(0), 0)?
.linking(repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
}
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
fn wire_axes_fix(
patch: &mut TypedModelPatch,
name: &str,
var: &str,
mapping: &AxesMapping,
mut outlet: TVec<OutletId>,
) -> TractResult<TVec<OutletId>> {
for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() {
outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?;
}
Ok(outlet)
}
fn dequant(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
(_, k_axis, _): (&Axis, &Axis, &Axis),
) -> TractResult<Option<TypedModelPatch>> {
let name = &node.name;
let mut patch = TypedModelPatch::new("Dequantizing einsum");
let taps = patch.taps(model, &node.inputs)?;
let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else {
bail!("Expect exactly 9 inputs")
};
if !patch.outlet_fact(a_scale)?.shape.volume().is_one() {
let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0];
let output_rank = node.outputs[0].fact.rank();
for i in 1..(output_rank - q_axis_in_output) {
a_scale = patch.wire_node(
format!("{name}.a_scale_axis_fix_{i}"),
AxisOp::Add(i),
&[a_scale],
)?[0];
}
}
let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?;
let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?;
let mut output = patch.wire_node(
&node.name,
EinSum {
q_params: None,
axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?,
operating_dt: op.operating_dt,
},
&[a
|
{
// TODO: handle case where multiple consecutive k in the same order in both input.
bail!("Multiple k-axis candidate found");
}
|
conditional_block
|
codegen.rs
|
// TODO: handle case where multiple consecutive k in the same order in both input.
bail!("Multiple k-axis candidate found");
} else {
non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied()
};
let Some(k_axis) = k_axis else {
return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?));
};
let m_axis = op
.axes
.iter_all_axes()
.filter(|a| {
a.inputs[0].len() == 1
&& (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one())
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(m_axis) = m_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?));
};
let n_axis = op
.axes
.iter_all_axes()
.filter(|a| {
(a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one())
&& a.inputs[1].len() == 1
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(n_axis) = n_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(
op,
model,
node,
true,
&[k_axis, m_axis],
)?));
};
for axis in op.axes.iter_all_axes() {
let one = TDim::one();
let in_left =
axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one;
let in_right =
axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one;
let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one;
if (in_left ^ in_right) && !in_out {
return Ok(AxesOrPatch::NotAMatMul(axis));
}
}
Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis))
}
pub(super) fn inject_k_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
) -> TractResult<TypedModelPatch> {
let mut new_axes = op.axes.clone();
let name = &node.name;
let mut patch = TypedModelPatch::new("inject k axis");
let mut wire = patch.taps(model, &node.inputs)?;
let repr = new_axes.available_label();
new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency(
repr,
InOut::In(1),
0,
)?;
wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0];
wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
pub(super) fn inject_m_or_n_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
is_n: bool,
exclude: &[&Axis],
) -> TractResult<TypedModelPatch> {
let input_to_fix = is_n as usize;
let label = if is_n { "n" } else { "m" };
let input_facts = model.node_input_facts(node.id)?;
let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| {
(a.inputs[1 - input_to_fix].len() == 0
|| input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one())
&& (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1)
});
let name = &node.name;
let mut patch = TypedModelPatch::new("Injecting m or n axis");
let mut wire = patch.taps(model, &node.inputs)?;
if let Some(axis) = quasi_m_or_n_axis {
if axis.inputs[input_to_fix].len() == 1 {
let new_axes =
op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?;
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
} else {
let new_axes = op
.axes
.clone()
.with_extra_axis('$', InOut::In(input_to_fix), 0)?
.linking(axis.repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
}
} else {
let repr = op.axes.available_label();
let new_axes = op
.axes
.clone()
.with_extra_axis(repr, InOut::In(input_to_fix), 0)?
.with_extra_axis('$', InOut::Out(0), 0)?
.linking(repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
}
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
fn wire_axes_fix(
patch: &mut TypedModelPatch,
name: &str,
var: &str,
mapping: &AxesMapping,
mut outlet: TVec<OutletId>,
) -> TractResult<TVec<OutletId>> {
for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() {
outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?;
}
Ok(outlet)
}
fn dequant(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
(_, k_axis, _): (&Axis, &Axis, &Axis),
) -> TractResult<Option<TypedModelPatch>> {
let name = &node.name;
let mut patch = TypedModelPatch::new("Dequantizing einsum");
let taps = patch.taps(model, &node.inputs)?;
let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else {
bail!("Expect exactly 9 inputs")
};
if !patch.outlet_fact(a_scale)?.shape.volume().is_one() {
let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0];
let output_rank = node.outputs[0].
|
{
let input_facts = model.node_input_facts(node.id)?;
let input_shapes: TVec<&[TDim]> = input_facts.iter().map(|f| &*f.shape).collect();
let output_shape = super::eval::output_shape(&op.axes, &input_shapes);
let candidate_k_axes: TVec<&Axis> = op
.axes
.iter_all_axes()
// Filter possible candidates (should be one time in each inputs but not in output)
.filter(|a| {
a.inputs[0].len() == 1 && a.inputs[1].len() == 1 && a.outputs[0].len() == 0 &&
input_facts[0].shape[a.inputs[0][0]] == input_facts[1].shape[a.inputs[1][0]]
})
.collect();
let non_trivial_k_axis = candidate_k_axes
.iter()
.filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim())
.collect::<TVec<_>>();
let k_axis = if non_trivial_k_axis.len() > 1 {
|
identifier_body
|
|
codegen.rs
|
= candidate_k_axes
.iter()
.filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim())
.collect::<TVec<_>>();
let k_axis = if non_trivial_k_axis.len() > 1 {
// TODO: handle case where multiple consecutive k in the same order in both input.
bail!("Multiple k-axis candidate found");
} else {
non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied()
};
let Some(k_axis) = k_axis else {
return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?));
};
let m_axis = op
.axes
.iter_all_axes()
.filter(|a| {
a.inputs[0].len() == 1
&& (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one())
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(m_axis) = m_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?));
};
let n_axis = op
.axes
.iter_all_axes()
.filter(|a| {
(a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one())
&& a.inputs[1].len() == 1
&& a.outputs[0].len() == 1
})
.max_by_key(|a| &output_shape[a.outputs[0][0]]);
let Some(n_axis) = n_axis else {
return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(
op,
model,
node,
true,
&[k_axis, m_axis],
)?));
};
for axis in op.axes.iter_all_axes() {
let one = TDim::one();
let in_left =
axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one;
let in_right =
axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one;
let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one;
if (in_left ^ in_right) && !in_out {
return Ok(AxesOrPatch::NotAMatMul(axis));
}
}
Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis))
}
pub(super) fn inject_k_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
) -> TractResult<TypedModelPatch> {
let mut new_axes = op.axes.clone();
let name = &node.name;
let mut patch = TypedModelPatch::new("inject k axis");
let mut wire = patch.taps(model, &node.inputs)?;
let repr = new_axes.available_label();
new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency(
repr,
InOut::In(1),
0,
)?;
wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0];
wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
pub(super) fn inject_m_or_n_axis(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
is_n: bool,
exclude: &[&Axis],
) -> TractResult<TypedModelPatch> {
let input_to_fix = is_n as usize;
let label = if is_n { "n" } else { "m" };
let input_facts = model.node_input_facts(node.id)?;
let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| {
(a.inputs[1 - input_to_fix].len() == 0
|| input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one())
&& (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1)
});
let name = &node.name;
let mut patch = TypedModelPatch::new("Injecting m or n axis");
let mut wire = patch.taps(model, &node.inputs)?;
if let Some(axis) = quasi_m_or_n_axis {
if axis.inputs[input_to_fix].len() == 1 {
let new_axes =
op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?;
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
} else {
let new_axes = op
.axes
.clone()
.with_extra_axis('$', InOut::In(input_to_fix), 0)?
.linking(axis.repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
}
} else {
let repr = op.axes.available_label();
let new_axes = op
.axes
.clone()
.with_extra_axis(repr, InOut::In(input_to_fix), 0)?
.with_extra_axis('$', InOut::Out(0), 0)?
.linking(repr, '$')?;
wire[input_to_fix] = patch.wire_node(
format!("{name}.add_{label}"),
AxisOp::Add(0),
&[wire[input_to_fix]],
)?[0];
wire = patch.wire_node(
format!("{name}.einsum"),
EinSum { axes: new_axes, ..op.clone() },
&wire,
)?;
wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
}
patch.shunt_outside(model, node.id.into(), wire[0])?;
Ok(patch)
}
fn wire_axes_fix(
patch: &mut TypedModelPatch,
name: &str,
var: &str,
mapping: &AxesMapping,
mut outlet: TVec<OutletId>,
) -> TractResult<TVec<OutletId>> {
for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() {
outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?;
}
Ok(outlet)
}
fn dequant(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
(_, k_axis, _): (&Axis, &Axis, &Axis),
) -> TractResult<Option<TypedModelPatch>> {
let name = &node.name;
let mut patch = TypedModelPatch::new("Dequantizing einsum");
let taps = patch.taps(model, &node.inputs)?;
let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else {
bail!("Expect exactly 9 inputs")
};
if !patch.outlet_fact(a_scale)?.shape.volume().is_one() {
let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0];
let output_rank = node.outputs[0].fact.rank();
|
)?[0];
}
}
let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?;
let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?;
let mut output = patch.wire_node(
&node.name,
EinSum {
q_params: None,
axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?,
operating_dt: op.operating_dt,
},
&[a
|
for i in 1..(output_rank - q_axis_in_output) {
a_scale = patch.wire_node(
format!("{name}.a_scale_axis_fix_{i}"),
AxisOp::Add(i),
&[a_scale],
|
random_line_split
|
interface.py
|
name})
}
r = requests.get(url, headers={
"X-Auth-Token": self.auth_token,
"Content-Type": "application/json"
})
if r.ok:
data = json.loads(r.text)
assert data
return data
else:
if r.status_code == 404:
# couldn't find it
raise NotFound
class Artifice(object):
"""Produces billable artifacts"""
def __init__(self, config):
super(Artifice, self).__init__()
self.config = config
# This is the Keystone client connection, which provides our
# OpenStack authentication
self.auth = keystone(
username= config["openstack"]["username"],
password= config["openstack"]["password"],
tenant_name= config["openstack"]["default_tenant"],
auth_url= config["openstack"]["authentication_url"]
)
conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % {
"username": config["database"]["username"],
"password": config["database"]["password"],
"host": config["database"]["host"],
"port": config["database"]["port"],
"database": config["database"]["database"]
}
engine = create_engine(conn_string)
Session.configure(bind=engine)
self.session = Session()
self.artifice = None
self.ceilometer = ceilometer(
self.config["ceilometer"]["host"],
# Uses a lambda as ceilometer apparently wants to use it as a callable?
token=lambda: self.auth.auth_token
)
self._tenancy = None
def host_to_dc(self, host):
"""
:param host: The name to use.
:type host: str.
:returns: str -- The datacenter corresponding to this host.
"""
# :raises: AttributeError, KeyError
# How does this get implemented ? Should there be a module injection?
return host # For the moment, passthrough
# TODO: FIXME.
def tenant(self, name):
"""
Returns a Tenant object describing the specified Tenant by
name, or raises a NotFound error.
"""
# Returns a Tenant object for the given name.
# Uses Keystone API to perform a direct name lookup,
# as this is expected to work via name.
data = self.auth.tenant_by_name(name)
t = Tenant(data["tenant"], self)
return t
@property
def tenants(self):
"""All the tenants in our system"""
# print "tenant list is %s" % self.auth.tenants.list()
if not self._tenancy:
|
return self._tenancy
class Tenant(object):
def __init__(self, tenant, conn):
self.tenant = tenant
# Conn is the niceometer object we were instanced from
self.conn = conn
self._meters = set()
self._resources = None
self.invoice_type = None
# Invoice type needs to get set from the config, which is
# part of the Artifice setup above.
def __getitem__(self, item):
try:
return getattr(self.tenant, item)
except AttributeError:
try:
return self.tenant[item]
except KeyError:
raise KeyError("No such key '%s' in tenant" % item)
def __getattr__(self, attr):
if attr not in self.tenant:
return object.__getattribute__(self, attr)
# return super(Tenant, self).__getattr__(attr)
return self.tenant[attr]
def invoice(self, start, end):
"""
Creates a new Invoice.
Invoices are an Artifice datamodel that represent a
set of billable entries assigned to a client on a given Date.
An Invoice offers very little of its own opinions,
requiring a backend plugin to operate.
@returns: invoice
"""
if self.invoice_type is None:
invoice_type = self.conn.config["main"]["invoice:object"]
if ":" not in invoice_type:
raise AttributeError("Invoice configuration incorrect! %s" % invoice_type)
module, call = invoice_type.split(":")
_package = __import__(module, globals(), locals(), [ call ])
funct = getattr(_package, call)
self.invoice_type = funct
config = self.conn.config["invoice_object"]
invoice = self.invoice_type(self, config)
return invoice
def resources(self, start, end):
if not self._resources:
date_fields = [{
"field": "timestamp",
"op": "ge",
"value": start.strftime(date_format)
},
{
"field": "timestamp",
"op": "lt",
"value": end.strftime(date_format)
},
{ "field": "project_id",
"op": "eq",
"value": self.tenant["id"]
},
]
# Sets up our resources as Ceilometer objects.
# That's cool, I think.
self._resources = self.conn.ceilometer.resources.list(date_fields)
return self._resources
# def usage(self, start, end, section=None):
def usage(self, start, end):
"""
Usage is the meat of Artifice, returning a dict of location to
sub-information
"""
# Returns a usage dict, based on regions.
vms = {}
vm_to_region = {}
ports = {}
usage_by_dc = {}
writing_to = None
vms = []
networks = []
storage = []
volumes = []
# Object storage is mapped by project_id
for resource in self.resources(start, end):
# print dir(resource)
rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ]
if "storage.objects" in rels:
# Unknown how this data layout happens yet.
storage.append(Resource(resource, self.conn))
pass
elif "network" in rels:
# Have we seen the VM that owns this yet?
networks.append(Resource(resource , self.conn))
elif "volumne" in rels:
volumes.append( Resource(resource, self.conn) )
elif 'instance' in rels:
vms.append(Resource(resource, self.conn ))
datacenters = {}
region_tmpl = {
"vms": vms,
"network": networks,
"objects": storage,
"volumes": volumes
}
return Usage(region_tmpl, start, end, self.conn)
class Usage(object):
"""
This is a dict-like object containing all the datacenters and
meters available in those datacenters.
"""
def __init__(self, contents, start, end, conn):
self.contents = contents
self.start = start
self.end = end
self.conn = conn
self._vms = []
self._objects = []
self._volumes = []
# Replaces all the internal references with better references to
# actual metered values.
# self._replace()
@property
def vms(self):
if not self._vms:
vms = []
for vm in self.contents["vms"]:
VM = resources.VM(vm, self.start, self.end)
md = vm["metadata"]
host = md["host"]
VM.location = self.conn.host_to_dc( vm["metadata"]["host"] )
vms.append(VM)
self._vms = vms
return self._vms
@property
def objects(self):
if not self._objects:
objs = []
for object_ in self.contents["objects"]:
obj = resources.Object(object_, self.start, self.end)
objs.append(obj)
self._objs = objs
return self._objs
@property
def volumes(self):
if not self._volumes:
objs = []
for obj in self.contents["volumes"]:
obj = resources.Volume(obj, self.start, self.end)
objs.append(obj)
self._volumes = objs
return self._volumes
# def __getitem__(self, item):
# return self.contents[item]
def __iter__(self):
return self
def next(self):
# pass
keys = self.contents.keys()
for key in keys:
yield key
raise StopIteration()
def save(self):
"""
Iterate the list of things; save them to DB.
"""
for vm in self.vms:
vm.save()
for obj in self.objects:
obj.save()
for vol in self.volumes:
vol.save()
class Resource(object):
def __init__(self, resource, conn):
self.resource = resource
self.conn = conn
self._meters = {}
# def __getitem__(self, item):
# return self.resource
def meter(self, name, start, end):
pass # Return a named meter
for meter in self.resource.links:
if meter["rel"] == name:
m = Meter(self, meter["href"], self.conn, start, end)
self._meters[name] = m
return m
raise AttributeError("no such meter %s" % name)
def __getitem__(self
|
self._tenancy = {}
for tenant in self.auth.tenants.list():
t = Tenant(tenant, self)
self._tenancy[t["name"]] = t
|
conditional_block
|
interface.py
|
invoice_type.split(":")
_package = __import__(module, globals(), locals(), [ call ])
funct = getattr(_package, call)
self.invoice_type = funct
config = self.conn.config["invoice_object"]
invoice = self.invoice_type(self, config)
return invoice
def resources(self, start, end):
if not self._resources:
date_fields = [{
"field": "timestamp",
"op": "ge",
"value": start.strftime(date_format)
},
{
"field": "timestamp",
"op": "lt",
"value": end.strftime(date_format)
},
{ "field": "project_id",
"op": "eq",
"value": self.tenant["id"]
},
]
# Sets up our resources as Ceilometer objects.
# That's cool, I think.
self._resources = self.conn.ceilometer.resources.list(date_fields)
return self._resources
# def usage(self, start, end, section=None):
def usage(self, start, end):
"""
Usage is the meat of Artifice, returning a dict of location to
sub-information
"""
# Returns a usage dict, based on regions.
vms = {}
vm_to_region = {}
ports = {}
usage_by_dc = {}
writing_to = None
vms = []
networks = []
storage = []
volumes = []
# Object storage is mapped by project_id
for resource in self.resources(start, end):
# print dir(resource)
rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ]
if "storage.objects" in rels:
# Unknown how this data layout happens yet.
storage.append(Resource(resource, self.conn))
pass
elif "network" in rels:
# Have we seen the VM that owns this yet?
networks.append(Resource(resource , self.conn))
elif "volumne" in rels:
volumes.append( Resource(resource, self.conn) )
elif 'instance' in rels:
vms.append(Resource(resource, self.conn ))
datacenters = {}
region_tmpl = {
"vms": vms,
"network": networks,
"objects": storage,
"volumes": volumes
}
return Usage(region_tmpl, start, end, self.conn)
class Usage(object):
"""
This is a dict-like object containing all the datacenters and
meters available in those datacenters.
"""
def __init__(self, contents, start, end, conn):
self.contents = contents
self.start = start
self.end = end
self.conn = conn
self._vms = []
self._objects = []
self._volumes = []
# Replaces all the internal references with better references to
# actual metered values.
# self._replace()
@property
def vms(self):
if not self._vms:
vms = []
for vm in self.contents["vms"]:
VM = resources.VM(vm, self.start, self.end)
md = vm["metadata"]
host = md["host"]
VM.location = self.conn.host_to_dc( vm["metadata"]["host"] )
vms.append(VM)
self._vms = vms
return self._vms
@property
def objects(self):
if not self._objects:
objs = []
for object_ in self.contents["objects"]:
obj = resources.Object(object_, self.start, self.end)
objs.append(obj)
self._objs = objs
return self._objs
@property
def volumes(self):
if not self._volumes:
objs = []
for obj in self.contents["volumes"]:
obj = resources.Volume(obj, self.start, self.end)
objs.append(obj)
self._volumes = objs
return self._volumes
# def __getitem__(self, item):
# return self.contents[item]
def __iter__(self):
return self
def next(self):
# pass
keys = self.contents.keys()
for key in keys:
yield key
raise StopIteration()
def save(self):
"""
Iterate the list of things; save them to DB.
"""
for vm in self.vms:
vm.save()
for obj in self.objects:
obj.save()
for vol in self.volumes:
vol.save()
class Resource(object):
def __init__(self, resource, conn):
self.resource = resource
self.conn = conn
self._meters = {}
# def __getitem__(self, item):
# return self.resource
def meter(self, name, start, end):
pass # Return a named meter
for meter in self.resource.links:
if meter["rel"] == name:
m = Meter(self, meter["href"], self.conn, start, end)
self._meters[name] = m
return m
raise AttributeError("no such meter %s" % name)
def __getitem__(self, name):
# print name
# print self.resource
# print self.resource[name]
return getattr(self.resource, name)
# return self.resource.name
@property
def meters(self):
if not self._meters:
meters = []
for link in self.resource["links"]:
if link["rel"] == "self":
continue
meter = Meter(self, link, self.conn)
meters.append(meter)
self._meters = meters
return self._meters
class Meter(object):
def __init__(self, resource, link, conn, start=None, end=None):
self.resource = resource
self.link = link
self.conn = conn
self.start = start
self.end = end
# self.meter = meter
def __getitem__(self, x):
if isinstance(x, slice):
# Woo
pass
pass
def volume(self):
return self.usage(self.start, self.end)
def usage(self, start, end):
"""
Usage condenses the entirety of a meter into a single datapoint:
A volume value that we can plot as a single number against previous
usage for a given range.
"""
measurements = get_meter(self, start, end, self.conn.auth.auth_token)
# return measurements
# print measurements
self.measurements = defaultdict(list)
self.type = set([a["counter_type"] for a in measurements])
if len(self.type) > 1:
# That's a big problem
raise RuntimeError("Too many types for measurement!")
elif len(self.type) == 0:
raise RuntimeError("No types!")
else:
self.type = self.type.pop()
type_ = None
if self.type == "cumulative":
# The usage is the last one, which is the highest value.
#
# Base it just on the resource ID.
# Is this a reasonable thing to do?
# Composition style: resource.meter("cpu_util").usage(start, end) == artifact
type_ = Cumulative
elif self.type == "gauge":
type_ = Gauge
# return Gauge(self.Resource, )
elif self.type == "delta":
type_ = Delta
return type_(self.resource, measurements, start, end)
def save(self):
if not self.start and self.end:
raise AttributeError("Needs start and end defined to save")
self.volume().save()
class Artifact(object):
"""
Provides base artifact controls; generic typing information
for the artifact structures.
"""
def __init__(self, resource, usage, start, end):
self.resource = resource
self.usage = usage
self.start = start
self.end = end
def __getitem__(self, item):
if item in self._data:
return self._data[item]
raise KeyError("no such item %s" % item)
def save(self):
"""
Persists to our database backend. Opinionatedly this is a sql datastore.
"""
value = self.volume()
session = self.resource.conn.session
# self.artifice.
try:
tenant_id = self.resource["tenant_id"]
except KeyError:
tenant_id = self.resource["project_id"]
resource_id = self.resource["resource_id"]
tenant = session.query(tenants.Tenant).get(tenant_id)
if tenant is None:
res = resources.Resource()
tenant = tenants.Tenant()
tenant.id = tenant_id
res.id = resource_id
res.tenant = tenant
session.add(res)
session.add(tenant)
else:
try:
res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0]
tenant = res.tenant
except IndexError:
res = resources.Resource()
tenant = tenants.Tenant()
tenant.id = tenant_id
res.id = resource_id
res.tenant = tenant
session.add(res)
session.add(tenant)
this_usage = usage.Usage(
res,
tenant,
value,
self.start,
self.end,
)
session.add(this_usage)
session.commit() # Persist to Postgres
def volume(self):
"""
Default billable number for this volume
"""
return sum([x["counter_volume"] for x in self.usage])
class
|
Cumulative
|
identifier_name
|
|
interface.py
|
name})
}
r = requests.get(url, headers={
"X-Auth-Token": self.auth_token,
"Content-Type": "application/json"
})
if r.ok:
data = json.loads(r.text)
assert data
return data
else:
if r.status_code == 404:
# couldn't find it
raise NotFound
class Artifice(object):
"""Produces billable artifacts"""
def __init__(self, config):
super(Artifice, self).__init__()
self.config = config
# This is the Keystone client connection, which provides our
# OpenStack authentication
self.auth = keystone(
username= config["openstack"]["username"],
password= config["openstack"]["password"],
tenant_name= config["openstack"]["default_tenant"],
auth_url= config["openstack"]["authentication_url"]
)
conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % {
"username": config["database"]["username"],
"password": config["database"]["password"],
"host": config["database"]["host"],
"port": config["database"]["port"],
"database": config["database"]["database"]
}
engine = create_engine(conn_string)
Session.configure(bind=engine)
self.session = Session()
self.artifice = None
self.ceilometer = ceilometer(
self.config["ceilometer"]["host"],
# Uses a lambda as ceilometer apparently wants to use it as a callable?
token=lambda: self.auth.auth_token
)
self._tenancy = None
def host_to_dc(self, host):
"""
:param host: The name to use.
:type host: str.
:returns: str -- The datacenter corresponding to this host.
"""
# :raises: AttributeError, KeyError
# How does this get implemented ? Should there be a module injection?
return host # For the moment, passthrough
# TODO: FIXME.
def tenant(self, name):
"""
Returns a Tenant object describing the specified Tenant by
name, or raises a NotFound error.
"""
# Returns a Tenant object for the given name.
# Uses Keystone API to perform a direct name lookup,
# as this is expected to work via name.
data = self.auth.tenant_by_name(name)
t = Tenant(data["tenant"], self)
return t
@property
def tenants(self):
"""All the tenants in our system"""
# print "tenant list is %s" % self.auth.tenants.list()
if not self._tenancy:
self._tenancy = {}
for tenant in self.auth.tenants.list():
t = Tenant(tenant, self)
self._tenancy[t["name"]] = t
return self._tenancy
class Tenant(object):
|
def __getattr__(self, attr):
if attr not in self.tenant:
return object.__getattribute__(self, attr)
# return super(Tenant, self).__getattr__(attr)
return self.tenant[attr]
def invoice(self, start, end):
"""
Creates a new Invoice.
Invoices are an Artifice datamodel that represent a
set of billable entries assigned to a client on a given Date.
An Invoice offers very little of its own opinions,
requiring a backend plugin to operate.
@returns: invoice
"""
if self.invoice_type is None:
invoice_type = self.conn.config["main"]["invoice:object"]
if ":" not in invoice_type:
raise AttributeError("Invoice configuration incorrect! %s" % invoice_type)
module, call = invoice_type.split(":")
_package = __import__(module, globals(), locals(), [ call ])
funct = getattr(_package, call)
self.invoice_type = funct
config = self.conn.config["invoice_object"]
invoice = self.invoice_type(self, config)
return invoice
def resources(self, start, end):
if not self._resources:
date_fields = [{
"field": "timestamp",
"op": "ge",
"value": start.strftime(date_format)
},
{
"field": "timestamp",
"op": "lt",
"value": end.strftime(date_format)
},
{ "field": "project_id",
"op": "eq",
"value": self.tenant["id"]
},
]
# Sets up our resources as Ceilometer objects.
# That's cool, I think.
self._resources = self.conn.ceilometer.resources.list(date_fields)
return self._resources
# def usage(self, start, end, section=None):
def usage(self, start, end):
"""
Usage is the meat of Artifice, returning a dict of location to
sub-information
"""
# Returns a usage dict, based on regions.
vms = {}
vm_to_region = {}
ports = {}
usage_by_dc = {}
writing_to = None
vms = []
networks = []
storage = []
volumes = []
# Object storage is mapped by project_id
for resource in self.resources(start, end):
# print dir(resource)
rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ]
if "storage.objects" in rels:
# Unknown how this data layout happens yet.
storage.append(Resource(resource, self.conn))
pass
elif "network" in rels:
# Have we seen the VM that owns this yet?
networks.append(Resource(resource , self.conn))
elif "volumne" in rels:
volumes.append( Resource(resource, self.conn) )
elif 'instance' in rels:
vms.append(Resource(resource, self.conn ))
datacenters = {}
region_tmpl = {
"vms": vms,
"network": networks,
"objects": storage,
"volumes": volumes
}
return Usage(region_tmpl, start, end, self.conn)
class Usage(object):
"""
This is a dict-like object containing all the datacenters and
meters available in those datacenters.
"""
def __init__(self, contents, start, end, conn):
self.contents = contents
self.start = start
self.end = end
self.conn = conn
self._vms = []
self._objects = []
self._volumes = []
# Replaces all the internal references with better references to
# actual metered values.
# self._replace()
@property
def vms(self):
if not self._vms:
vms = []
for vm in self.contents["vms"]:
VM = resources.VM(vm, self.start, self.end)
md = vm["metadata"]
host = md["host"]
VM.location = self.conn.host_to_dc( vm["metadata"]["host"] )
vms.append(VM)
self._vms = vms
return self._vms
@property
def objects(self):
if not self._objects:
objs = []
for object_ in self.contents["objects"]:
obj = resources.Object(object_, self.start, self.end)
objs.append(obj)
self._objs = objs
return self._objs
@property
def volumes(self):
if not self._volumes:
objs = []
for obj in self.contents["volumes"]:
obj = resources.Volume(obj, self.start, self.end)
objs.append(obj)
self._volumes = objs
return self._volumes
# def __getitem__(self, item):
# return self.contents[item]
def __iter__(self):
return self
def next(self):
# pass
keys = self.contents.keys()
for key in keys:
yield key
raise StopIteration()
def save(self):
"""
Iterate the list of things; save them to DB.
"""
for vm in self.vms:
vm.save()
for obj in self.objects:
obj.save()
for vol in self.volumes:
vol.save()
class Resource(object):
def __init__(self, resource, conn):
self.resource = resource
self.conn = conn
self._meters = {}
# def __getitem__(self, item):
# return self.resource
def meter(self, name, start, end):
pass # Return a named meter
for meter in self.resource.links:
if meter["rel"] == name:
m = Meter(self, meter["href"], self.conn, start, end)
self._meters[name] = m
return m
raise AttributeError("no such meter %s" % name)
def __getitem__(
|
def __init__(self, tenant, conn):
self.tenant = tenant
# Conn is the niceometer object we were instanced from
self.conn = conn
self._meters = set()
self._resources = None
self.invoice_type = None
# Invoice type needs to get set from the config, which is
# part of the Artifice setup above.
def __getitem__(self, item):
try:
return getattr(self.tenant, item)
except AttributeError:
try:
return self.tenant[item]
except KeyError:
raise KeyError("No such key '%s' in tenant" % item)
|
identifier_body
|
interface.py
|
name})
}
r = requests.get(url, headers={
"X-Auth-Token": self.auth_token,
"Content-Type": "application/json"
|
data = json.loads(r.text)
assert data
return data
else:
if r.status_code == 404:
# couldn't find it
raise NotFound
class Artifice(object):
"""Produces billable artifacts"""
def __init__(self, config):
super(Artifice, self).__init__()
self.config = config
# This is the Keystone client connection, which provides our
# OpenStack authentication
self.auth = keystone(
username= config["openstack"]["username"],
password= config["openstack"]["password"],
tenant_name= config["openstack"]["default_tenant"],
auth_url= config["openstack"]["authentication_url"]
)
conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % {
"username": config["database"]["username"],
"password": config["database"]["password"],
"host": config["database"]["host"],
"port": config["database"]["port"],
"database": config["database"]["database"]
}
engine = create_engine(conn_string)
Session.configure(bind=engine)
self.session = Session()
self.artifice = None
self.ceilometer = ceilometer(
self.config["ceilometer"]["host"],
# Uses a lambda as ceilometer apparently wants to use it as a callable?
token=lambda: self.auth.auth_token
)
self._tenancy = None
def host_to_dc(self, host):
"""
:param host: The name to use.
:type host: str.
:returns: str -- The datacenter corresponding to this host.
"""
# :raises: AttributeError, KeyError
# How does this get implemented ? Should there be a module injection?
return host # For the moment, passthrough
# TODO: FIXME.
def tenant(self, name):
"""
Returns a Tenant object describing the specified Tenant by
name, or raises a NotFound error.
"""
# Returns a Tenant object for the given name.
# Uses Keystone API to perform a direct name lookup,
# as this is expected to work via name.
data = self.auth.tenant_by_name(name)
t = Tenant(data["tenant"], self)
return t
@property
def tenants(self):
"""All the tenants in our system"""
# print "tenant list is %s" % self.auth.tenants.list()
if not self._tenancy:
self._tenancy = {}
for tenant in self.auth.tenants.list():
t = Tenant(tenant, self)
self._tenancy[t["name"]] = t
return self._tenancy
class Tenant(object):
def __init__(self, tenant, conn):
self.tenant = tenant
# Conn is the niceometer object we were instanced from
self.conn = conn
self._meters = set()
self._resources = None
self.invoice_type = None
# Invoice type needs to get set from the config, which is
# part of the Artifice setup above.
def __getitem__(self, item):
try:
return getattr(self.tenant, item)
except AttributeError:
try:
return self.tenant[item]
except KeyError:
raise KeyError("No such key '%s' in tenant" % item)
def __getattr__(self, attr):
if attr not in self.tenant:
return object.__getattribute__(self, attr)
# return super(Tenant, self).__getattr__(attr)
return self.tenant[attr]
def invoice(self, start, end):
"""
Creates a new Invoice.
Invoices are an Artifice datamodel that represent a
set of billable entries assigned to a client on a given Date.
An Invoice offers very little of its own opinions,
requiring a backend plugin to operate.
@returns: invoice
"""
if self.invoice_type is None:
invoice_type = self.conn.config["main"]["invoice:object"]
if ":" not in invoice_type:
raise AttributeError("Invoice configuration incorrect! %s" % invoice_type)
module, call = invoice_type.split(":")
_package = __import__(module, globals(), locals(), [ call ])
funct = getattr(_package, call)
self.invoice_type = funct
config = self.conn.config["invoice_object"]
invoice = self.invoice_type(self, config)
return invoice
def resources(self, start, end):
if not self._resources:
date_fields = [{
"field": "timestamp",
"op": "ge",
"value": start.strftime(date_format)
},
{
"field": "timestamp",
"op": "lt",
"value": end.strftime(date_format)
},
{ "field": "project_id",
"op": "eq",
"value": self.tenant["id"]
},
]
# Sets up our resources as Ceilometer objects.
# That's cool, I think.
self._resources = self.conn.ceilometer.resources.list(date_fields)
return self._resources
# def usage(self, start, end, section=None):
def usage(self, start, end):
"""
Usage is the meat of Artifice, returning a dict of location to
sub-information
"""
# Returns a usage dict, based on regions.
vms = {}
vm_to_region = {}
ports = {}
usage_by_dc = {}
writing_to = None
vms = []
networks = []
storage = []
volumes = []
# Object storage is mapped by project_id
for resource in self.resources(start, end):
# print dir(resource)
rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ]
if "storage.objects" in rels:
# Unknown how this data layout happens yet.
storage.append(Resource(resource, self.conn))
pass
elif "network" in rels:
# Have we seen the VM that owns this yet?
networks.append(Resource(resource , self.conn))
elif "volumne" in rels:
volumes.append( Resource(resource, self.conn) )
elif 'instance' in rels:
vms.append(Resource(resource, self.conn ))
datacenters = {}
region_tmpl = {
"vms": vms,
"network": networks,
"objects": storage,
"volumes": volumes
}
return Usage(region_tmpl, start, end, self.conn)
class Usage(object):
"""
This is a dict-like object containing all the datacenters and
meters available in those datacenters.
"""
def __init__(self, contents, start, end, conn):
self.contents = contents
self.start = start
self.end = end
self.conn = conn
self._vms = []
self._objects = []
self._volumes = []
# Replaces all the internal references with better references to
# actual metered values.
# self._replace()
@property
def vms(self):
if not self._vms:
vms = []
for vm in self.contents["vms"]:
VM = resources.VM(vm, self.start, self.end)
md = vm["metadata"]
host = md["host"]
VM.location = self.conn.host_to_dc( vm["metadata"]["host"] )
vms.append(VM)
self._vms = vms
return self._vms
@property
def objects(self):
if not self._objects:
objs = []
for object_ in self.contents["objects"]:
obj = resources.Object(object_, self.start, self.end)
objs.append(obj)
self._objs = objs
return self._objs
@property
def volumes(self):
if not self._volumes:
objs = []
for obj in self.contents["volumes"]:
obj = resources.Volume(obj, self.start, self.end)
objs.append(obj)
self._volumes = objs
return self._volumes
# def __getitem__(self, item):
# return self.contents[item]
def __iter__(self):
return self
def next(self):
# pass
keys = self.contents.keys()
for key in keys:
yield key
raise StopIteration()
def save(self):
"""
Iterate the list of things; save them to DB.
"""
for vm in self.vms:
vm.save()
for obj in self.objects:
obj.save()
for vol in self.volumes:
vol.save()
class Resource(object):
def __init__(self, resource, conn):
self.resource = resource
self.conn = conn
self._meters = {}
# def __getitem__(self, item):
# return self.resource
def meter(self, name, start, end):
pass # Return a named meter
for meter in self.resource.links:
if meter["rel"] == name:
m = Meter(self, meter["href"], self.conn, start, end)
self._meters[name] = m
return m
raise AttributeError("no such meter %s" % name)
def __getitem__(self
|
})
if r.ok:
|
random_line_split
|
fint.py
|
, target):
# only return stories BY the user
user_token = 'id=%s' % target
links = re.findall('(/story.php\?story_fbid=[^"#]+)', html)
return [
'%s%s' % (BASE_URL, x.replace('&', '&')) for x in set(links)
if user_token in x
]
def get_photos_urls(target_id, html):
|
def get_all_photos(driver, target_id, limit=100):
url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id
driver.get(url)
time.sleep(pause())
see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source)
photos = []
if not see_all:
return photos
else:
driver.get(BASE_URL + see_all[0].replace('&', '&'))
while len(photos) < limit:
photos += get_photos_urls(target_id, driver.page_source)
see_more = re.findall(
'<a href="(.[^"#]*)"><span>See More Photos</span></a>',
driver.page_source)
if not see_more:
see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
driver.page_source)
if see_more:
url = BASE_URL + see_more[0].replace('&', '&')
time.sleep(pause())
driver.get(url)
else:
break
return photos
def get_all_stories(driver, target, limit=100):
url = 'https://mbasic.facebook.com/%s?v=timeline' % target
driver.get(url)
stories = []
while len(stories) < limit:
stories += get_stories_urls(driver.page_source, target)
see_more = re.findall(
'<a href="(.[^"#]*)"><span>See More Stories</span></a>',
driver.page_source)
if not see_more:
see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
driver.page_source)
if see_more:
url = BASE_URL + see_more[0].replace('&', '&')
time.sleep(pause())
driver.get(url)
else:
break
return stories
def get_all_comments(driver, url, limit=200, cur_length=0):
if cur_length >= limit:
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
commenters = parse_commenters(html)
cur_length += len(commenters)
more_comments_url = re.findall(
'<div class=".[^"]*" id="see_next_[0-9]+"><a href="(.[^"]*)">', html)
more_comments_url = [
'%s%s' % (BASE_URL, x.replace('&', '&')) for x in more_comments_url
]
if (more_comments_url) and limit > cur_length:
time.sleep(pause())
url = more_comments_url[0]
commenters += get_all_comments(driver,
url,
limit,
cur_length=cur_length)
return commenters
# given a driver on a story.php page, extracts all users who have reacted
# takes only 1st level reactions (not consideringr reactions to comments etc.)
def get_all_reactions(driver,
url,
reactions_per_page=999,
limit=2000,
cur_length=0):
if cur_length >= limit:
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
reactions = parse_likers(html)
cur_length += len(reactions)
reaction_urls = re.findall(
'(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)',
html)
reaction_urls = [
'%s%s' % (BASE_URL, x.replace('&', '&').replace(
'?limit=10', '?limit=%d' % reactions_per_page))
for x in reaction_urls
]
if (reaction_urls) and limit > cur_length:
time.sleep(pause())
url = reaction_urls[0]
reactions += get_all_reactions(driver, url, reactions_per_page, limit,
cur_length)
return reactions
# Given a story.php page, return a list of (url, display name)
def parse_commenters(html):
return re.findall(
'<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html)
# Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name)
def parse_likers(html):
return re.findall(
'<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html)
def profile_picture(driver, target_username):
url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username)
driver.get(url)
commenters = parse_commenters(driver.page_source)
# given a list of [username, name] returns a list of [id, name, username]
def fill_user_ids(driver, users):
res = []
c = 0
msg = '[*] Retreiving user ids... '
try:
for u in users:
c += 1
msg = '%s[*] Retreiving user ids... %d of %d' % ('\r' * len(msg),
c, len(users))
print(msg, end='\r')
time.sleep(pause())
fbid = get_user_id(driver, u[0])
user = (fbid, u[1], u[0])
res.append(user)
except (KeyboardInterrupt, SystemExit):
print('[!] KeyboardInterrupt received. Exiting...')
return res
except Exception as ex:
print('[!] Error while retrieving user ids')
print(ex)
return res
return res
# given a username, finds the fb user id from the source of the profile page
def get_user_id(driver, username):
url = 'https://www.facebook.com/%s' % username.replace('/', '')
driver.get(url)
fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source)
if fbid:
return fbid[0]
else:
print('[!] Error while getting id of user %s' % username)
return -1
def get_username(driver, userid):
url = 'https://www.facebook.com/%s' % userid
driver.get(url)
time.sleep(pause())
return driver.current_url.split('/')[-1].split('?')[0]
def parse_args():
parser = argparse.ArgumentParser(
description='Find users who interacted with a Facebook profile.')
parser.add_argument(
'-fu',
'--user',
metavar='USERNAME',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-fp',
'--password',
metavar='PASSWORD',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-t',
'--target',
metavar='TARGET',
type=str,
help='Username or numeric id of the target Facebook account')
parser.add_argument('-ls',
'--limit-stories',
metavar='LIMIT',
type=int,
default=20,
help='Max number of stories to analyze')
parser.add_argument('-lp',
'--limit-photos',
metavar='LIMIT',
type=int,
default=20,
help='Max number of photos to analyze')
parser.add_argument(
'-lr',
'--limit-reactions',
metavar='LIMIT',
default=1000,
type=int,
help='Max number of reactions to analyze for each story')
parser.add_argument(
'-lc',
'--limit-comments',
metavar='LIMIT',
default=100,
type=int,
help='Max number of comments to analyze for each story')
parser.add_argument('-o',
'--output',
metavar='OUTPUTFILE',
type=str,
help='Specify the name of the pivots output file')
parser.add_argument('-csv',
'--csv-output',
metavar='CSVOUTPUTFILE',
type=str,
help='Store output file also in CSV format')
parser.add_argument(
'-q',
'--headless',
action='store_true',
help='Run browser in headless mode. No browser window will be shown.')
parser.add_argument('-d',
'--driver-path',
metavar='EXECUTABLE',
type=str,
help='Path to geckodriver executable')
args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help'])
return args
def print_statistics(commenters, reactions):
print('-' * 78)
print(' ' * 34, end=' ')
print('STATISTICS')
print('-' * 78)
|
links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html)
return ['%s%s' % (BASE_URL, x.replace('&', '&')) for x in set(links)]
|
identifier_body
|
fint.py
|
target):
# only return stories BY the user
user_token = 'id=%s' % target
links = re.findall('(/story.php\?story_fbid=[^"#]+)', html)
return [
'%s%s' % (BASE_URL, x.replace('&', '&')) for x in set(links)
if user_token in x
]
def get_photos_urls(target_id, html):
links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html)
return ['%s%s' % (BASE_URL, x.replace('&', '&')) for x in set(links)]
def get_all_photos(driver, target_id, limit=100):
url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id
driver.get(url)
time.sleep(pause())
see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source)
photos = []
if not see_all:
return photos
else:
driver.get(BASE_URL + see_all[0].replace('&', '&'))
while len(photos) < limit:
photos += get_photos_urls(target_id, driver.page_source)
see_more = re.findall(
'<a href="(.[^"#]*)"><span>See More Photos</span></a>',
driver.page_source)
if not see_more:
see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
driver.page_source)
if see_more:
url = BASE_URL + see_more[0].replace('&', '&')
time.sleep(pause())
driver.get(url)
else:
break
return photos
def get_all_stories(driver, target, limit=100):
url = 'https://mbasic.facebook.com/%s?v=timeline' % target
driver.get(url)
stories = []
while len(stories) < limit:
stories += get_stories_urls(driver.page_source, target)
see_more = re.findall(
'<a href="(.[^"#]*)"><span>See More Stories</span></a>',
driver.page_source)
if not see_more:
see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
driver.page_source)
if see_more:
|
else:
break
return stories
def get_all_comments(driver, url, limit=200, cur_length=0):
if cur_length >= limit:
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
commenters = parse_commenters(html)
cur_length += len(commenters)
more_comments_url = re.findall(
'<div class=".[^"]*" id="see_next_[0-9]+"><a href="(.[^"]*)">', html)
more_comments_url = [
'%s%s' % (BASE_URL, x.replace('&', '&')) for x in more_comments_url
]
if (more_comments_url) and limit > cur_length:
time.sleep(pause())
url = more_comments_url[0]
commenters += get_all_comments(driver,
url,
limit,
cur_length=cur_length)
return commenters
# given a driver on a story.php page, extracts all users who have reacted
# takes only 1st level reactions (not consideringr reactions to comments etc.)
def get_all_reactions(driver,
url,
reactions_per_page=999,
limit=2000,
cur_length=0):
if cur_length >= limit:
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
reactions = parse_likers(html)
cur_length += len(reactions)
reaction_urls = re.findall(
'(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)',
html)
reaction_urls = [
'%s%s' % (BASE_URL, x.replace('&', '&').replace(
'?limit=10', '?limit=%d' % reactions_per_page))
for x in reaction_urls
]
if (reaction_urls) and limit > cur_length:
time.sleep(pause())
url = reaction_urls[0]
reactions += get_all_reactions(driver, url, reactions_per_page, limit,
cur_length)
return reactions
# Given a story.php page, return a list of (url, display name)
def parse_commenters(html):
return re.findall(
'<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html)
# Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name)
def parse_likers(html):
return re.findall(
'<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html)
def profile_picture(driver, target_username):
url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username)
driver.get(url)
commenters = parse_commenters(driver.page_source)
# given a list of [username, name] returns a list of [id, name, username]
def fill_user_ids(driver, users):
res = []
c = 0
msg = '[*] Retreiving user ids... '
try:
for u in users:
c += 1
msg = '%s[*] Retreiving user ids... %d of %d' % ('\r' * len(msg),
c, len(users))
print(msg, end='\r')
time.sleep(pause())
fbid = get_user_id(driver, u[0])
user = (fbid, u[1], u[0])
res.append(user)
except (KeyboardInterrupt, SystemExit):
print('[!] KeyboardInterrupt received. Exiting...')
return res
except Exception as ex:
print('[!] Error while retrieving user ids')
print(ex)
return res
return res
# given a username, finds the fb user id from the source of the profile page
def get_user_id(driver, username):
url = 'https://www.facebook.com/%s' % username.replace('/', '')
driver.get(url)
fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source)
if fbid:
return fbid[0]
else:
print('[!] Error while getting id of user %s' % username)
return -1
def get_username(driver, userid):
url = 'https://www.facebook.com/%s' % userid
driver.get(url)
time.sleep(pause())
return driver.current_url.split('/')[-1].split('?')[0]
def parse_args():
parser = argparse.ArgumentParser(
description='Find users who interacted with a Facebook profile.')
parser.add_argument(
'-fu',
'--user',
metavar='USERNAME',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-fp',
'--password',
metavar='PASSWORD',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-t',
'--target',
metavar='TARGET',
type=str,
help='Username or numeric id of the target Facebook account')
parser.add_argument('-ls',
'--limit-stories',
metavar='LIMIT',
type=int,
default=20,
help='Max number of stories to analyze')
parser.add_argument('-lp',
'--limit-photos',
metavar='LIMIT',
type=int,
default=20,
help='Max number of photos to analyze')
parser.add_argument(
'-lr',
'--limit-reactions',
metavar='LIMIT',
default=1000,
type=int,
help='Max number of reactions to analyze for each story')
parser.add_argument(
'-lc',
'--limit-comments',
metavar='LIMIT',
default=100,
type=int,
help='Max number of comments to analyze for each story')
parser.add_argument('-o',
'--output',
metavar='OUTPUTFILE',
type=str,
help='Specify the name of the pivots output file')
parser.add_argument('-csv',
'--csv-output',
metavar='CSVOUTPUTFILE',
type=str,
help='Store output file also in CSV format')
parser.add_argument(
'-q',
'--headless',
action='store_true',
help='Run browser in headless mode. No browser window will be shown.')
parser.add_argument('-d',
'--driver-path',
metavar='EXECUTABLE',
type=str,
help='Path to geckodriver executable')
args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help'])
return args
def print_statistics(commenters, reactions):
print('-' * 78)
print(' ' * 34, end=' ')
print('STATISTICS')
print('-' * 78)
|
url = BASE_URL + see_more[0].replace('&', '&')
time.sleep(pause())
driver.get(url)
|
conditional_block
|
fint.py
|
target):
# only return stories BY the user
user_token = 'id=%s' % target
links = re.findall('(/story.php\?story_fbid=[^"#]+)', html)
return [
'%s%s' % (BASE_URL, x.replace('&', '&')) for x in set(links)
if user_token in x
]
def get_photos_urls(target_id, html):
links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html)
return ['%s%s' % (BASE_URL, x.replace('&', '&')) for x in set(links)]
def get_all_photos(driver, target_id, limit=100):
url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id
driver.get(url)
time.sleep(pause())
see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source)
photos = []
if not see_all:
return photos
else:
driver.get(BASE_URL + see_all[0].replace('&', '&'))
while len(photos) < limit:
photos += get_photos_urls(target_id, driver.page_source)
see_more = re.findall(
'<a href="(.[^"#]*)"><span>See More Photos</span></a>',
driver.page_source)
if not see_more:
see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
driver.page_source)
if see_more:
url = BASE_URL + see_more[0].replace('&', '&')
time.sleep(pause())
driver.get(url)
else:
break
return photos
def get_all_stories(driver, target, limit=100):
url = 'https://mbasic.facebook.com/%s?v=timeline' % target
driver.get(url)
stories = []
while len(stories) < limit:
stories += get_stories_urls(driver.page_source, target)
see_more = re.findall(
'<a href="(.[^"#]*)"><span>See More Stories</span></a>',
driver.page_source)
if not see_more:
see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
driver.page_source)
if see_more:
url = BASE_URL + see_more[0].replace('&', '&')
time.sleep(pause())
driver.get(url)
else:
break
return stories
def get_all_comments(driver, url, limit=200, cur_length=0):
if cur_length >= limit:
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
commenters = parse_commenters(html)
cur_length += len(commenters)
more_comments_url = re.findall(
'<div class=".[^"]*" id="see_next_[0-9]+"><a href="(.[^"]*)">', html)
more_comments_url = [
'%s%s' % (BASE_URL, x.replace('&', '&')) for x in more_comments_url
]
if (more_comments_url) and limit > cur_length:
time.sleep(pause())
url = more_comments_url[0]
commenters += get_all_comments(driver,
url,
limit,
cur_length=cur_length)
return commenters
# given a driver on a story.php page, extracts all users who have reacted
# takes only 1st level reactions (not consideringr reactions to comments etc.)
def get_all_reactions(driver,
url,
reactions_per_page=999,
limit=2000,
cur_length=0):
if cur_length >= limit:
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
reactions = parse_likers(html)
cur_length += len(reactions)
reaction_urls = re.findall(
'(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)',
html)
reaction_urls = [
'%s%s' % (BASE_URL, x.replace('&', '&').replace(
'?limit=10', '?limit=%d' % reactions_per_page))
for x in reaction_urls
]
if (reaction_urls) and limit > cur_length:
time.sleep(pause())
url = reaction_urls[0]
reactions += get_all_reactions(driver, url, reactions_per_page, limit,
cur_length)
return reactions
# Given a story.php page, return a list of (url, display name)
def
|
(html):
return re.findall(
'<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html)
# Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name)
def parse_likers(html):
return re.findall(
'<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html)
def profile_picture(driver, target_username):
url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username)
driver.get(url)
commenters = parse_commenters(driver.page_source)
# given a list of [username, name] returns a list of [id, name, username]
def fill_user_ids(driver, users):
res = []
c = 0
msg = '[*] Retreiving user ids... '
try:
for u in users:
c += 1
msg = '%s[*] Retreiving user ids... %d of %d' % ('\r' * len(msg),
c, len(users))
print(msg, end='\r')
time.sleep(pause())
fbid = get_user_id(driver, u[0])
user = (fbid, u[1], u[0])
res.append(user)
except (KeyboardInterrupt, SystemExit):
print('[!] KeyboardInterrupt received. Exiting...')
return res
except Exception as ex:
print('[!] Error while retrieving user ids')
print(ex)
return res
return res
# given a username, finds the fb user id from the source of the profile page
def get_user_id(driver, username):
url = 'https://www.facebook.com/%s' % username.replace('/', '')
driver.get(url)
fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source)
if fbid:
return fbid[0]
else:
print('[!] Error while getting id of user %s' % username)
return -1
def get_username(driver, userid):
url = 'https://www.facebook.com/%s' % userid
driver.get(url)
time.sleep(pause())
return driver.current_url.split('/')[-1].split('?')[0]
def parse_args():
parser = argparse.ArgumentParser(
description='Find users who interacted with a Facebook profile.')
parser.add_argument(
'-fu',
'--user',
metavar='USERNAME',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-fp',
'--password',
metavar='PASSWORD',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-t',
'--target',
metavar='TARGET',
type=str,
help='Username or numeric id of the target Facebook account')
parser.add_argument('-ls',
'--limit-stories',
metavar='LIMIT',
type=int,
default=20,
help='Max number of stories to analyze')
parser.add_argument('-lp',
'--limit-photos',
metavar='LIMIT',
type=int,
default=20,
help='Max number of photos to analyze')
parser.add_argument(
'-lr',
'--limit-reactions',
metavar='LIMIT',
default=1000,
type=int,
help='Max number of reactions to analyze for each story')
parser.add_argument(
'-lc',
'--limit-comments',
metavar='LIMIT',
default=100,
type=int,
help='Max number of comments to analyze for each story')
parser.add_argument('-o',
'--output',
metavar='OUTPUTFILE',
type=str,
help='Specify the name of the pivots output file')
parser.add_argument('-csv',
'--csv-output',
metavar='CSVOUTPUTFILE',
type=str,
help='Store output file also in CSV format')
parser.add_argument(
'-q',
'--headless',
action='store_true',
help='Run browser in headless mode. No browser window will be shown.')
parser.add_argument('-d',
'--driver-path',
metavar='EXECUTABLE',
type=str,
help='Path to geckodriver executable')
args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help'])
return args
def print_statistics(commenters, reactions):
print('-' * 78)
print(' ' * 34, end=' ')
print('STATISTICS')
print('-' * 78)
|
parse_commenters
|
identifier_name
|
fint.py
|
return []
driver.get(url)
html = driver.page_source.encode("utf-8",
errors='replace').decode("utf-8",
errors="replace")
reactions = parse_likers(html)
cur_length += len(reactions)
reaction_urls = re.findall(
'(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)',
html)
reaction_urls = [
'%s%s' % (BASE_URL, x.replace('&', '&').replace(
'?limit=10', '?limit=%d' % reactions_per_page))
for x in reaction_urls
]
if (reaction_urls) and limit > cur_length:
time.sleep(pause())
url = reaction_urls[0]
reactions += get_all_reactions(driver, url, reactions_per_page, limit,
cur_length)
return reactions
# Given a story.php page, return a list of (url, display name)
def parse_commenters(html):
return re.findall(
'<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html)
# Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name)
def parse_likers(html):
return re.findall(
'<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html)
def profile_picture(driver, target_username):
url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username)
driver.get(url)
commenters = parse_commenters(driver.page_source)
# given a list of [username, name] returns a list of [id, name, username]
def fill_user_ids(driver, users):
res = []
c = 0
msg = '[*] Retreiving user ids... '
try:
for u in users:
c += 1
msg = '%s[*] Retreiving user ids... %d of %d' % ('\r' * len(msg),
c, len(users))
print(msg, end='\r')
time.sleep(pause())
fbid = get_user_id(driver, u[0])
user = (fbid, u[1], u[0])
res.append(user)
except (KeyboardInterrupt, SystemExit):
print('[!] KeyboardInterrupt received. Exiting...')
return res
except Exception as ex:
print('[!] Error while retrieving user ids')
print(ex)
return res
return res
# given a username, finds the fb user id from the source of the profile page
def get_user_id(driver, username):
url = 'https://www.facebook.com/%s' % username.replace('/', '')
driver.get(url)
fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source)
if fbid:
return fbid[0]
else:
print('[!] Error while getting id of user %s' % username)
return -1
def get_username(driver, userid):
url = 'https://www.facebook.com/%s' % userid
driver.get(url)
time.sleep(pause())
return driver.current_url.split('/')[-1].split('?')[0]
def parse_args():
parser = argparse.ArgumentParser(
description='Find users who interacted with a Facebook profile.')
parser.add_argument(
'-fu',
'--user',
metavar='USERNAME',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-fp',
'--password',
metavar='PASSWORD',
type=str,
help='Username of the Facebook account that will be used for scraping')
parser.add_argument(
'-t',
'--target',
metavar='TARGET',
type=str,
help='Username or numeric id of the target Facebook account')
parser.add_argument('-ls',
'--limit-stories',
metavar='LIMIT',
type=int,
default=20,
help='Max number of stories to analyze')
parser.add_argument('-lp',
'--limit-photos',
metavar='LIMIT',
type=int,
default=20,
help='Max number of photos to analyze')
parser.add_argument(
'-lr',
'--limit-reactions',
metavar='LIMIT',
default=1000,
type=int,
help='Max number of reactions to analyze for each story')
parser.add_argument(
'-lc',
'--limit-comments',
metavar='LIMIT',
default=100,
type=int,
help='Max number of comments to analyze for each story')
parser.add_argument('-o',
'--output',
metavar='OUTPUTFILE',
type=str,
help='Specify the name of the pivots output file')
parser.add_argument('-csv',
'--csv-output',
metavar='CSVOUTPUTFILE',
type=str,
help='Store output file also in CSV format')
parser.add_argument(
'-q',
'--headless',
action='store_true',
help='Run browser in headless mode. No browser window will be shown.')
parser.add_argument('-d',
'--driver-path',
metavar='EXECUTABLE',
type=str,
help='Path to geckodriver executable')
args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help'])
return args
def print_statistics(commenters, reactions):
print('-' * 78)
print(' ' * 34, end=' ')
print('STATISTICS')
print('-' * 78)
print('Most comments:')
for u in Counter(commenters).most_common():
print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0]))
print()
print('Most reactions:')
for u in Counter(reactions).most_common():
print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0]))
print()
print('Total:')
for u in Counter(commenters + reactions).most_common():
print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0]))
print()
def store_csv(users, csv_file_path):
print('[*] Storing users in csv file %s' % csv_file_path)
with open(csv_file_path, mode='w', newline='',
encoding='utf-8') as csv_file:
writer = csv.writer(csv_file,
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writerow(['id', 'name', 'url'])
for u in users:
writer.writerow(u)
def store_pivots(users, path):
print('[*] Storing users id in file %s' % path)
with open(path, 'w') as f:
for u in users:
f.write('%s\n' % u[0])
def check_file_exists(file):
yes = {'yes', 'y', 'ye'}
if os.path.isfile(file):
print(
'[!] Warning: output file %s already exists. Do you want to overwrite? [y/N]'
% file,
end=' ')
choice = input().lower()
if choice not in yes:
sys.exit(0)
def main():
print(BANNER)
args = parse_args()
options = Options()
if args.headless: options.add_argument("--headless")
driver = webdriver.Firefox(executable_path=args.driver_path,
options=options)
do_login(driver, args.user, args.password)
check_login(driver)
if args.target.isdigit():
target_id = args.target
target_username = get_username(driver, target_id)
else:
target_id = get_user_id(driver, args.target)
target_username = args.target
print('[*] Selected target: %s (%s)' % (target_username, target_id))
urls_to_visit = []
commenters = []
reactions = []
users = []
print('[*] Getting photos links... ', end=" ")
photos = get_all_photos(driver, target_id,
args.limit_photos)[:args.limit_photos]
print('%d photos found' % len(photos))
print('[*] Getting stories links... ', end=" ")
stories = get_all_stories(driver, target_id,
args.limit_stories)[:args.limit_stories]
print('%d stories found' % len(stories))
print(
'[*] Retreiving users who have interacted... press Ctrl+C when you have enough'
)
msg = ''
try:
for url in photos + stories:
commenters += parse_commenters(driver.page_source)
if len(commenters) < args.limit_comments:
commenters += get_all_comments(driver,
url,
limit=args.limit_comments)
if len(reactions) < args.limit_reactions:
reactions += get_all_reactions(driver,
url,
limit=args.limit_reactions)
users = list(set(reactions).union(set(commenters)))
msg = '%sUnique users: %d Comments: %d Reactions: %d' % (
'\r' * len(msg), len(users), len(commenters), len(reactions))
|
print(msg, end='\r')
except (KeyboardInterrupt, SystemExit):
print('[!] KeyboardInterrupt received. %d users retrieved' %
len(users))
|
random_line_split
|
|
headtail_resolution.py
|
(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [
self._get_partition_indices(partition_index)
for partition_index in all_partitions_indexes
if partition_index >= 0
]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(
theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan),
scores=resolved_results.scores.filled(np.nan),
)
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(
theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),
scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
)
def _make_continuous_partitions(
shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
partitioned_results = _PartitionedResults(shuffled_results)
# discard low score frames early (use the maximum value of both scores for now)
good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
for frame_index in good_score_frames:
prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
# if there is a big gap > time_window we start a new partition, with a random value (0)
if np.all(np.any(prev_theta.mask, axis=1)):
partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
# otherwise we look in the time_window close past the closest non nan frame see if we can continue the
# partition as long as the values stay continuous
else:
last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
dists = [
angle_distance(
shuffled_results.theta[frame_index, k, :],
prev_theta[last_valid_index],
)
for k in range(2)
]
partition = int(np.argmin(dists))
if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
partitioned_results.set_partition(frame_index=frame_index, partition=partition)
# discard short segments
for cur_partition_indices in partitioned_results.get_segments():
if len(cur_partition_indices) < min_segment_size:
partitioned_results.mask(cur_partition_indices)
return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
|
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
starting with the segments that have the least frames gap between an adjacent trusted segment
Don't align isolated segments which a big gap between trusted segments
"""
maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
# ensure that if no segments have been aligned at all, pick one solution randomly to start
if np.all(segments_alignment.mask):
logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
return segments_alignment
# fix in priority the segments with known adjacent frames with little gap
# until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
# we first pick the best candidate segment to align (there are known frames nearby before or after or both)
all_gaps = [
_calculate_smallest_gap_to_adjacent(
segment_index=x,
segments=segments,
segments_alignment=segments_alignment,
)
for x in unaligned
]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
# abort if only isolated segments are left
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix_index]
cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]
adjacent_segment_index = cur_segment_index + adjacent_segment_offset
adjacent_alignment = segments_alignment[adjacent_segment_index]
adjacent_segment = segments[adjacent_segment_index]
adjacent_segment_skeleton
|
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(
segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]
):
dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
|
conditional_block
|
headtail_resolution.py
|
(partitioned_series, shuffled_series, frame_index: int, partition: int):
partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]
partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]
class _PartitionedResults(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [
self._get_partition_indices(partition_index)
for partition_index in all_partitions_indexes
if partition_index >= 0
]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(
theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan),
scores=resolved_results.scores.filled(np.nan),
)
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(
theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),
scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
)
def _make_continuous_partitions(
shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
partitioned_results = _PartitionedResults(shuffled_results)
# discard low score frames early (use the maximum value of both scores for now)
good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
for frame_index in good_score_frames:
prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
# if there is a big gap > time_window we start a new partition, with a random value (0)
if np.all(np.any(prev_theta.mask, axis=1)):
partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
# otherwise we look in the time_window close past the closest non nan frame see if we can continue the
# partition as long as the values stay continuous
else:
last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
dists = [
angle_distance(
shuffled_results.theta[frame_index, k, :],
prev_theta[last_valid_index],
)
for k in range(2)
]
partition = int(np.argmin(dists))
if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
partitioned_results.set_partition(frame_index=frame_index, partition=partition)
# discard short segments
for cur_partition_indices in partitioned_results.get_segments():
if len(cur_partition_indices) < min_segment_size:
partitioned_results.mask(cur_partition_indices)
return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(
segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]
):
dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
starting with the segments that have the least frames gap between an adjacent trusted segment
Don't align isolated segments which a big gap between trusted segments
"""
maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
# ensure that if no segments have been aligned at all, pick one solution randomly to start
if np.all(segments_alignment.mask):
logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
return segments_alignment
# fix in priority the segments with known adjacent frames with little gap
# until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
# we first pick the best candidate segment to align (there are known frames nearby before or after or both)
all_gaps = [
_calculate_smallest_gap_to_adjacent(
segment_index=x,
segments=segments,
segments_alignment=segments_alignment,
)
for x in unaligned
]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
# abort if only isolated segments are left
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix
|
_set_partition
|
identifier_name
|
|
headtail_resolution.py
|
(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [
self._get_partition_indices(partition_index)
for partition_index in all_partitions_indexes
if partition_index >= 0
]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
|
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(
theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan),
scores=resolved_results.scores.filled(np.nan),
)
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(
theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),
scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
)
def _make_continuous_partitions(
shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
partitioned_results = _PartitionedResults(shuffled_results)
# discard low score frames early (use the maximum value of both scores for now)
good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
for frame_index in good_score_frames:
prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
# if there is a big gap > time_window we start a new partition, with a random value (0)
if np.all(np.any(prev_theta.mask, axis=1)):
partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
# otherwise we look in the time_window close past the closest non nan frame see if we can continue the
# partition as long as the values stay continuous
else:
last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
dists = [
angle_distance(
shuffled_results.theta[frame_index, k, :],
prev_theta[last_valid_index],
)
for k in range(2)
]
partition = int(np.argmin(dists))
if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
partitioned_results.set_partition(frame_index=frame_index, partition=partition)
# discard short segments
for cur_partition_indices in partitioned_results.get_segments():
if len(cur_partition_indices) < min_segment_size:
partitioned_results.mask(cur_partition_indices)
return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(
segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]
):
dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
starting with the segments that have the least frames gap between an adjacent trusted segment
Don't align isolated segments which a big gap between trusted segments
"""
maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
# ensure that if no segments have been aligned at all, pick one solution randomly to start
if np.all(segments_alignment.mask):
logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
return segments_alignment
# fix in priority the segments with known adjacent frames with little gap
# until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
# we first pick the best candidate segment to align (there are known frames nearby before or after or both)
all_gaps = [
_calculate_smallest_gap_to_adjacent(
segment_index=x,
segments=segments,
segments_alignment=segments_alignment,
)
for x in unaligned
]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
# abort if only isolated segments are left
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix_index]
cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]
adjacent_segment_index = cur_segment_index + adjacent_segment_offset
adjacent_alignment = segments_alignment[adjacent_segment_index]
adjacent_segment = segments[adjacent_segment_index]
adjacent_segment_skeleton
|
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
|
identifier_body
|
headtail_resolution.py
|
(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
|
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [
self._get_partition_indices(partition_index)
for partition_index in all_partitions_indexes
if partition_index >= 0
]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(
theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan),
scores=resolved_results.scores.filled(np.nan),
)
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(
theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),
scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
)
def _make_continuous_partitions(
shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
partitioned_results = _PartitionedResults(shuffled_results)
# discard low score frames early (use the maximum value of both scores for now)
good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
for frame_index in good_score_frames:
prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
# if there is a big gap > time_window we start a new partition, with a random value (0)
if np.all(np.any(prev_theta.mask, axis=1)):
partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
# otherwise we look in the time_window close past the closest non nan frame see if we can continue the
# partition as long as the values stay continuous
else:
last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
dists = [
angle_distance(
shuffled_results.theta[frame_index, k, :],
prev_theta[last_valid_index],
)
for k in range(2)
]
partition = int(np.argmin(dists))
if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
partitioned_results.set_partition(frame_index=frame_index, partition=partition)
# discard short segments
for cur_partition_indices in partitioned_results.get_segments():
if len(cur_partition_indices) < min_segment_size:
partitioned_results.mask(cur_partition_indices)
return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(
segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]
):
dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
starting with the segments that have the least frames gap between an adjacent trusted segment
Don't align isolated segments which a big gap between trusted segments
"""
maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
# ensure that if no segments have been aligned at all, pick one solution randomly to start
if np.all(segments_alignment.mask):
logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
return segments_alignment
# fix in priority the segments with known adjacent frames with little gap
# until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
# we first pick the best candidate segment to align (there are known frames nearby before or after or both)
all_gaps = [
_calculate_smallest_gap_to_adjacent(
segment_index=x,
segments=segments,
segments_alignment=segments_alignment,
)
for x in unaligned
]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
# abort if only isolated segments are left
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix_index]
cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]
adjacent_segment_index = cur_segment_index + adjacent_segment_offset
adjacent_alignment = segments_alignment[adjacent_segment_index]
adjacent_segment = segments[adjacent_segment_index]
adjacent_segment_skeleton =
|
random_line_split
|
|
game.rs
|
in which we will use the method
* get_entry which takes an Address(HashString) type then return ZomeApiResult<Option<Entry>>. We then unwrap it twice to
* retrieve the Entry itself. Then we use if let to match the move_entry with an Entry::App variant. This is because Entry
* enum can have different variants and we need to makesure that the entry found in this address is an App variant. If not
* then we throw a panic in else statement saying that it is not an app entry. Now if it is an app entry, we use the try_from method
* to try to convert the Entry::App, which we assume to have the Move struct in the second element of App
* variant(here named as move_struct) as the AppEntryValue type, to an actual Move struct. If the try_from fails then we throw an error
* saying the Entry at the given address is not a Move type of entry. After we call the closure on all addresses in move_addresses,
* we use the collect() to turn them into Vec<Move>. collect() would understand that the items should be collected into Vec<Move>
* since that is the defined type for moves.
*/
Ok(moves)
},
None => {
Ok(Vec::new())
}
}
}
pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
let moves = get_moves(game_address)?;
let game = get_game(game_address)?;
let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
Ok(new_state)
/* get_state takes the address of the game as a parameter and return a ZomeApiResult<GameState>. This is a reducer function.
* First we create a vairable named moves and call the get_moves in it with the parameter game_address.
* Since we have the ? operator in get_moves(), it will return the value T in Result<T, ZomeApiError> if nothing goes wrong.
* T in this case is Vec<Move> which will also be the type of moves variable. next we create the game variable an call the get_game
* with the game_address being its argument. get_game also returns ZomeApiResult with Game being the success value so we
* use the ? to get the Game struct if no error occurs. with moves and game having the vectors we need, we will now create
* a variable name new_state and call iter() on moves to turn it into an Iterator in order for us to call a method fold() on it.
* fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element.
* The closure returns the value that the accumulator should have for the next iteration. In this case, the initial value is an empty
* GameState created with initial() we associated with GameState. Then the accumulator will be named state which will hold the
* initial value (empty GameState) we set. new_move will be each Move stored in moves. now we call the evolve() method we associated
* with GameState in state.rs. evolve takes self, Game struct, and &Move so we clone game and give it as a first argument and a
* reference to moves with new_move(automatically a reference since the element in fold has FnMut implemented). This evolve method will
* add all the Move that is in the moves to the GameState which will be stored in new_state. now we can return this as Ok(new_state)
*/
}
pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
utils::get_as_type(game_address.to_owned())
/* get_as_type load an entry from the given address in the argument then convert it to a given type wrapped in ZomeApiResult. In this case,
* rust will infer that the type is Game since that is the return value of get_game function so it will convert the loaded entry from the
* given address to ZomeApiResult<Game>
*/
}
/*===== End of DHT Functions ======*/
/*=============================================
= Local chain functions =
=============================================*/
pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
local_chain
.iter()
.filter(|entry| {
entry.address() == game_address.to_owned()
})
.filter_map(|entry| {
if let Entry::App(_, entry_data) = entry {
Some(Game::try_from(entry_data.clone()).unwrap())
} else {
None
}
})
.next()
.ok_or(ZomeApiError::HashNotFound)
/* get_game_local_chain() gets all the Entry in the local_chain as well as the address of the game and will return ZomeApiResult<Game>.
* now we will call the iter() method on the local_chain so that we can call the filter() method. filter() method will create an iterator
* which uses a closure to determine if an element should be yielded. the closure must return true or false and if the closure returns
* true on that element then filter() will return that element. if its a false it simply runs the same closure on the nexrt element.
* now filter's closure check if the address of the each element found in the local_chain is equal to the address of game_address
* by getting the address of each element in the localchain using address() method provided for the Entry type in hdk. we need to clone
* the game_address because we are passing a reference in the parameter and we cant compare a reference to an actual value(not 100% sure
* correct me if im wrong). If the address of the entry matches the game_address that is passed in the paramater, then we return that entry.
* After getting all elements that have the address of game_address, we implement the filter_map() method which filters then maps.
* filter_map() takes a closure as an argument which has to return Option<T>. If the closure returns Some(element) then we return the
* element. If the closure returns None then we just skip and try the closure on the next element in local_chain. inside the closure,
* we use the if let to make sure that each element is an Entry::App variant. If not we return None but if it is, then we use the try_from()
* method on the entry_data found in the Entry::App and convert it to the Game struct cos at this point we are sure that the element
* is an Entry::App variant that holds the Game struct as AppEntryValue. try_from returns Result<Self, Self::Error> so we use unwrap to get
* the Self which in this case is Game. Since at this point, we are sure that there is only one match for the game_address provided
* in the parameter, we use the next() to return the element. Since next() returns an Option<T>, we use the ok_or() method to turn
* Option<T> to Result<T, E> and E here being the ZomeApiError::HashNotFound variant which indicates that the game_address provided in the
* parameter did not match any entry in the local_chain. we return ZomeApiError::HashNotFound because ZomeApiResult expects any variant
* of the ZomeApiError to be returned as an error value.
*/
}
pub fn get_moves_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Vec<Move>> {
Ok(local_chain
.iter()
.filter_map(|entry| {
if let Entry::App(entry_type, entry_data) = entry {
if entry_type.to_string() == "move" {
Some(Move::try_from(entry_data.clone()).unwrap())
} else {
None
}
} else {
None
}
})
.filter(|game_move| {
game_move.game == game_address.to_owned()
})
.rev()
.collect())
/* This one is similar to get_game_local_chain. It takes the local_chain Entries and the game_address as the parameter and returns
* a vector of Move wrapped in ZomeApiResult. We first call iter() again then use filter_map() to filter the entries in local chain
* to Entry::App variant then if the entry_type (1st element of App variant) is equal to "move" then we return that entry using try_from
* method and wrap it in Some(). else we return None if there is no Entry that has the entry_type of "move" and return None also if there
* is no Entry:App variant in the local chain. After getting all entries with "move" as the entry_type, we need to filter them and only
* yield "move" entry that has the game_address passed in the parameter. That's what the next filter() is for and we check if the game
* field of the "move" entry we retrieve from filter_map equals to the game_address being passed in the parameter. We then use rev() to reverse
* the iteration when we use the collect() method in order to collect them and turn them into Vec<Move>. // To verify:: why use rev()??
*/
}
pub fn get_state_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<GameState> {
let moves = get_moves_local_chain(local_chain.clone(), game_address)?;
|
let game = get_game_local_chain(local_chain, game_address)?;
|
random_line_split
|
|
game.rs
|
(game_address: &Address) -> ZomeApiResult<Vec<Move>> {
match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
/* get links returns the ZomeApiResult<GetLinksResult>.
* This will get entries that are linked to the first argument.
* Since ZomeApiResult returns Result<T, ZomeApiError>(where T in this case is the GetLinksResult),
* you can use the ? sugar to return the ZomeApiError if error then return the T if get_links is a success.
* GetLinkResult has a method implemented called addresses() which returns a vector of Addresses.
* into_iter() will iterate through this vector of addresses and move the value from the vector to an Iterator.
* next() is a method for iterator where in it returns the next value of the Iterator (start at index 0) in Option<Self::Item>
* Since next() returns an Option<Self::Item>, we can use the match operator to cater to all possible values of Option<Self: Item>
*/
Some(first_move) => {
let mut move_addresses = vec![first_move];
let mut more = true;
while more {
more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
Some(addr) => {
move_addresses.push(addr.clone());
true
},
None => {
false
},
}
}
/* In this match operator, we first cater to Some(first_move). The name is first_move because
* the Game entry is always linked to the first_move made by Player 2.
* So we store this first_move to a vector in a variable name move_addresses.
* Then we create a while loop in order to store all the game_move entries that are linked to the first_move.
* while more is true, we get the entries linked to the first_move, then the next move and the next move and
* on and on and on until we finish all the linked moves. The way this works is, in the first argument of get_links,
* we get the very last element of the move_addresses vector using last() which returns a Option<&T>.
* Since we want the address itself wrapped in Option<&T>, we will use unwrap() to get the value of the Address.
* In this way, we will always have the last address stored in move_addresses as our first argument in get_links.Address.
* Then we do the same thing we did above to move the value from a vector of addresses to an Iterator then get the value with next().
* Then we run the match operator again to store the address in the move_addresses using push() then return true to run the loop again.
* Since next() returns None if there is no more value to be retrieved in the Iterator, we return false in None so that the loop ends after
* we get all the moves that are linked together.
*/
let moves: Vec<Move> = move_addresses.iter().map(|addr| {
let move_entry = hdk::get_entry(addr).unwrap().unwrap();
if let Entry::App(_, move_struct) = move_entry {
Move::try_from(move_struct).expect("Entry at address is type other than Move")
} else {
panic!("Not an app entry!")
}
}).collect();
/* Now that we have a vector of addresses for all connected moves, we will now try to retrieve the data itself which can
* be found in the Addresses we retrieved. First, we create a variable named moves which is a type of Vec<Move>. In this variable,
* we will use the iter() method on move_addresses (note that we used iter() instead of into_iter() because we dont want
* to move the value from move_addresses but rather have a referennce to the addresses found in the move_addresses.) and then
* use map() method provided in Iterator. map() takes a closure and creates an iterator which calls that closure on each element.
* the closure will have addr as an argument. The closure creates a variable named move_entry in which we will use the method
* get_entry which takes an Address(HashString) type then return ZomeApiResult<Option<Entry>>. We then unwrap it twice to
* retrieve the Entry itself. Then we use if let to match the move_entry with an Entry::App variant. This is because Entry
* enum can have different variants and we need to makesure that the entry found in this address is an App variant. If not
* then we throw a panic in else statement saying that it is not an app entry. Now if it is an app entry, we use the try_from method
* to try to convert the Entry::App, which we assume to have the Move struct in the second element of App
* variant(here named as move_struct) as the AppEntryValue type, to an actual Move struct. If the try_from fails then we throw an error
* saying the Entry at the given address is not a Move type of entry. After we call the closure on all addresses in move_addresses,
* we use the collect() to turn them into Vec<Move>. collect() would understand that the items should be collected into Vec<Move>
* since that is the defined type for moves.
*/
Ok(moves)
},
None => {
Ok(Vec::new())
}
}
}
pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
let moves = get_moves(game_address)?;
let game = get_game(game_address)?;
let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
Ok(new_state)
/* get_state takes the address of the game as a parameter and return a ZomeApiResult<GameState>. This is a reducer function.
* First we create a vairable named moves and call the get_moves in it with the parameter game_address.
* Since we have the ? operator in get_moves(), it will return the value T in Result<T, ZomeApiError> if nothing goes wrong.
* T in this case is Vec<Move> which will also be the type of moves variable. next we create the game variable an call the get_game
* with the game_address being its argument. get_game also returns ZomeApiResult with Game being the success value so we
* use the ? to get the Game struct if no error occurs. with moves and game having the vectors we need, we will now create
* a variable name new_state and call iter() on moves to turn it into an Iterator in order for us to call a method fold() on it.
* fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element.
* The closure returns the value that the accumulator should have for the next iteration. In this case, the initial value is an empty
* GameState created with initial() we associated with GameState. Then the accumulator will be named state which will hold the
* initial value (empty GameState) we set. new_move will be each Move stored in moves. now we call the evolve() method we associated
* with GameState in state.rs. evolve takes self, Game struct, and &Move so we clone game and give it as a first argument and a
* reference to moves with new_move(automatically a reference since the element in fold has FnMut implemented). This evolve method will
* add all the Move that is in the moves to the GameState which will be stored in new_state. now we can return this as Ok(new_state)
*/
}
pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
utils::get_as_type(game_address.to_owned())
/* get_as_type load an entry from the given address in the argument then convert it to a given type wrapped in ZomeApiResult. In this case,
* rust will infer that the type is Game since that is the return value of get_game function so it will convert the loaded entry from the
* given address to ZomeApiResult<Game>
*/
}
/*===== End of DHT Functions ======*/
/*=============================================
= Local chain functions =
=============================================*/
pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
local_chain
.iter()
.filter(|entry| {
entry.address() == game_address.to_owned()
})
.filter_map(|entry| {
if let Entry::App(_, entry_data) = entry {
Some(Game::try_from(entry_data.clone()).unwrap())
} else {
None
}
})
.next()
.ok_or(ZomeApiError::HashNotFound)
/* get_game_local_chain() gets all the Entry in the local_chain as well as the address of the game and will return ZomeApiResult<Game>.
* now we will call the iter() method on the local_chain so that we can call the filter() method. filter() method will create an iterator
* which uses a closure to determine if an element should be yielded. the closure must return true or false and if the closure returns
* true on that element then filter() will return that element. if its a false it simply runs the same closure on the nexrt element.
* now filter's closure check if the address of the each element found in the local_chain is equal
|
get_moves
|
identifier_name
|
|
game.rs
|
mut move_addresses = vec![first_move];
let mut more = true;
while more {
more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
Some(addr) => {
move_addresses.push(addr.clone());
true
},
None => {
false
},
}
}
/* In this match operator, we first cater to Some(first_move). The name is first_move because
* the Game entry is always linked to the first_move made by Player 2.
* So we store this first_move to a vector in a variable name move_addresses.
* Then we create a while loop in order to store all the game_move entries that are linked to the first_move.
* while more is true, we get the entries linked to the first_move, then the next move and the next move and
* on and on and on until we finish all the linked moves. The way this works is, in the first argument of get_links,
* we get the very last element of the move_addresses vector using last() which returns a Option<&T>.
* Since we want the address itself wrapped in Option<&T>, we will use unwrap() to get the value of the Address.
* In this way, we will always have the last address stored in move_addresses as our first argument in get_links.Address.
* Then we do the same thing we did above to move the value from a vector of addresses to an Iterator then get the value with next().
* Then we run the match operator again to store the address in the move_addresses using push() then return true to run the loop again.
* Since next() returns None if there is no more value to be retrieved in the Iterator, we return false in None so that the loop ends after
* we get all the moves that are linked together.
*/
let moves: Vec<Move> = move_addresses.iter().map(|addr| {
let move_entry = hdk::get_entry(addr).unwrap().unwrap();
if let Entry::App(_, move_struct) = move_entry {
Move::try_from(move_struct).expect("Entry at address is type other than Move")
} else {
panic!("Not an app entry!")
}
}).collect();
/* Now that we have a vector of addresses for all connected moves, we will now try to retrieve the data itself which can
* be found in the Addresses we retrieved. First, we create a variable named moves which is a type of Vec<Move>. In this variable,
* we will use the iter() method on move_addresses (note that we used iter() instead of into_iter() because we dont want
* to move the value from move_addresses but rather have a referennce to the addresses found in the move_addresses.) and then
* use map() method provided in Iterator. map() takes a closure and creates an iterator which calls that closure on each element.
* the closure will have addr as an argument. The closure creates a variable named move_entry in which we will use the method
* get_entry which takes an Address(HashString) type then return ZomeApiResult<Option<Entry>>. We then unwrap it twice to
* retrieve the Entry itself. Then we use if let to match the move_entry with an Entry::App variant. This is because Entry
* enum can have different variants and we need to makesure that the entry found in this address is an App variant. If not
* then we throw a panic in else statement saying that it is not an app entry. Now if it is an app entry, we use the try_from method
* to try to convert the Entry::App, which we assume to have the Move struct in the second element of App
* variant(here named as move_struct) as the AppEntryValue type, to an actual Move struct. If the try_from fails then we throw an error
* saying the Entry at the given address is not a Move type of entry. After we call the closure on all addresses in move_addresses,
* we use the collect() to turn them into Vec<Move>. collect() would understand that the items should be collected into Vec<Move>
* since that is the defined type for moves.
*/
Ok(moves)
},
None => {
Ok(Vec::new())
}
}
}
pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
let moves = get_moves(game_address)?;
let game = get_game(game_address)?;
let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
Ok(new_state)
/* get_state takes the address of the game as a parameter and return a ZomeApiResult<GameState>. This is a reducer function.
* First we create a vairable named moves and call the get_moves in it with the parameter game_address.
* Since we have the ? operator in get_moves(), it will return the value T in Result<T, ZomeApiError> if nothing goes wrong.
* T in this case is Vec<Move> which will also be the type of moves variable. next we create the game variable an call the get_game
* with the game_address being its argument. get_game also returns ZomeApiResult with Game being the success value so we
* use the ? to get the Game struct if no error occurs. with moves and game having the vectors we need, we will now create
* a variable name new_state and call iter() on moves to turn it into an Iterator in order for us to call a method fold() on it.
* fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element.
* The closure returns the value that the accumulator should have for the next iteration. In this case, the initial value is an empty
* GameState created with initial() we associated with GameState. Then the accumulator will be named state which will hold the
* initial value (empty GameState) we set. new_move will be each Move stored in moves. now we call the evolve() method we associated
* with GameState in state.rs. evolve takes self, Game struct, and &Move so we clone game and give it as a first argument and a
* reference to moves with new_move(automatically a reference since the element in fold has FnMut implemented). This evolve method will
* add all the Move that is in the moves to the GameState which will be stored in new_state. now we can return this as Ok(new_state)
*/
}
pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
utils::get_as_type(game_address.to_owned())
/* get_as_type load an entry from the given address in the argument then convert it to a given type wrapped in ZomeApiResult. In this case,
* rust will infer that the type is Game since that is the return value of get_game function so it will convert the loaded entry from the
* given address to ZomeApiResult<Game>
*/
}
/*===== End of DHT Functions ======*/
/*=============================================
= Local chain functions =
=============================================*/
pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
local_chain
.iter()
.filter(|entry| {
entry.address() == game_address.to_owned()
})
.filter_map(|entry| {
if let Entry::App(_, entry_data) = entry
|
else {
None
}
})
.next()
.ok_or(ZomeApiError::HashNotFound)
/* get_game_local_chain() gets all the Entry in the local_chain as well as the address of the game and will return ZomeApiResult<Game>.
* now we will call the iter() method on the local_chain so that we can call the filter() method. filter() method will create an iterator
* which uses a closure to determine if an element should be yielded. the closure must return true or false and if the closure returns
* true on that element then filter() will return that element. if its a false it simply runs the same closure on the nexrt element.
* now filter's closure check if the address of the each element found in the local_chain is equal to the address of game_address
* by getting the address of each element in the localchain using address() method provided for the Entry type in hdk. we need to clone
* the game_address because we are passing a reference in the parameter and we cant compare a reference to an actual value(not 100% sure
* correct me if im wrong). If the address of the entry matches the game_address that is passed in the paramater, then we return that entry.
* After getting all elements that have the address of game_address, we implement the filter_map() method which filters then maps.
* filter_map() takes a closure as an argument which has to return Option<T>. If the closure returns Some(element) then we return the
* element. If the closure returns None then we just skip and try the closure on the next element in local_chain. inside the closure,
* we use the if let to make sure that each element is an Entry::App variant. If not we return None but if it is, then we use the try_from()
* method on the entry_data found in the Entry::App and convert it to the Game struct
|
{
Some(Game::try_from(entry_data.clone()).unwrap())
}
|
conditional_block
|
game.rs
|
None => {
false
},
}
}
/* In this match operator, we first cater to Some(first_move). The name is first_move because
* the Game entry is always linked to the first_move made by Player 2.
* So we store this first_move to a vector in a variable name move_addresses.
* Then we create a while loop in order to store all the game_move entries that are linked to the first_move.
* while more is true, we get the entries linked to the first_move, then the next move and the next move and
* on and on and on until we finish all the linked moves. The way this works is, in the first argument of get_links,
* we get the very last element of the move_addresses vector using last() which returns a Option<&T>.
* Since we want the address itself wrapped in Option<&T>, we will use unwrap() to get the value of the Address.
* In this way, we will always have the last address stored in move_addresses as our first argument in get_links.Address.
* Then we do the same thing we did above to move the value from a vector of addresses to an Iterator then get the value with next().
* Then we run the match operator again to store the address in the move_addresses using push() then return true to run the loop again.
* Since next() returns None if there is no more value to be retrieved in the Iterator, we return false in None so that the loop ends after
* we get all the moves that are linked together.
*/
let moves: Vec<Move> = move_addresses.iter().map(|addr| {
let move_entry = hdk::get_entry(addr).unwrap().unwrap();
if let Entry::App(_, move_struct) = move_entry {
Move::try_from(move_struct).expect("Entry at address is type other than Move")
} else {
panic!("Not an app entry!")
}
}).collect();
/* Now that we have a vector of addresses for all connected moves, we will now try to retrieve the data itself which can
* be found in the Addresses we retrieved. First, we create a variable named moves which is a type of Vec<Move>. In this variable,
* we will use the iter() method on move_addresses (note that we used iter() instead of into_iter() because we dont want
* to move the value from move_addresses but rather have a referennce to the addresses found in the move_addresses.) and then
* use map() method provided in Iterator. map() takes a closure and creates an iterator which calls that closure on each element.
* the closure will have addr as an argument. The closure creates a variable named move_entry in which we will use the method
* get_entry which takes an Address(HashString) type then return ZomeApiResult<Option<Entry>>. We then unwrap it twice to
* retrieve the Entry itself. Then we use if let to match the move_entry with an Entry::App variant. This is because Entry
* enum can have different variants and we need to makesure that the entry found in this address is an App variant. If not
* then we throw a panic in else statement saying that it is not an app entry. Now if it is an app entry, we use the try_from method
* to try to convert the Entry::App, which we assume to have the Move struct in the second element of App
* variant(here named as move_struct) as the AppEntryValue type, to an actual Move struct. If the try_from fails then we throw an error
* saying the Entry at the given address is not a Move type of entry. After we call the closure on all addresses in move_addresses,
* we use the collect() to turn them into Vec<Move>. collect() would understand that the items should be collected into Vec<Move>
* since that is the defined type for moves.
*/
Ok(moves)
},
None => {
Ok(Vec::new())
}
}
}
pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
let moves = get_moves(game_address)?;
let game = get_game(game_address)?;
let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
Ok(new_state)
/* get_state takes the address of the game as a parameter and return a ZomeApiResult<GameState>. This is a reducer function.
* First we create a vairable named moves and call the get_moves in it with the parameter game_address.
* Since we have the ? operator in get_moves(), it will return the value T in Result<T, ZomeApiError> if nothing goes wrong.
* T in this case is Vec<Move> which will also be the type of moves variable. next we create the game variable an call the get_game
* with the game_address being its argument. get_game also returns ZomeApiResult with Game being the success value so we
* use the ? to get the Game struct if no error occurs. with moves and game having the vectors we need, we will now create
* a variable name new_state and call iter() on moves to turn it into an Iterator in order for us to call a method fold() on it.
* fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element.
* The closure returns the value that the accumulator should have for the next iteration. In this case, the initial value is an empty
* GameState created with initial() we associated with GameState. Then the accumulator will be named state which will hold the
* initial value (empty GameState) we set. new_move will be each Move stored in moves. now we call the evolve() method we associated
* with GameState in state.rs. evolve takes self, Game struct, and &Move so we clone game and give it as a first argument and a
* reference to moves with new_move(automatically a reference since the element in fold has FnMut implemented). This evolve method will
* add all the Move that is in the moves to the GameState which will be stored in new_state. now we can return this as Ok(new_state)
*/
}
pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
utils::get_as_type(game_address.to_owned())
/* get_as_type load an entry from the given address in the argument then convert it to a given type wrapped in ZomeApiResult. In this case,
* rust will infer that the type is Game since that is the return value of get_game function so it will convert the loaded entry from the
* given address to ZomeApiResult<Game>
*/
}
/*===== End of DHT Functions ======*/
/*=============================================
= Local chain functions =
=============================================*/
pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
local_chain
.iter()
.filter(|entry| {
entry.address() == game_address.to_owned()
})
.filter_map(|entry| {
if let Entry::App(_, entry_data) = entry {
Some(Game::try_from(entry_data.clone()).unwrap())
} else {
None
}
})
.next()
.ok_or(ZomeApiError::HashNotFound)
/* get_game_local_chain() gets all the Entry in the local_chain as well as the address of the game and will return ZomeApiResult<Game>.
* now we will call the iter() method on the local_chain so that we can call the filter() method. filter() method will create an iterator
* which uses a closure to determine if an element should be yielded. the closure must return true or false and if the closure returns
* true on that element then filter() will return that element. if its a false it simply runs the same closure on the nexrt element.
* now filter's closure check if the address of the each element found in the local_chain is equal to the address of game_address
* by getting the address of each element in
|
{
match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
/* get links returns the ZomeApiResult<GetLinksResult>.
* This will get entries that are linked to the first argument.
* Since ZomeApiResult returns Result<T, ZomeApiError>(where T in this case is the GetLinksResult),
* you can use the ? sugar to return the ZomeApiError if error then return the T if get_links is a success.
* GetLinkResult has a method implemented called addresses() which returns a vector of Addresses.
* into_iter() will iterate through this vector of addresses and move the value from the vector to an Iterator.
* next() is a method for iterator where in it returns the next value of the Iterator (start at index 0) in Option<Self::Item>
* Since next() returns an Option<Self::Item>, we can use the match operator to cater to all possible values of Option<Self: Item>
*/
Some(first_move) => {
let mut move_addresses = vec![first_move];
let mut more = true;
while more {
more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
Some(addr) => {
move_addresses.push(addr.clone());
true
},
|
identifier_body
|
|
server_rpc.py
|
self.rpc_config_set
self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed
self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe
self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe
self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new
self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new
self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new
self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete
for table_name in DATABASE_TABLES.keys():
self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id
self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id
self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id
self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row
self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value
self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows
# Tables with a campaign_id field
for table_name in db_models.get_tables_with_column_id('campaign_id'):
|
# Tables with a message_id field
for table_name in db_models.get_tables_with_column_id('message_id'):
self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows
def rpc_ping(self):
"""
An RPC method that can be used by clients to assert the status
and responsiveness of this server.
:return: This method always returns True.
:rtype: bool
"""
return True
def rpc_client_initialize(self):
"""
Initialize any client information necessary.
:return: This method always returns True.
:rtype: bool
"""
username = self.basic_auth_user
if not username:
return True
session = db_manager.Session()
if not db_manager.get_row_by_id(session, db_models.User, username):
user = db_models.User(id=username)
session.add(user)
session.commit()
session.close()
return True
def rpc_shutdown(self):
"""
This method can be used to shut down the server. This function will
return, however no subsequent requests will be processed.
"""
shutdown_thread = threading.Thread(target=self.server.shutdown)
shutdown_thread.start()
return
def rpc_version(self):
"""
Get the version information of the server. This returns a
dictionary with keys of version, version_info and rpc_api_version.
These values are provided for the client to determine
compatibility.
:return: A dictionary with version information.
:rtype: dict
"""
vinfo = {'version': version.version, 'version_info': version.version_info._asdict()}
vinfo['rpc_api_version'] = version.rpc_api_version
return vinfo
def rpc_config_get(self, option_name):
"""
Retrieve a value from the server's configuration.
:param str option_name: The name of the configuration option.
:return: The option's value.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if self.config.has_option(option_name):
option_values[option_name] = self.config.get(option_name)
return option_values
elif self.config.has_option(option_name):
return self.config.get(option_name)
return
def rpc_config_set(self, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
self.config.set(option_name, option_value)
return
def rpc_campaign_new(self, name):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:return: The ID of the new campaign.
:rtype: int
"""
session = db_manager.Session()
campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)
session.add(campaign)
session.commit()
return campaign.id
def rpc_campaign_alerts_is_subscribed(self, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
result = query.count()
session.close()
return result
def rpc_campaign_alerts_subscribe(self, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
session.add(subscription)
session.commit()
session.close()
return
def rpc_campaign_alerts_unsubscribe(self, campaign_id):
"""
Unsubscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
subscription = query.first()
if subscription:
session.delete(subscription)
session.commit()
session.close()
return
def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The VHOST for the request.
:param str page: The request resource.
"""
page = page.lstrip('/')
session = db_manager.Session()
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
session.add(landing_page)
session.commit()
session.close()
return
def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name):
"""
Record a message that has been sent as part of a campaign. These
details can be retrieved later for value substitution in template
pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str company_name: The company name value for the message.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
"""
session = db_manager.Session()
message = db_models.Message()
message.id = email_id
message.campaign_id = campaign_id
message.target_email = target_email
message.company_name = company_name
message.first_name = first_name
message.last_name = last_name
session.add(message)
session.commit()
session.close()
return
def rpc_campaign_delete(self, campaign_id):
"""
Remove a campaign from the database and delete all associated
information with it.
.. warning::
This action can not be reversed and there is no confirmation before it
takes place.
"""
session = db_manager.Session()
session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id))
session.commit()
session.close()
return
def rpc_database_count_rows(self, *args):
"""
Get a count of the rows in the specified table where the search
criteria matches.
:return: The number of matching rows.
:
|
self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows
|
conditional_block
|
server_rpc.py
|
class KingPhisherRequestHandlerRPC(object):
"""
This superclass of :py:class:`.KingPhisherRequestHandler` maintains
all of the RPC call back functions.
:RPC API: :ref:`rpc-api-label`
"""
def install_handlers(self):
super(KingPhisherRequestHandlerRPC, self).install_handlers()
self.rpc_handler_map['^/ping$'] = self.rpc_ping
self.rpc_handler_map['^/shutdown$'] = self.rpc_shutdown
self.rpc_handler_map['^/version$'] = self.rpc_version
self.rpc_handler_map['^/geoip/lookup$'] = self.rpc_geoip_lookup
self.rpc_handler_map['^/geoip/lookup/multi$'] = self.rpc_geoip_lookup_multi
self.rpc_handler_map['^/client/initialize$'] = self.rpc_client_initialize
self.rpc_handler_map['^/config/get$'] = self.rpc_config_get
self.rpc_handler_map['^/config/set$'] = self.rpc_config_set
self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed
self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe
self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe
self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new
self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new
self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new
self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete
for table_name in DATABASE_TABLES.keys():
self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id
self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id
self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id
self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row
self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value
self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows
# Tables with a campaign_id field
for table_name in db_models.get_tables_with_column_id('campaign_id'):
self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows
# Tables with a message_id field
for table_name in db_models.get_tables_with_column_id('message_id'):
self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows
def rpc_ping(self):
"""
An RPC method that can be used by clients to assert the status
and responsiveness of this server.
:return: This method always returns True.
:rtype: bool
"""
return True
def rpc_client_initialize(self):
"""
Initialize any client information necessary.
:return: This method always returns True.
:rtype: bool
"""
username = self.basic_auth_user
if not username:
return True
session = db_manager.Session()
if not db_manager.get_row_by_id(session, db_models.User, username):
user = db_models.User(id=username)
session.add(user)
session.commit()
session.close()
return True
def rpc_shutdown(self):
"""
This method can be used to shut down the server. This function will
return, however no subsequent requests will be processed.
"""
shutdown_thread = threading.Thread(target=self.server.shutdown)
shutdown_thread.start()
return
def rpc_version(self):
"""
Get the version information of the server. This returns a
dictionary with keys of version, version_info and rpc_api_version.
These values are provided for the client to determine
compatibility.
:return: A dictionary with version information.
:rtype: dict
"""
vinfo = {'version': version.version, 'version_info': version.version_info._asdict()}
vinfo['rpc_api_version'] = version.rpc_api_version
return vinfo
def rpc_config_get(self, option_name):
"""
Retrieve a value from the server's configuration.
:param str option_name: The name of the configuration option.
:return: The option's value.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if self.config.has_option(option_name):
option_values[option_name] = self.config.get(option_name)
return option_values
elif self.config.has_option(option_name):
return self.config.get(option_name)
return
def rpc_config_set(self, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
self.config.set(option_name, option_value)
return
def rpc_campaign_new(self, name):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:return: The ID of the new campaign.
:rtype: int
"""
session = db_manager.Session()
campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)
session.add(campaign)
session.commit()
return campaign.id
def rpc_campaign_alerts_is_subscribed(self, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
result = query.count()
session.close()
return result
def rpc_campaign_alerts_subscribe(self, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
session.add(subscription)
session.commit()
session.close()
return
def rpc_campaign_alerts_unsubscribe(self, campaign_id):
"""
Unsubscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
subscription = query.first()
if subscription:
session.delete(subscription)
session.commit()
session.close()
return
def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The VHOST for the request.
:param str page: The request resource.
"""
page = page.lstrip('/')
session = db_manager.Session()
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
session.add(landing_page)
session.commit()
session.close()
return
def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name):
"""
Record a message that has been sent as part of a campaign. These
details can be retrieved later for value substitution in template
pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str company_name: The company name value for the message.
:param str first_name: The first name of the message's recipient.
:param
|
DATABASE_TABLES = db_models.DATABASE_TABLES
DATABASE_TABLE_OBJECTS = db_models.DATABASE_TABLE_OBJECTS
|
random_line_split
|
|
server_rpc.py
|
self.rpc_config_set
self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed
self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe
self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe
self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new
self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new
self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new
self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete
for table_name in DATABASE_TABLES.keys():
self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id
self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id
self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id
self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row
self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value
self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows
# Tables with a campaign_id field
for table_name in db_models.get_tables_with_column_id('campaign_id'):
self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows
# Tables with a message_id field
for table_name in db_models.get_tables_with_column_id('message_id'):
self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows
self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows
def rpc_ping(self):
"""
An RPC method that can be used by clients to assert the status
and responsiveness of this server.
:return: This method always returns True.
:rtype: bool
"""
return True
def rpc_client_initialize(self):
"""
Initialize any client information necessary.
:return: This method always returns True.
:rtype: bool
"""
username = self.basic_auth_user
if not username:
return True
session = db_manager.Session()
if not db_manager.get_row_by_id(session, db_models.User, username):
user = db_models.User(id=username)
session.add(user)
session.commit()
session.close()
return True
def rpc_shutdown(self):
"""
This method can be used to shut down the server. This function will
return, however no subsequent requests will be processed.
"""
shutdown_thread = threading.Thread(target=self.server.shutdown)
shutdown_thread.start()
return
def rpc_version(self):
"""
Get the version information of the server. This returns a
dictionary with keys of version, version_info and rpc_api_version.
These values are provided for the client to determine
compatibility.
:return: A dictionary with version information.
:rtype: dict
"""
vinfo = {'version': version.version, 'version_info': version.version_info._asdict()}
vinfo['rpc_api_version'] = version.rpc_api_version
return vinfo
def rpc_config_get(self, option_name):
"""
Retrieve a value from the server's configuration.
:param str option_name: The name of the configuration option.
:return: The option's value.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if self.config.has_option(option_name):
option_values[option_name] = self.config.get(option_name)
return option_values
elif self.config.has_option(option_name):
return self.config.get(option_name)
return
def rpc_config_set(self, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
self.config.set(option_name, option_value)
return
def rpc_campaign_new(self, name):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:return: The ID of the new campaign.
:rtype: int
"""
session = db_manager.Session()
campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)
session.add(campaign)
session.commit()
return campaign.id
def rpc_campaign_alerts_is_subscribed(self, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
result = query.count()
session.close()
return result
def rpc_campaign_alerts_subscribe(self, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
session.add(subscription)
session.commit()
session.close()
return
def rpc_campaign_alerts_unsubscribe(self, campaign_id):
"""
Unsubscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
subscription = query.first()
if subscription:
session.delete(subscription)
session.commit()
session.close()
return
def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):
|
def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name):
"""
Record a message that has been sent as part of a campaign. These
details can be retrieved later for value substitution in template
pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str company_name: The company name value for the message.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
"""
session = db_manager.Session()
message = db_models.Message()
message.id = email_id
message.campaign_id = campaign_id
message.target_email = target_email
message.company_name = company_name
message.first_name = first_name
message.last_name = last_name
session.add(message)
session.commit()
session.close()
return
def rpc_campaign_delete(self, campaign_id):
"""
Remove a campaign from the database and delete all associated
information with it.
.. warning::
This action can not be reversed and there is no confirmation before it
takes place.
"""
session = db_manager.Session()
session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id))
session.commit()
session.close()
return
def rpc_database_count_rows(self, *args):
"""
Get a count of the rows in the specified table where the search
criteria matches.
:return: The number of matching rows.
:rtype
|
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The VHOST for the request.
:param str page: The request resource.
"""
page = page.lstrip('/')
session = db_manager.Session()
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
session.add(landing_page)
session.commit()
session.close()
return
|
identifier_body
|
server_rpc.py
|
_version
return vinfo
def rpc_config_get(self, option_name):
"""
Retrieve a value from the server's configuration.
:param str option_name: The name of the configuration option.
:return: The option's value.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if self.config.has_option(option_name):
option_values[option_name] = self.config.get(option_name)
return option_values
elif self.config.has_option(option_name):
return self.config.get(option_name)
return
def rpc_config_set(self, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
self.config.set(option_name, option_value)
return
def rpc_campaign_new(self, name):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:return: The ID of the new campaign.
:rtype: int
"""
session = db_manager.Session()
campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)
session.add(campaign)
session.commit()
return campaign.id
def rpc_campaign_alerts_is_subscribed(self, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
result = query.count()
session.close()
return result
def rpc_campaign_alerts_subscribe(self, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
session.add(subscription)
session.commit()
session.close()
return
def rpc_campaign_alerts_unsubscribe(self, campaign_id):
"""
Unsubscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = self.basic_auth_user
session = db_manager.Session()
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
subscription = query.first()
if subscription:
session.delete(subscription)
session.commit()
session.close()
return
def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The VHOST for the request.
:param str page: The request resource.
"""
page = page.lstrip('/')
session = db_manager.Session()
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
session.add(landing_page)
session.commit()
session.close()
return
def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name):
"""
Record a message that has been sent as part of a campaign. These
details can be retrieved later for value substitution in template
pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str company_name: The company name value for the message.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
"""
session = db_manager.Session()
message = db_models.Message()
message.id = email_id
message.campaign_id = campaign_id
message.target_email = target_email
message.company_name = company_name
message.first_name = first_name
message.last_name = last_name
session.add(message)
session.commit()
session.close()
return
def rpc_campaign_delete(self, campaign_id):
"""
Remove a campaign from the database and delete all associated
information with it.
.. warning::
This action can not be reversed and there is no confirmation before it
takes place.
"""
session = db_manager.Session()
session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id))
session.commit()
session.close()
return
def rpc_database_count_rows(self, *args):
"""
Get a count of the rows in the specified table where the search
criteria matches.
:return: The number of matching rows.
:rtype: int
"""
args = list(args)
fields = self.path.split('/')[1:-2]
assert len(fields) == len(args)
table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2])
assert table
session = db_manager.Session()
query = session.query(table)
query = query.filter_by(**dict(zip((f + '_id' for f in fields), args)))
result = query.count()
session.close()
return result
def rpc_database_get_rows(self, *args):
"""
Retrieve the rows from the specified table where the search
criteria matches.
:return: A dictionary with columns and rows keys.
:rtype: dict
"""
args = list(args)
offset = 0
fields = self.path.split('/')[1:-2]
if len(args) == (len(fields) + 1):
offset = (args.pop() * VIEW_ROW_COUNT)
assert len(fields) == len(args)
table_name = self.path.split('/')[-2]
table = DATABASE_TABLE_OBJECTS.get(table_name)
assert table
# it's critical that the columns are in the order that the client is expecting
columns = DATABASE_TABLES[table_name]
rows = []
session = db_manager.Session()
query = session.query(table)
query = query.filter_by(**dict(zip((f + '_id' for f in fields), args)))
for row in query[offset:offset + VIEW_ROW_COUNT]:
rows.append([getattr(row, c) for c in columns])
session.close()
if not len(rows):
return None
return {'columns': columns, 'rows': rows}
def rpc_database_delete_row_by_id(self, row_id):
"""
Delete a row from a table with the specified value in the id column.
:param row_id: The id value.
"""
table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2])
assert table
session = db_manager.Session()
try:
session.delete(db_manager.get_row_by_id(session, table, row_id))
session.commit()
finally:
session.close()
return
def rpc_database_delete_rows_by_id(self, row_ids):
"""
Delete multiple rows from a table with the specified values in the id
column. If a row id specified in *row_ids* does not exist, then it will
be skipped and no error will be thrown.
:param list row_ids: The row ids to delete.
:return: The row ids that were deleted.
:rtype: list
"""
table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3])
assert table
deleted_rows = []
session = db_manager.Session()
try:
for row_id in row_ids:
row = db_manager.get_row_by_id(session, table, row_id)
if not row:
continue
session.delete(row)
deleted_rows.append(row_id)
session.commit()
finally:
session.close()
return deleted_rows
def rpc_database_get_row_by_id(self, row_id):
"""
Retrieve a row from a given table with the specified value in the
id column.
:param row_id: The id value.
:return: The specified row data.
:rtype: dict
"""
table_name = self.path.split('/')[-2]
table = DATABASE_TABLE_OBJECTS.get(table_name)
assert table
columns = DATABASE_TABLES[table_name]
session = db_manager.Session()
row = db_manager.get_row_by_id(session, table, row_id)
if row:
row = dict(zip(columns, (getattr(row, c) for c in columns)))
session.close()
return row
def
|
rpc_database_insert_row
|
identifier_name
|
|
server.rs
|
server: bmrng::RequestSender<ServerEvent, Packet>,
// server: broadcast::Sender<ServerEvent>,
/// Event sender to Socket instances. cloned and passed over
client: mpsc::Sender<SocketEvent>,
}
#[derive(Debug)]
pub enum ServerState {
Unsubscribed {
socket_event_receiver: mpsc::Receiver<SocketEvent>,
engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>,
},
Subscribed,
}
pub struct Server<A: 'static + Adapter> {
state: Arc<Mutex<ServerState>>,
// TODO: don't use a mutex here, instead have an internal socket state
clients: Arc<DashMap<String, Arc<Socket<A>>>>,
event_senders: EventSenders,
// TODO: ping timeout handler EngineIoSocketTimeoutHandler
pub options: ServerOptions,
}
impl Default for ServerOptions {
fn default() -> Self {
ServerOptions {
ping_timeout: 5000,
ping_interval: 25000,
upgrade_timeout: 10000,
transports: vec![TransportKind::WebSocket, TransportKind::Polling],
allow_upgrades: true,
initial_packet: None,
cookie: Some(CookieOptions::default()),
// allow_request: None,
buffer_factor: 2,
}
}
}
impl Default for CookieOptions {
fn default() -> Self {
CookieOptions {
name: "io".to_owned(),
path: "/".to_owned(),
http_only: true,
}
}
}
#[derive(Display, Debug, Clone, PartialEq)]
pub enum ServerEvent {
/// Socket ID
Connection {
connection_id: String,
},
Close {
connection_id: String,
reason: SocketCloseReason,
},
Flush {
connection_id: String,
},
Drain {
connection_id: String,
},
Message {
connection_id: String,
context: Arc<RequestContext>,
data: PacketData,
},
Error {
connection_id: String,
},
}
impl<A: 'static + Adapter> Server<A> {
pub fn new(options: ServerOptions) -> Self {
// To listen events from socket instances
let (client_event_sender, client_event_receiver) =
mpsc::channel(options.buffer_factor * BUFFER_CONST);
// To send events to the owner of this Server instance
let (server_event_sender, server_event_receiver) =
bmrng::channel(options.buffer_factor * BUFFER_CONST);
Server {
state: Arc::new(Mutex::new(ServerState::Unsubscribed {
socket_event_receiver: client_event_receiver,
engine_event_receiver: server_event_receiver,
})),
clients: Arc::new(DashMap::new()),
event_senders: EventSenders {
server: server_event_sender,
client: client_event_sender,
},
options,
}
}
pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet>
|
pub fn try_subscribe(
&self,
) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> {
let mut state = self.state.lock().unwrap();
let old_state = std::mem::replace(&mut *state, ServerState::Subscribed);
match old_state {
ServerState::Subscribed => Err(AlreadySubscribedError),
ServerState::Unsubscribed {
socket_event_receiver,
engine_event_receiver,
} => {
// First time calling subscribe, also start listening events from `Socket` instances
self.subscribe_to_socket_events(socket_event_receiver);
Ok(engine_event_receiver)
}
}
// TODO: handle shutdown properly by receiving a shutdown signal
// sending it to socket instances.
}
pub async fn close(&self) {
// TODO: consider sending signals or dropping channels instead of closing them like this?
// TODO: or drop the whole thing. The server, the sockets, everything.
todo!();
// for socket in self.clients.iter() {
// socket.value().close(true);
// }
}
pub async fn close_socket(&self, connection_id: &str) {
if let Some((_key, socket)) = self.clients.remove(connection_id) {
// TODO: convert this to drop
todo!();
// socket.close(true);
}
}
// TODO: consider converting ack callbacks into optional async Results?
// `connection_id` is an owned string just because of a Rust compiler issue.
pub async fn send_packet_with_ack(
&self,
connection_id: String,
packet: Packet,
callback: Option<Callback>,
) -> Result<(), SendPacketError> {
match self.clients.get(&connection_id) {
Some(client) => Ok(client.send_packet(packet, None).await),
None => Err(SendPacketError::UnknownConnectionId),
}
}
pub async fn send_packet(
&self,
connection_id: String,
packet: Packet,
) -> Result<(), SendPacketError> {
match self.clients.get(&connection_id) {
Some(client) => Ok(client.send_packet(packet, None).await),
None => Err(SendPacketError::UnknownConnectionId),
}
}
pub async fn handle_request(
&self,
context: RequestContext,
body: Option<A::Body>,
) -> Result<A::Response, ServerError> {
let context = Arc::new(context);
let sid_ref = context.query.get("sid");
let sid = sid_ref.map(|s| s.to_owned());
self.verify_request(sid_ref, false, context.transport_kind, context.http_method)
.await?;
if let Some(sid) = sid {
let client = self.get_client_or_error(&sid)?;
let response = client.handle_polling_request(context.clone(), body).await?;
Ok(response)
} else {
let (sid, response) = self.handshake(context, HandshakeData::Polling).await?;
Ok(response)
}
}
/// Akin to `onWebSocket` from engine.io js
// TODO: handle errors, socket closure etc.
pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) {
let context = Arc::new(context);
let sid_ref = context.query.get("sid");
let sid = sid_ref.map(|s| s.to_owned());
if let Some(sid) = sid {
// TODO: don't panic
let client = self.get_client_or_error(&sid).expect("TODO: fix this");
client.maybe_upgrade(context, todo!());
// TODO: implement this!
// let client =
// TODO: call socket.maybe_upgrade()
} else {
self.handshake(context, HandshakeData::WebSocket { socket })
.await;
todo!();
}
}
pub async fn verify_request(
&self,
sid: Option<&String>,
upgrade: bool,
transport_kind: TransportKind,
http_method: HttpMethod,
) -> Result<(), ServerError> {
if let Some(sid) = sid {
let client = self.clients.get(sid);
if let Some(client) = client {
let client_transport_kind = client.get_transport_kind();
if !upgrade && Some(transport_kind) != client_transport_kind {
return Err(ServerError::BadRequest);
}
} else {
return Err(ServerError::UnknownSid);
}
} else {
if http_method != HttpMethod::Get {
return Err(ServerError::BadHandshakeMethod);
}
// FIXME: fix allow_request calls
/*if let Some(validator) = &self.options.allow_request {
// FIXME: pass some request parameters to this validator
// to make it useful
let valid = validator();
if !valid {
return Err(ServerError::BadRequest);
}
}*/
}
Ok(())
}
/// Generate a new ID for a client.
/// Note: This generates IDs in a different format from the original JS
/// engine.io implementation, which uses a library called
/// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem
/// to guarantee uniqueness.
pub fn generate_id() -> String {
Uuid::new_v4().to_hyphenated().to_string()
}
/// Returns the new client ID
pub async fn handshake(
&self,
context: Arc<RequestContext>,
data: HandshakeData<A::WsHandle>,
) -> Result<(String, A::Response), ServerError> {
let sid = Self::generate_id();
let supports_binary = !context.query.contains_key("b64");
let jsonp = !supports_binary && !context.query.contains_key("j");
let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options(
&self.options.cookie,
sid.clone(),
)));
let transport_create_data = match data {
HandshakeData::Polling => TransportCreateData::Polling { jsonp },
HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket {
supports_binary,
socket,
},
};
let socket = Arc::new(Socket::new(
sid.clone(),
context.clone(),
self.event_senders.client.clone(),
transport_create_data,
));
self.clients.insert(sid.clone(), socket.clone());
socket.open(&self.options).await;
// TODO: send
|
{
self.try_subscribe()
.expect("Already subscribed to engine_io_server::Server")
}
|
identifier_body
|
server.rs
|
server: bmrng::RequestSender<ServerEvent, Packet>,
// server: broadcast::Sender<ServerEvent>,
/// Event sender to Socket instances. cloned and passed over
client: mpsc::Sender<SocketEvent>,
}
#[derive(Debug)]
pub enum ServerState {
Unsubscribed {
socket_event_receiver: mpsc::Receiver<SocketEvent>,
engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>,
},
Subscribed,
}
pub struct Server<A: 'static + Adapter> {
state: Arc<Mutex<ServerState>>,
// TODO: don't use a mutex here, instead have an internal socket state
clients: Arc<DashMap<String, Arc<Socket<A>>>>,
event_senders: EventSenders,
// TODO: ping timeout handler EngineIoSocketTimeoutHandler
pub options: ServerOptions,
}
impl Default for ServerOptions {
fn default() -> Self {
ServerOptions {
ping_timeout: 5000,
ping_interval: 25000,
upgrade_timeout: 10000,
transports: vec![TransportKind::WebSocket, TransportKind::Polling],
allow_upgrades: true,
initial_packet: None,
cookie: Some(CookieOptions::default()),
// allow_request: None,
buffer_factor: 2,
}
}
}
impl Default for CookieOptions {
fn default() -> Self {
CookieOptions {
name: "io".to_owned(),
path: "/".to_owned(),
http_only: true,
}
}
}
#[derive(Display, Debug, Clone, PartialEq)]
pub enum ServerEvent {
/// Socket ID
Connection {
connection_id: String,
},
Close {
connection_id: String,
reason: SocketCloseReason,
},
Flush {
connection_id: String,
},
Drain {
connection_id: String,
},
Message {
connection_id: String,
context: Arc<RequestContext>,
data: PacketData,
},
Error {
connection_id: String,
},
}
impl<A: 'static + Adapter> Server<A> {
pub fn new(options: ServerOptions) -> Self {
// To listen events from socket instances
let (client_event_sender, client_event_receiver) =
mpsc::channel(options.buffer_factor * BUFFER_CONST);
// To send events to the owner of this Server instance
let (server_event_sender, server_event_receiver) =
bmrng::channel(options.buffer_factor * BUFFER_CONST);
Server {
state: Arc::new(Mutex::new(ServerState::Unsubscribed {
socket_event_receiver: client_event_receiver,
engine_event_receiver: server_event_receiver,
})),
clients: Arc::new(DashMap::new()),
event_senders: EventSenders {
server: server_event_sender,
client: client_event_sender,
},
options,
}
}
pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> {
self.try_subscribe()
.expect("Already subscribed to engine_io_server::Server")
}
pub fn try_subscribe(
&self,
) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> {
let mut state = self.state.lock().unwrap();
let old_state = std::mem::replace(&mut *state, ServerState::Subscribed);
match old_state {
ServerState::Subscribed => Err(AlreadySubscribedError),
ServerState::Unsubscribed {
socket_event_receiver,
engine_event_receiver,
} => {
// First time calling subscribe, also start listening events from `Socket` instances
self.subscribe_to_socket_events(socket_event_receiver);
Ok(engine_event_receiver)
}
}
// TODO: handle shutdown properly by receiving a shutdown signal
// sending it to socket instances.
}
pub async fn close(&self) {
|
// TODO: or drop the whole thing. The server, the sockets, everything.
todo!();
// for socket in self.clients.iter() {
// socket.value().close(true);
// }
}
pub async fn close_socket(&self, connection_id: &str) {
if let Some((_key, socket)) = self.clients.remove(connection_id) {
// TODO: convert this to drop
todo!();
// socket.close(true);
}
}
// TODO: consider converting ack callbacks into optional async Results?
// `connection_id` is an owned string just because of a Rust compiler issue.
pub async fn send_packet_with_ack(
&self,
connection_id: String,
packet: Packet,
callback: Option<Callback>,
) -> Result<(), SendPacketError> {
match self.clients.get(&connection_id) {
Some(client) => Ok(client.send_packet(packet, None).await),
None => Err(SendPacketError::UnknownConnectionId),
}
}
pub async fn send_packet(
&self,
connection_id: String,
packet: Packet,
) -> Result<(), SendPacketError> {
match self.clients.get(&connection_id) {
Some(client) => Ok(client.send_packet(packet, None).await),
None => Err(SendPacketError::UnknownConnectionId),
}
}
pub async fn handle_request(
&self,
context: RequestContext,
body: Option<A::Body>,
) -> Result<A::Response, ServerError> {
let context = Arc::new(context);
let sid_ref = context.query.get("sid");
let sid = sid_ref.map(|s| s.to_owned());
self.verify_request(sid_ref, false, context.transport_kind, context.http_method)
.await?;
if let Some(sid) = sid {
let client = self.get_client_or_error(&sid)?;
let response = client.handle_polling_request(context.clone(), body).await?;
Ok(response)
} else {
let (sid, response) = self.handshake(context, HandshakeData::Polling).await?;
Ok(response)
}
}
/// Akin to `onWebSocket` from engine.io js
// TODO: handle errors, socket closure etc.
pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) {
let context = Arc::new(context);
let sid_ref = context.query.get("sid");
let sid = sid_ref.map(|s| s.to_owned());
if let Some(sid) = sid {
// TODO: don't panic
let client = self.get_client_or_error(&sid).expect("TODO: fix this");
client.maybe_upgrade(context, todo!());
// TODO: implement this!
// let client =
// TODO: call socket.maybe_upgrade()
} else {
self.handshake(context, HandshakeData::WebSocket { socket })
.await;
todo!();
}
}
pub async fn verify_request(
&self,
sid: Option<&String>,
upgrade: bool,
transport_kind: TransportKind,
http_method: HttpMethod,
) -> Result<(), ServerError> {
if let Some(sid) = sid {
let client = self.clients.get(sid);
if let Some(client) = client {
let client_transport_kind = client.get_transport_kind();
if !upgrade && Some(transport_kind) != client_transport_kind {
return Err(ServerError::BadRequest);
}
} else {
return Err(ServerError::UnknownSid);
}
} else {
if http_method != HttpMethod::Get {
return Err(ServerError::BadHandshakeMethod);
}
// FIXME: fix allow_request calls
/*if let Some(validator) = &self.options.allow_request {
// FIXME: pass some request parameters to this validator
// to make it useful
let valid = validator();
if !valid {
return Err(ServerError::BadRequest);
}
}*/
}
Ok(())
}
/// Generate a new ID for a client.
/// Note: This generates IDs in a different format from the original JS
/// engine.io implementation, which uses a library called
/// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem
/// to guarantee uniqueness.
pub fn generate_id() -> String {
Uuid::new_v4().to_hyphenated().to_string()
}
/// Returns the new client ID
pub async fn handshake(
&self,
context: Arc<RequestContext>,
data: HandshakeData<A::WsHandle>,
) -> Result<(String, A::Response), ServerError> {
let sid = Self::generate_id();
let supports_binary = !context.query.contains_key("b64");
let jsonp = !supports_binary && !context.query.contains_key("j");
let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options(
&self.options.cookie,
sid.clone(),
)));
let transport_create_data = match data {
HandshakeData::Polling => TransportCreateData::Polling { jsonp },
HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket {
supports_binary,
socket,
},
};
let socket = Arc::new(Socket::new(
sid.clone(),
context.clone(),
self.event_senders.client.clone(),
transport_create_data,
));
self.clients.insert(sid.clone(), socket.clone());
socket.open(&self.options).await;
// TODO: send this initial
|
// TODO: consider sending signals or dropping channels instead of closing them like this?
|
random_line_split
|
server.rs
|
server: bmrng::RequestSender<ServerEvent, Packet>,
// server: broadcast::Sender<ServerEvent>,
/// Event sender to Socket instances. cloned and passed over
client: mpsc::Sender<SocketEvent>,
}
#[derive(Debug)]
pub enum ServerState {
Unsubscribed {
socket_event_receiver: mpsc::Receiver<SocketEvent>,
engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>,
},
Subscribed,
}
pub struct Server<A: 'static + Adapter> {
state: Arc<Mutex<ServerState>>,
// TODO: don't use a mutex here, instead have an internal socket state
clients: Arc<DashMap<String, Arc<Socket<A>>>>,
event_senders: EventSenders,
// TODO: ping timeout handler EngineIoSocketTimeoutHandler
pub options: ServerOptions,
}
impl Default for ServerOptions {
fn default() -> Self {
ServerOptions {
ping_timeout: 5000,
ping_interval: 25000,
upgrade_timeout: 10000,
transports: vec![TransportKind::WebSocket, TransportKind::Polling],
allow_upgrades: true,
initial_packet: None,
cookie: Some(CookieOptions::default()),
// allow_request: None,
buffer_factor: 2,
}
}
}
impl Default for CookieOptions {
fn default() -> Self {
CookieOptions {
name: "io".to_owned(),
path: "/".to_owned(),
http_only: true,
}
}
}
#[derive(Display, Debug, Clone, PartialEq)]
pub enum ServerEvent {
/// Socket ID
Connection {
connection_id: String,
},
Close {
connection_id: String,
reason: SocketCloseReason,
},
Flush {
connection_id: String,
},
Drain {
connection_id: String,
},
Message {
connection_id: String,
context: Arc<RequestContext>,
data: PacketData,
},
Error {
connection_id: String,
},
}
impl<A: 'static + Adapter> Server<A> {
pub fn new(options: ServerOptions) -> Self {
// To listen events from socket instances
let (client_event_sender, client_event_receiver) =
mpsc::channel(options.buffer_factor * BUFFER_CONST);
// To send events to the owner of this Server instance
let (server_event_sender, server_event_receiver) =
bmrng::channel(options.buffer_factor * BUFFER_CONST);
Server {
state: Arc::new(Mutex::new(ServerState::Unsubscribed {
socket_event_receiver: client_event_receiver,
engine_event_receiver: server_event_receiver,
})),
clients: Arc::new(DashMap::new()),
event_senders: EventSenders {
server: server_event_sender,
client: client_event_sender,
},
options,
}
}
pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> {
self.try_subscribe()
.expect("Already subscribed to engine_io_server::Server")
}
pub fn try_subscribe(
&self,
) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> {
let mut state = self.state.lock().unwrap();
let old_state = std::mem::replace(&mut *state, ServerState::Subscribed);
match old_state {
ServerState::Subscribed => Err(AlreadySubscribedError),
ServerState::Unsubscribed {
socket_event_receiver,
engine_event_receiver,
} => {
// First time calling subscribe, also start listening events from `Socket` instances
self.subscribe_to_socket_events(socket_event_receiver);
Ok(engine_event_receiver)
}
}
// TODO: handle shutdown properly by receiving a shutdown signal
// sending it to socket instances.
}
pub async fn close(&self) {
// TODO: consider sending signals or dropping channels instead of closing them like this?
// TODO: or drop the whole thing. The server, the sockets, everything.
todo!();
// for socket in self.clients.iter() {
// socket.value().close(true);
// }
}
pub async fn close_socket(&self, connection_id: &str) {
if let Some((_key, socket)) = self.clients.remove(connection_id) {
// TODO: convert this to drop
todo!();
// socket.close(true);
}
}
// TODO: consider converting ack callbacks into optional async Results?
// `connection_id` is an owned string just because of a Rust compiler issue.
pub async fn send_packet_with_ack(
&self,
connection_id: String,
packet: Packet,
callback: Option<Callback>,
) -> Result<(), SendPacketError> {
match self.clients.get(&connection_id) {
Some(client) => Ok(client.send_packet(packet, None).await),
None => Err(SendPacketError::UnknownConnectionId),
}
}
pub async fn send_packet(
&self,
connection_id: String,
packet: Packet,
) -> Result<(), SendPacketError> {
match self.clients.get(&connection_id) {
Some(client) => Ok(client.send_packet(packet, None).await),
None => Err(SendPacketError::UnknownConnectionId),
}
}
pub async fn handle_request(
&self,
context: RequestContext,
body: Option<A::Body>,
) -> Result<A::Response, ServerError> {
let context = Arc::new(context);
let sid_ref = context.query.get("sid");
let sid = sid_ref.map(|s| s.to_owned());
self.verify_request(sid_ref, false, context.transport_kind, context.http_method)
.await?;
if let Some(sid) = sid {
let client = self.get_client_or_error(&sid)?;
let response = client.handle_polling_request(context.clone(), body).await?;
Ok(response)
} else {
let (sid, response) = self.handshake(context, HandshakeData::Polling).await?;
Ok(response)
}
}
/// Akin to `onWebSocket` from engine.io js
// TODO: handle errors, socket closure etc.
pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) {
let context = Arc::new(context);
let sid_ref = context.query.get("sid");
let sid = sid_ref.map(|s| s.to_owned());
if let Some(sid) = sid {
// TODO: don't panic
let client = self.get_client_or_error(&sid).expect("TODO: fix this");
client.maybe_upgrade(context, todo!());
// TODO: implement this!
// let client =
// TODO: call socket.maybe_upgrade()
} else {
self.handshake(context, HandshakeData::WebSocket { socket })
.await;
todo!();
}
}
pub async fn
|
(
&self,
sid: Option<&String>,
upgrade: bool,
transport_kind: TransportKind,
http_method: HttpMethod,
) -> Result<(), ServerError> {
if let Some(sid) = sid {
let client = self.clients.get(sid);
if let Some(client) = client {
let client_transport_kind = client.get_transport_kind();
if !upgrade && Some(transport_kind) != client_transport_kind {
return Err(ServerError::BadRequest);
}
} else {
return Err(ServerError::UnknownSid);
}
} else {
if http_method != HttpMethod::Get {
return Err(ServerError::BadHandshakeMethod);
}
// FIXME: fix allow_request calls
/*if let Some(validator) = &self.options.allow_request {
// FIXME: pass some request parameters to this validator
// to make it useful
let valid = validator();
if !valid {
return Err(ServerError::BadRequest);
}
}*/
}
Ok(())
}
/// Generate a new ID for a client.
/// Note: This generates IDs in a different format from the original JS
/// engine.io implementation, which uses a library called
/// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem
/// to guarantee uniqueness.
pub fn generate_id() -> String {
Uuid::new_v4().to_hyphenated().to_string()
}
/// Returns the new client ID
pub async fn handshake(
&self,
context: Arc<RequestContext>,
data: HandshakeData<A::WsHandle>,
) -> Result<(String, A::Response), ServerError> {
let sid = Self::generate_id();
let supports_binary = !context.query.contains_key("b64");
let jsonp = !supports_binary && !context.query.contains_key("j");
let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options(
&self.options.cookie,
sid.clone(),
)));
let transport_create_data = match data {
HandshakeData::Polling => TransportCreateData::Polling { jsonp },
HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket {
supports_binary,
socket,
},
};
let socket = Arc::new(Socket::new(
sid.clone(),
context.clone(),
self.event_senders.client.clone(),
transport_create_data,
));
self.clients.insert(sid.clone(), socket.clone());
socket.open(&self.options).await;
// TODO: send this
|
verify_request
|
identifier_name
|
dkg.go
|
.Int, [2]*big.Int, error) {
privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader)
publicKey := bn256.G1ToBigIntArray(publicKeyG1)
return privateKey, publicKey, err
}
// GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error
func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error)
|
// convert public keys into G1 structs
publicKeyG1s := []*cloudflare.G1{}
for idx := 0; idx < len(participants); idx++ {
participant := participants[idx]
logger.Infof("participants[%v]: %v", idx, participant)
if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil {
publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey)
if err != nil {
return nil, nil, nil, err
}
publicKeyG1s = append(publicKeyG1s, publicKeyG1)
}
}
// check for missing data
if len(publicKeyG1s) != len(participants) {
return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants))
}
if len(privateCoefficients) != threshold+1 {
return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1)
}
//
secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err)
}
// final encrypted shares
encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err)
}
return encryptedShares, privateCoefficients, commitments, nil
}
// GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error
func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) {
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
if firstPrivateCoefficients == nil {
return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]")
}
keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients)
keyShareG1Big := bn256.G1ToBigIntArray(keyShareG1)
// KeyShare G2
h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)
// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
if !validPair {
return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
}
// DLEQ Prooof
g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
// Verify DLEQ before sending
err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}
// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
if len(keyShare1s) != len(keyShare2s) {
return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
}
// Some predefined stuff to setup
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
// Generate master public key
masterPublicKeyG1 := new(cloudflare.G1)
masterPublicKeyG2 := new(cloudflare.G2)
n := len(keyShare1s)
for idx := 0; idx < n; idx++ {
keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)
keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2)
}
masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2)
validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2})
if !validPair {
return empty4Big, errors.New("invalid pairing for master public key")
}
return masterPublicKey, nil
}
// GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error
func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {
// setup
n := len(participants)
// build portions of group secret key
publicKeyG1s := make([]*cloudflare.G1, n)
for idx := 0; idx < n; idx++ {
publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)
if err != nil {
return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err)
}
publicKeyG1s[idx] = publicKeyG1
}
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
if err != nil {
return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v
|
{
// create coefficients (private/public)
privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold)
if err != nil {
return nil, nil, nil, err
}
publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients)
// create commitments
commitments := make([][2]*big.Int, len(publicCoefficients))
for idx, publicCoefficient := range publicCoefficients {
commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient)
}
// secret shares
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
if err != nil {
return nil, nil, nil, err
}
|
identifier_body
|
dkg.go
|
.Int, [2]*big.Int, error) {
privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader)
publicKey := bn256.G1ToBigIntArray(publicKeyG1)
return privateKey, publicKey, err
}
// GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error
func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error) {
// create coefficients (private/public)
privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold)
if err != nil {
return nil, nil, nil, err
}
publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients)
// create commitments
commitments := make([][2]*big.Int, len(publicCoefficients))
for idx, publicCoefficient := range publicCoefficients {
commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient)
}
// secret shares
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
if err != nil {
return nil, nil, nil, err
}
// convert public keys into G1 structs
publicKeyG1s := []*cloudflare.G1{}
for idx := 0; idx < len(participants); idx++ {
participant := participants[idx]
logger.Infof("participants[%v]: %v", idx, participant)
if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil {
publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey)
if err != nil {
return nil, nil, nil, err
}
publicKeyG1s = append(publicKeyG1s, publicKeyG1)
}
}
// check for missing data
if len(publicKeyG1s) != len(participants) {
return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants))
}
if len(privateCoefficients) != threshold+1 {
return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1)
}
//
secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err)
}
// final encrypted shares
encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err)
}
return encryptedShares, privateCoefficients, commitments, nil
}
// GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error
func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) {
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
if firstPrivateCoefficients == nil {
return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]")
}
keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients)
keyShareG1Big := bn256.G1ToBigIntArray(keyShareG1)
// KeyShare G2
h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)
// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
if !validPair {
return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
}
// DLEQ Prooof
g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
// Verify DLEQ before sending
err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}
// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
if len(keyShare1s) != len(keyShare2s) {
return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
}
// Some predefined stuff to setup
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
// Generate master public key
masterPublicKeyG1 := new(cloudflare.G1)
masterPublicKeyG2 := new(cloudflare.G2)
n := len(keyShare1s)
for idx := 0; idx < n; idx++ {
keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)
keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2)
}
masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2)
validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2})
if !validPair {
return empty4Big, errors.New("invalid pairing for master public key")
}
return masterPublicKey, nil
}
// GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error
func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {
// setup
n := len(participants)
// build portions of group secret key
publicKeyG1s := make([]*cloudflare.G1, n)
for idx := 0; idx < n; idx++
|
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
if err != nil {
return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v
|
{
publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)
if err != nil {
return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err)
}
publicKeyG1s[idx] = publicKeyG1
}
|
conditional_block
|
dkg.go
|
.Int, [2]*big.Int, error) {
privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader)
publicKey := bn256.G1ToBigIntArray(publicKeyG1)
return privateKey, publicKey, err
}
// GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error
func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error) {
// create coefficients (private/public)
privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold)
if err != nil {
return nil, nil, nil, err
}
publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients)
// create commitments
commitments := make([][2]*big.Int, len(publicCoefficients))
for idx, publicCoefficient := range publicCoefficients {
commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient)
}
// secret shares
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
if err != nil {
return nil, nil, nil, err
}
// convert public keys into G1 structs
publicKeyG1s := []*cloudflare.G1{}
for idx := 0; idx < len(participants); idx++ {
participant := participants[idx]
logger.Infof("participants[%v]: %v", idx, participant)
if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil {
publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey)
if err != nil {
return nil, nil, nil, err
}
publicKeyG1s = append(publicKeyG1s, publicKeyG1)
}
}
// check for missing data
if len(publicKeyG1s) != len(participants) {
return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants))
}
if len(privateCoefficients) != threshold+1 {
return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1)
}
//
secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err)
}
// final encrypted shares
encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err)
}
return encryptedShares, privateCoefficients, commitments, nil
}
// GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error
func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) {
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
if firstPrivateCoefficients == nil {
return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]")
}
keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients)
keyShareG1Big := bn256.G1ToBigIntArray(keyShareG1)
// KeyShare G2
h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)
// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
if !validPair {
return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
}
// DLEQ Prooof
g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
// Verify DLEQ before sending
err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}
// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
if len(keyShare1s) != len(keyShare2s) {
return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
}
// Some predefined stuff to setup
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
// Generate master public key
masterPublicKeyG1 := new(cloudflare.G1)
masterPublicKeyG2 := new(cloudflare.G2)
n := len(keyShare1s)
for idx := 0; idx < n; idx++ {
keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)
keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2)
}
masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2)
validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2})
if !validPair {
return empty4Big, errors.New("invalid pairing for master public key")
}
return masterPublicKey, nil
}
// GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error
func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {
// setup
n := len(participants)
// build portions of group secret key
publicKeyG1s := make([]*cloudflare.G1, n)
for idx := 0; idx < n; idx++ {
publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)
if err != nil {
return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err)
}
publicKeyG1s[idx] = publicKeyG1
}
|
if err != nil {
return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v", err
|
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
|
random_line_split
|
dkg.go
|
(i, j int) {
pl[i], pl[j] = pl[j], pl[i]
}
// ThresholdForUserCount returns the threshold user count and k for successful key generation
func ThresholdForUserCount(n int) (int, int) {
k := n / 3
threshold := 2 * k
if (n - 3*k) == 2 {
threshold = threshold + 1
}
return int(threshold), int(k)
}
// InverseArrayForUserCount pre-calculates an inverse array for use by ethereum contracts
func InverseArrayForUserCount(n int) ([]*big.Int, error) {
bigNeg2 := big.NewInt(-2)
orderMinus2 := new(big.Int).Add(cloudflare.Order, bigNeg2)
// Get inverse array; this array is required to help keep gas costs down
// in the smart contract. Modular multiplication is much cheaper than
// modular inversion (expopnentiation).
invArrayBig := make([]*big.Int, n-1)
for idx := 0; idx < n-1; idx++ {
m := big.NewInt(int64(idx + 1))
mInv := new(big.Int).Exp(m, orderMinus2, cloudflare.Order)
// Confirm
res := new(big.Int).Mul(m, mInv)
res.Mod(res, cloudflare.Order)
if res.Cmp(common.Big1) != 0 {
return nil, errors.New("Error when computing inverseArray")
}
invArrayBig[idx] = mInv
}
return invArrayBig, nil
}
// GenerateKeys returns a private key, a public key and potentially an error
func GenerateKeys() (*big.Int, [2]*big.Int, error) {
privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader)
publicKey := bn256.G1ToBigIntArray(publicKeyG1)
return privateKey, publicKey, err
}
// GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error
func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error) {
// create coefficients (private/public)
privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold)
if err != nil {
return nil, nil, nil, err
}
publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients)
// create commitments
commitments := make([][2]*big.Int, len(publicCoefficients))
for idx, publicCoefficient := range publicCoefficients {
commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient)
}
// secret shares
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
if err != nil {
return nil, nil, nil, err
}
// convert public keys into G1 structs
publicKeyG1s := []*cloudflare.G1{}
for idx := 0; idx < len(participants); idx++ {
participant := participants[idx]
logger.Infof("participants[%v]: %v", idx, participant)
if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil {
publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey)
if err != nil {
return nil, nil, nil, err
}
publicKeyG1s = append(publicKeyG1s, publicKeyG1)
}
}
// check for missing data
if len(publicKeyG1s) != len(participants) {
return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants))
}
if len(privateCoefficients) != threshold+1 {
return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1)
}
//
secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err)
}
// final encrypted shares
encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err)
}
return encryptedShares, privateCoefficients, commitments, nil
}
// GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error
func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) {
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
if firstPrivateCoefficients == nil {
return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]")
}
keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients)
keyShareG1Big := bn256.G1ToBigIntArray(keyShareG1)
// KeyShare G2
h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)
// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
if !validPair {
return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
}
// DLEQ Prooof
g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
// Verify DLEQ before sending
err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
if err != nil {
return empty2Big, empty2Big, empty4Big, err
}
return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}
// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
if len(keyShare1s) != len(keyShare2s) {
return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
}
// Some predefined stuff to setup
h1Base, err := cloudflare.HashToG1(h1BaseMessage)
if err != nil {
return empty4Big, err
}
orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)
// Generate master public key
masterPublicKeyG1 := new(cloudflare.G1)
masterPublicKeyG2 := new(cloudflare.G2)
n := len(keyShare1s)
for idx := 0; idx < n; idx++ {
keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
if err != nil {
return empty4Big, err
}
masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)
keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
if err != nil {
return empty4Big, err
}
|
Swap
|
identifier_name
|
|
id.rs
|
pub fn art(name: impl AsRef<str>) -> usize {
let name = name.as_ref().to_lowercase();
match ART.iter().position(|&other| other == name) {
Some(index) => index,
_ => panic!("art '{}' has no id yet", name),
}
}
pub fn villager(name: impl AsRef<str>) -> usize {
let name = name.as_ref().to_lowercase();
match VILLAGERS.iter().position(|&other| other == name) {
Some(index) => index,
_ => panic!("villager '{}' has no id yet", name),
}
}
const BUGS: &[&str] = &[
"common butterfly",
"yellow butterfly",
"tiger butterfly",
"peacock butterfly",
"common bluebottle",
"paper kite butterfly",
"great purple emperor",
"monarch butterfly",
"emperor butterfly",
"agrias butterfly",
"rajah brooke's birdwing",
"queen alexandra's birdwing",
"moth",
"atlas moth",
"madagascan sunset moth",
"long locust",
"migratory locust",
"rice grasshopper",
"grasshopper",
"cricket",
"bell cricket",
"mantis",
"orchid mantis",
"honeybee",
"wasp",
"brown cicada",
"robust cicada",
"giant cicada",
"walker cicada",
"evening cicada",
"cicada shell",
"red dragonfly",
"darner dragonfly",
"banded dragonfly",
"damselfly",
"firefly",
"mole cricket",
"pondskater",
"diving beetle",
"giant water bug",
"stinkbug",
"man-faced stink bug",
"ladybug",
"tiger beetle",
"jewel beetle",
"violin beetle",
"citrus long-horned beetle",
"rosalia batesi beetle",
"blue weevil beetle",
"dung beetle",
"earth-boring dung beetle",
"scarab beetle",
"drone beetle",
"goliath beetle",
"saw stag",
"miyama stag",
"giant stag",
"rainbow stag",
"cyclommatus stag",
"golden stag",
"giraffe stag",
"horned dynastid",
"horned atlas",
"horned elephant",
"horned hercules",
"walking stick",
"walking leaf",
"bagworm",
"ant",
"hermit crab",
"wharf roach",
"fly",
"mosquito",
"flea",
"snail",
"pill bug",
"centipede",
"spider",
"tarantula",
"scorpion",
];
const FISH: &[&str] = &[
"bitterling",
"pale chub",
"crucian carp",
"dace",
"carp",
"koi",
"goldfish",
"pop-eyed goldfish",
"ranchu goldfish",
"killifish",
"crawfish",
"soft-shelled turtle",
"snapping turtle",
"tadpole",
"frog",
"freshwater goby",
"loach",
"catfish",
"giant snakehead",
"bluegill",
"yellow perch",
"black bass",
"tilapia",
"pike",
"pond smelt",
"sweetfish",
"cherry salmon",
"char",
"golden trout",
"stringfish",
"salmon",
"king salmon",
"mitten crab",
"guppy",
"nibble fish",
"angelfish",
"betta",
"neon tetra",
"rainbowfish",
"piranha",
"arowana",
"dorado",
"gar",
"arapaima",
"saddled bichir",
"sturgeon",
"sea butterfly",
"sea horse",
"clown fish",
"surgeonfish",
"butterfly fish",
"napoleonfish",
"zebra turkeyfish",
"blowfish",
"puffer fish",
"anchovy",
"horse mackerel",
"barred knifejaw",
"sea bass",
"red snapper",
"dab",
"olive flounder",
"squid",
"moray eel",
"ribbon eel",
"tuna",
"blue marlin",
"giant trevally",
"mahi-mahi",
"ocean sunfish",
"ray",
"saw shark",
"hammerhead shark",
"great white shark",
"whale shark",
"suckerfish",
"football fish",
"oarfish",
"barreleye",
"coelacanth",
];
const FOSSILS: &[&str] = &[
"acanthostega",
"amber",
"ammonite",
"anomalocaris",
"archaeopteryx",
"australopith",
"coprolite",
"dinosaur track",
"dunkleosteus",
"eusthenopteron",
"juramaia",
"myllokunmingia",
"shark-tooth pattern",
"trilobite",
"ankylo skull",
"ankylo torso",
"ankylo tail",
"archelon skull",
"archelon tail",
"brachio skull",
"brachio chest",
"brachio pelvis",
"brachio tail",
"deinony torso",
"deinony tail",
"dimetrodon skull",
"dimetrodon torso",
"diplo skull",
"diplo neck",
"diplo chest",
"diplo pelvis",
"diplo tail",
"diplo tail tip",
"iguanodon skull",
"iguanodon torso",
"iguanodon tail",
"mammoth skull",
"mammoth torso",
"megacero skull",
"megacero torso",
"megacero tail",
"left megalo side",
"right megalo side",
"ophthalmo skull",
"ophthalmo torso",
"pachysaurus skull",
"pachysaurus tail",
"parasaur skull",
"parasaur torso",
"parasaur tail",
"plesio skull",
"plesio tail",
"plesio body",
"right ptera wing",
"ptera body",
"left ptera wing",
"right quetzal wing",
"quetzal torso",
"left quetzal wing",
"sabertooth skull",
"sabertooth tail",
"spino skull",
"spino torso",
"spino tail",
"stego skull",
"stego torso",
"stego tail",
"tricera skull",
"tricera torso",
"tricera tail",
"t. rex skull",
"t. rex torso",
"t. rex tail",
];
const FLOWERS: &[&str] = &[
"red cosmos",
"white cosmos",
"yellow cosmos",
"pink cosmos",
"orange cosmos",
"black cosmos",
"white tulips",
"red tulips",
"yellow tulips",
"pink tulips",
"orange tulips",
"purple tulips",
"black tulips",
"yellow pansies",
"red pansies",
"white pansies",
"orange pansies",
"purple pansies",
"blue pansies",
"white roses",
"red roses",
"yellow roses",
"pink roses",
"orange roses",
"purple roses",
"black roses",
"blue roses",
"gold roses",
"white lilies",
"red lilies",
"yellow lilies",
"pink lilies",
"orange lilies",
"black lilies",
"white windflowers",
"orange windflowers",
"red windflowers",
"blue windflowers",
"pink windflowers",
"purple windflowers",
"white hyacinths",
"yellow hyacinths",
"red hyacinths",
"pink hyacinths",
"orange hyacinths",
"blue hyacinths",
"purple hyacinths",
"white mums",
"yellow mums",
"red mums",
"purple mums",
"pink mums",
"green mums",
];
const ART: &[&str] = &[
"robust statue",
"rock-head statue",
|
{
let name = name.as_ref().to_lowercase();
match FLOWERS.iter().position(|&other| other == name) {
Some(index) => index,
_ => panic!("flower '{}' has no id yet", name),
}
}
|
identifier_body
|
|
id.rs
|
(name: impl AsRef<str>) -> usize {
let name = name.as_ref().to_lowercase();
match ART.iter().position(|&other| other == name) {
Some(index) => index,
_ => panic!("art '{}' has no id yet", name),
}
}
pub fn villager(name: impl AsRef<str>) -> usize {
let name = name.as_ref().to_lowercase();
match VILLAGERS.iter().position(|&other| other == name) {
Some(index) => index,
_ => panic!("villager '{}' has no id yet", name),
}
}
const BUGS: &[&str] = &[
"common butterfly",
"yellow butterfly",
"tiger butterfly",
"peacock butterfly",
"common bluebottle",
"paper kite butterfly",
"great purple emperor",
"monarch butterfly",
"emperor butterfly",
"agrias butterfly",
"rajah brooke's birdwing",
"queen alexandra's birdwing",
"moth",
"atlas moth",
"madagascan sunset moth",
"long locust",
"migratory locust",
"rice grasshopper",
"grasshopper",
"cricket",
"bell cricket",
"mantis",
"orchid mantis",
"honeybee",
"wasp",
"brown cicada",
"robust cicada",
"giant cicada",
"walker cicada",
"evening cicada",
"cicada shell",
"red dragonfly",
"darner dragonfly",
"banded dragonfly",
"damselfly",
"firefly",
"mole cricket",
"pondskater",
"diving beetle",
"giant water bug",
"stinkbug",
"man-faced stink bug",
"ladybug",
"tiger beetle",
"jewel beetle",
"violin beetle",
"citrus long-horned beetle",
"rosalia batesi beetle",
"blue weevil beetle",
"dung beetle",
"earth-boring dung beetle",
"scarab beetle",
"drone beetle",
"goliath beetle",
"saw stag",
"miyama stag",
"giant stag",
"rainbow stag",
"cyclommatus stag",
"golden stag",
"giraffe stag",
"horned dynastid",
"horned atlas",
"horned elephant",
"horned hercules",
"walking stick",
"walking leaf",
"bagworm",
"ant",
"hermit crab",
"wharf roach",
"fly",
"mosquito",
"flea",
"snail",
"pill bug",
"centipede",
"spider",
"tarantula",
"scorpion",
];
const FISH: &[&str] = &[
"bitterling",
"pale chub",
"crucian carp",
"dace",
"carp",
"koi",
"goldfish",
"pop-eyed goldfish",
"ranchu goldfish",
"killifish",
"crawfish",
"soft-shelled turtle",
"snapping turtle",
"tadpole",
"frog",
"freshwater goby",
"loach",
"catfish",
"giant snakehead",
"bluegill",
"yellow perch",
"black bass",
"tilapia",
"pike",
"pond smelt",
"sweetfish",
"cherry salmon",
"char",
"golden trout",
"stringfish",
"salmon",
"king salmon",
"mitten crab",
"guppy",
"nibble fish",
"angelfish",
"betta",
"neon tetra",
"rainbowfish",
"piranha",
"arowana",
"dorado",
"gar",
"arapaima",
"saddled bichir",
"sturgeon",
"sea butterfly",
"sea horse",
"clown fish",
"surgeonfish",
"butterfly fish",
"napoleonfish",
"zebra turkeyfish",
"blowfish",
"puffer fish",
"anchovy",
"horse mackerel",
"barred knifejaw",
"sea bass",
"red snapper",
"dab",
"olive flounder",
"squid",
"moray eel",
"ribbon eel",
"tuna",
"blue marlin",
"giant trevally",
"mahi-mahi",
"ocean sunfish",
"ray",
"saw shark",
"hammerhead shark",
"great white shark",
"whale shark",
"suckerfish",
"football fish",
"oarfish",
"barreleye",
"coelacanth",
];
const FOSSILS: &[&str] = &[
"acanthostega",
"amber",
"ammonite",
"anomalocaris",
"archaeopteryx",
"australopith",
"coprolite",
"dinosaur track",
"dunkleosteus",
"eusthenopteron",
"juramaia",
"myllokunmingia",
"shark-tooth pattern",
"trilobite",
"ankylo skull",
"ankylo torso",
"ankylo tail",
"archelon skull",
"archelon tail",
"brachio skull",
"brachio chest",
"brachio pelvis",
"brachio tail",
"deinony torso",
"deinony tail",
"dimetrodon skull",
"dimetrodon torso",
"diplo skull",
"diplo neck",
"diplo chest",
"diplo pelvis",
"diplo tail",
"diplo tail tip",
"iguanodon skull",
"iguanodon torso",
"iguanodon tail",
"mammoth skull",
"mammoth torso",
"megacero skull",
"megacero torso",
"megacero tail",
"left megalo side",
"right megalo side",
"ophthalmo skull",
"ophthalmo torso",
"pachysaurus skull",
"pachysaurus tail",
"parasaur skull",
"parasaur torso",
"parasaur tail",
"plesio skull",
"plesio tail",
"plesio body",
"right ptera wing",
"ptera body",
"left ptera wing",
"right quetzal wing",
"quetzal torso",
"left quetzal wing",
"sabertooth skull",
"sabertooth tail",
"spino skull",
"spino torso",
"spino tail",
"stego skull",
"stego torso",
"stego tail",
"tricera skull",
"tricera torso",
"tricera tail",
"t. rex skull",
"t. rex torso",
"t. rex tail",
];
const FLOWERS: &[&str] = &[
"red cosmos",
"white cosmos",
"yellow cosmos",
"pink cosmos",
"orange cosmos",
"black cosmos",
"white tulips",
"red tulips",
"yellow tulips",
"pink tulips",
"orange tulips",
"purple tulips",
"black tulips",
"yellow pansies",
"red pansies",
"white pansies",
"orange pansies",
"purple pansies",
"blue pansies",
"white roses",
"red roses",
"yellow roses",
"pink roses",
"orange roses",
"purple roses",
"black roses",
"blue roses",
"gold roses",
"white lilies",
"red lilies",
"yellow lilies",
"pink lilies",
"orange lilies",
"black lilies",
"white windflowers",
"orange windflowers",
"red windflowers",
"blue windflowers",
"pink windflowers",
"purple windflowers",
"white hyacinths",
"yellow hyacinths",
"red hyacinths",
"pink hyacinths",
"orange hyacinths",
"blue hyacinths",
"purple hyacinths",
"white mums",
"yellow mums",
"red mums",
"purple mums",
"pink mums",
"green mums",
];
const ART: &[&str] = &[
"robust statue",
"rock-head statue",
"beautiful statue",
"valiant statue",
"gallant statue",
"mystic statue",
"informative statue",
"warrior statue",
"tremendous statue",
"ancient statue",
"motherly statue
|
art
|
identifier_name
|
|
id.rs
|
",
"earth-boring dung beetle",
"scarab beetle",
"drone beetle",
"goliath beetle",
"saw stag",
"miyama stag",
"giant stag",
"rainbow stag",
"cyclommatus stag",
"golden stag",
"giraffe stag",
"horned dynastid",
"horned atlas",
"horned elephant",
"horned hercules",
"walking stick",
"walking leaf",
"bagworm",
"ant",
"hermit crab",
"wharf roach",
"fly",
"mosquito",
"flea",
"snail",
"pill bug",
"centipede",
"spider",
"tarantula",
"scorpion",
];
const FISH: &[&str] = &[
"bitterling",
"pale chub",
"crucian carp",
"dace",
"carp",
"koi",
"goldfish",
"pop-eyed goldfish",
"ranchu goldfish",
"killifish",
"crawfish",
"soft-shelled turtle",
"snapping turtle",
"tadpole",
"frog",
"freshwater goby",
"loach",
"catfish",
"giant snakehead",
"bluegill",
"yellow perch",
"black bass",
"tilapia",
"pike",
"pond smelt",
"sweetfish",
"cherry salmon",
"char",
"golden trout",
"stringfish",
"salmon",
"king salmon",
"mitten crab",
"guppy",
"nibble fish",
"angelfish",
"betta",
"neon tetra",
"rainbowfish",
"piranha",
"arowana",
"dorado",
"gar",
"arapaima",
"saddled bichir",
"sturgeon",
"sea butterfly",
"sea horse",
"clown fish",
"surgeonfish",
"butterfly fish",
"napoleonfish",
"zebra turkeyfish",
"blowfish",
"puffer fish",
"anchovy",
"horse mackerel",
"barred knifejaw",
"sea bass",
"red snapper",
"dab",
"olive flounder",
"squid",
"moray eel",
"ribbon eel",
"tuna",
"blue marlin",
"giant trevally",
"mahi-mahi",
"ocean sunfish",
"ray",
"saw shark",
"hammerhead shark",
"great white shark",
"whale shark",
"suckerfish",
"football fish",
"oarfish",
"barreleye",
"coelacanth",
];
const FOSSILS: &[&str] = &[
"acanthostega",
"amber",
"ammonite",
"anomalocaris",
"archaeopteryx",
"australopith",
"coprolite",
"dinosaur track",
"dunkleosteus",
"eusthenopteron",
"juramaia",
"myllokunmingia",
"shark-tooth pattern",
"trilobite",
"ankylo skull",
"ankylo torso",
"ankylo tail",
"archelon skull",
"archelon tail",
"brachio skull",
"brachio chest",
"brachio pelvis",
"brachio tail",
"deinony torso",
"deinony tail",
"dimetrodon skull",
"dimetrodon torso",
"diplo skull",
"diplo neck",
"diplo chest",
"diplo pelvis",
"diplo tail",
"diplo tail tip",
"iguanodon skull",
"iguanodon torso",
"iguanodon tail",
"mammoth skull",
"mammoth torso",
"megacero skull",
"megacero torso",
"megacero tail",
"left megalo side",
"right megalo side",
"ophthalmo skull",
"ophthalmo torso",
"pachysaurus skull",
"pachysaurus tail",
"parasaur skull",
"parasaur torso",
"parasaur tail",
"plesio skull",
"plesio tail",
"plesio body",
"right ptera wing",
"ptera body",
"left ptera wing",
"right quetzal wing",
"quetzal torso",
"left quetzal wing",
"sabertooth skull",
"sabertooth tail",
"spino skull",
"spino torso",
"spino tail",
"stego skull",
"stego torso",
"stego tail",
"tricera skull",
"tricera torso",
"tricera tail",
"t. rex skull",
"t. rex torso",
"t. rex tail",
];
const FLOWERS: &[&str] = &[
"red cosmos",
|
"black cosmos",
"white tulips",
"red tulips",
"yellow tulips",
"pink tulips",
"orange tulips",
"purple tulips",
"black tulips",
"yellow pansies",
"red pansies",
"white pansies",
"orange pansies",
"purple pansies",
"blue pansies",
"white roses",
"red roses",
"yellow roses",
"pink roses",
"orange roses",
"purple roses",
"black roses",
"blue roses",
"gold roses",
"white lilies",
"red lilies",
"yellow lilies",
"pink lilies",
"orange lilies",
"black lilies",
"white windflowers",
"orange windflowers",
"red windflowers",
"blue windflowers",
"pink windflowers",
"purple windflowers",
"white hyacinths",
"yellow hyacinths",
"red hyacinths",
"pink hyacinths",
"orange hyacinths",
"blue hyacinths",
"purple hyacinths",
"white mums",
"yellow mums",
"red mums",
"purple mums",
"pink mums",
"green mums",
];
const ART: &[&str] = &[
"robust statue",
"rock-head statue",
"beautiful statue",
"valiant statue",
"gallant statue",
"mystic statue",
"informative statue",
"warrior statue",
"tremendous statue",
"ancient statue",
"motherly statue",
"familiar statue",
"great statue",
"quaint painting",
"graceful painting",
"famous painting",
"detailed painting",
"basic painting",
"serene painting",
"amazing painting",
"solemn painting",
"scary painting",
"jolly painting",
"wistful painting",
"moving painting",
"wild painting left half",
"wild painting right half",
"scenic painting",
"academic painting",
"common painting",
"flowery painting",
"twinkling painting",
"nice painting",
"moody painting",
"glowing painting",
"perfect painting",
"mysterious painting",
"calm painting",
"proper painting",
"sinking painting",
"worthy painting",
"warm painting",
"dynamic painting",
];
const VILLAGERS: &[&str] = &[
"amelia",
"pierce",
"apollo",
"frank",
"buzz",
"sterling",
"keaton",
"celia",
"avery",
"deli",
"tammi",
"monty",
"nana",
"flip",
"simon",
"elise",
"shari",
"anabelle",
"annalisa",
"snooty",
"pango",
"olaf",
"antonio",
"cyrano",
"beardo",
"chow",
"megan",
"groucho",
"grizzly",
"klaus",
"ike",
"curt",
"tutu",
"nate",
"paula",
"pinky",
"charlise",
"teddy",
"ursala",
"filbert",
"sally",
"cally",
"marshal",
"agent s",
"blaire",
"nibbles",
"sylvana",
"mint",
"hazel",
"tasha",
"pecan",
"peanut",
"
|
"white cosmos",
"yellow cosmos",
"pink cosmos",
"orange cosmos",
|
random_line_split
|
main.rs
|
wing2up = rng.gen_bool(0.5);
for i in 0..count {
let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64);
let xbase = wingw * xp;
let wing1 = rng.gen_range(0.8, 1.1) * wing1m;
let wing2 =
rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 });
let route = shake(
path_subdivide_to_curve(
vec![
(
xbase * spread1 + dx1 + wingw * offset1,
-wingh * 0.5 * wing1,
),
(xbase + dx1 * interp, -wingh * 0.5 * interp * wing1),
(xbase, 0.0),
(xbase + dx2 * interp, wingh * 0.5 * interp * wing2),
(xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2),
],
2,
0.8,
),
shaking,
rng,
);
routes.push(route);
}
// scale, rotate & translate
routes
.iter()
.map(|route| {
route
.iter()
.map(|&p| {
let p = p_r(p, rotation);
(xmul * scale * p.0 + origin.0, scale * p.1 + origin.1)
})
.collect()
})
.collect()
}
fn art(opts: &Opts) -> Vec<Group> {
let height = opts.height;
let width = opts.width;
let pad = opts.pad;
let mut rng = rng_from_seed(opts.seed);
let perlin = Perlin::new();
let mut passage = Passage::new(0.5, width, height);
let passage_threshold = 5;
let min_route = 2;
let peakfactor = rng.gen_range(-0.001, 0.001)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0);
let ynoisefactor = rng.gen_range(0.02, 0.1);
let yincr = rng.gen_range(1.5, 3.0);
let amp2 = rng.gen_range(1.0, 12.0);
let precision = rng.gen_range(0.1, 0.3);
let offsetstrategy = rng.gen_range(0, 5);
let mut routes = Vec::new();
let mut cave_spawned = false;
let cave_threshold = rng.gen_range(0.5, 0.9) * height;
let mut cave_initial_pos = Vec::new();
let w = rng.gen_range(0.05, 0.2);
let mut base_y = height * 5.0;
let mut miny = height;
let stopy = rng.gen_range(0.2, 0.5) * height;
let mut height_map: Vec<f64> = Vec::new();
loop {
if miny < stopy {
break;
}
if miny < cave_threshold && !cave_spawned {
cave_spawned = true;
let xfrom = (0.5 - w / 2.0) * width;
let xto = (0.5 + w / 2.0) * width;
let yamp = rng.gen_range(8.0, 24.0);
let mut x = xfrom;
let mut route = Vec::new();
loop {
if x > xto {
break;
}
let xi = (x / precision) as usize;
let ybottom = height_map[xi].min(height - pad);
let ytop = ybottom
- yamp
* (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs()
+ 0.6
* (1.0
- (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0)));
height_map[xi] = ytop;
cave_initial_pos.push((x, ytop));
if (route.len() / 2) % 2 == 0 {
route.push((x, ybottom));
route.push((x, ytop));
} else {
route.push((x, ytop));
route.push((x, ybottom));
}
x += precision;
}
routes.push(route);
}
let mut route = Vec::new();
let mut x = pad;
let mut was_outside = true;
loop {
if x > width - pad {
break;
}
let xv = (4.0 - base_y / height) * (x - width / 2.);
let amp = height * 0.05;
let mut y = base_y;
if offsetstrategy == 0 {
y += amp * peakfactor * xv * xv;
}
y += -amp
* perlin
.get([
//
xv * 0.005,
y * 0.02,
77.
+ opts.seed / 7.3
+ perlin.get([
//
-opts.seed * 7.3,
8.3 + xv * 0.02,
y * 0.1,
]),
])
.abs();
if offsetstrategy == 1 {
y += amp * peakfactor * xv * xv;
}
y += amp2
* amp
* perlin.get([
//
8.3 + xv * 0.01,
88.1 + y * ynoisefactor,
opts.seed * 97.3,
]);
if offsetstrategy == 2 {
y += amp * peakfactor * xv * xv;
}
y += amp
* perlin.get([
//
opts.seed * 9.3 - 77.,
xv * 0.1,
y * 0.5,
])
* perlin
.get([
//
xv * 0.02,
88.1 + y * 0.2,
-opts.seed / 7.7,
])
.min(0.0);
if offsetstrategy == 3 {
y += amp * peakfactor * xv * xv;
}
y += 0.1
* amp
* (1.0 - miny / height)
* perlin.get([
//
66666. + opts.seed * 1.3,
88.3 + xv * 0.5,
88.1 + y * 0.5,
]);
if offsetstrategy == 4 {
y += amp * peakfactor * xv * xv;
}
if y < miny {
miny = y;
}
let mut collides = false;
let xi = (x / precision) as usize;
if xi >= height_map.len() {
height_map.push(y);
} else {
if y > height_map[xi] {
collides = true;
} else {
height_map[xi] = y;
}
}
let inside =
!collides && pad < x && x < width - pad && pad < y && y < height - pad;
if inside && passage.get((x, y)) < passage_threshold {
if was_outside {
if route.len() > min_route {
routes.push(route);
}
route = Vec::new();
}
was_outside = false;
route.push((x, y));
passage.count((x, y));
} else {
was_outside = true;
}
x += precision;
}
if route.len() > min_route {
routes.push(route);
}
base_y -= yincr;
}
let radius = 6.0;
passage.grow_passage(radius);
rng.shuffle(&mut cave_initial_pos);
let mut positions = Vec::new();
for i in 0..rng.gen_range(4, 12) {
if i >= cave_initial_pos.len() - 1 {
break;
}
let initial = cave_initial_pos[i];
let mut a = -PI / 2. + rng.gen_range(-1.0, 1.0) * rng.gen_range(0.5, 1.0);
let mut p = initial;
let amp = 3.0;
let pad = pad * 2.;
loop {
|
if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad {
break;
}
p = (p.0 + amp * a.cos(), p.1 + amp * a.sin());
positions.push(p);
|
random_line_split
|
|
main.rs
|
fn eagle<R: Rng>(
origin: (f64, f64),
scale: f64,
rotation: f64,
xreverse: bool,
rng: &mut R,
) -> Vec<Vec<(f64, f64)>> {
let xmul = if xreverse { -1.0 } else { 1.0 };
let count = 2 + (scale * 3.0) as usize;
let mut routes: Vec<Vec<(f64, f64)>> = Vec::new();
let shaking = scale * 0.1;
// body
let bodyw = 5.0;
let bodyh = 1.5;
let headcompression = rng.gen_range(0.1, 0.5);
let headoff = rng.gen_range(0.1, 0.5);
for i in 0..count {
let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64);
let ybase = bodyh * yp;
let route = shake(
path_subdivide_to_curve(
vec![
(-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase),
(-0.3 * bodyw, ybase),
(0.2 * bodyw, ybase),
(0.45 * bodyw, headcompression * ybase + headoff * bodyh),
],
1,
0.8,
),
shaking,
rng,
);
routes.push(route);
}
let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize;
// wings
let wingw = 1.4;
let wingh = 8.0;
let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0);
let dx2 = if rng.gen_bool(0.8) {
-dx1
} else {
rng.gen_range(-3.0, 3.0)
};
let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0);
let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0);
let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0);
let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0);
let interp = 0.5;
let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0);
let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0);
let wing2up = rng.gen_bool(0.5);
for i in 0..count {
let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64);
let xbase = wingw * xp;
let wing1 = rng.gen_range(0.8, 1.1) * wing1m;
let wing2 =
rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 });
let route = shake(
path_subdivide_to_curve(
vec![
(
xbase * spread1 + dx1 + wingw * offset1,
-wingh * 0.5 * wing1,
),
(xbase + dx1 * interp, -wingh * 0.5 * interp * wing1),
(xbase, 0.0),
(xbase + dx2 * interp, wingh * 0.5 * interp * wing2),
(xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2),
],
2,
0.8,
),
shaking,
rng,
);
routes.push(route);
}
// scale, rotate & translate
routes
.iter()
.map(|route| {
route
.iter()
.map(|&p| {
let p = p_r(p, rotation);
(xmul * scale * p.0 + origin.0, scale * p.1 + origin.1)
})
.collect()
})
.collect()
}
fn art(opts: &Opts) -> Vec<Group> {
let height = opts.height;
let width = opts.width;
let pad = opts.pad;
let mut rng = rng_from_seed(opts.seed);
let perlin = Perlin::new();
let mut passage = Passage::new(0.5, width, height);
let passage_threshold = 5;
let min_route = 2;
let peakfactor = rng.gen_range(-0.001, 0.001)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0);
let ynoisefactor = rng.gen_range(0.02, 0.1);
let yincr = rng.gen_range(1.5, 3.0);
let amp2 = rng.gen_range(1.0, 12.0);
let precision = rng.gen_range(0.1, 0.3);
let offsetstrategy = rng.gen_range(0, 5);
let mut routes = Vec::new();
let mut cave_spawned = false;
let cave_threshold = rng.gen_range(0.5, 0.9) * height;
let mut cave_initial_pos = Vec::new();
let w = rng.gen_range(0.05, 0.2);
let mut base_y = height * 5.0;
let mut miny = height;
let stopy = rng.gen_range(0.2, 0.5) * height;
let mut height_map: Vec<f64> = Vec::new();
loop {
if miny < stopy {
break;
}
if miny < cave_threshold && !cave_spawned {
cave_spawned = true;
let xfrom = (0.5 - w / 2.0) * width;
let xto = (0.5 + w / 2.0) * width;
let yamp = rng.gen_range(8.0, 24.0);
let mut x = xfrom;
let mut route = Vec::new();
loop {
if x > xto {
break;
}
let xi = (x / precision) as usize;
let ybottom = height_map[xi].min(height - pad);
let ytop = ybottom
- yamp
* (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs()
+ 0.6
* (1.0
- (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0)));
height_map[xi] = ytop;
cave_initial_pos.push((x, ytop));
if (route.len() / 2) % 2 == 0 {
route.push((x, ybottom));
route.push((x, ytop));
} else {
route.push((x, ytop));
route.push((x, ybottom));
}
x += precision;
}
routes.push(route);
}
let mut route = Vec::new();
let mut x = pad;
let mut was_outside = true;
loop {
if x > width - pad {
break;
}
let xv = (4.0 - base_y / height) * (x - width / 2.);
let amp = height * 0.05;
let mut y = base_y;
if offsetstrategy == 0 {
y += amp * peakfactor * xv * xv;
}
y += -amp
* perlin
.get([
//
xv * 0.005,
y * 0.02,
77.
+ opts.seed / 7.3
+ perlin.get([
//
-opts.seed * 7.3,
8.3 + xv * 0.02,
y * 0.1,
|
{
path
.iter()
.map(|&(x, y)| {
let dx = rng.gen_range(-scale, scale);
let dy = rng.gen_range(-scale, scale);
(x + dx, y + dy)
})
.collect()
}
|
identifier_body
|
|
main.rs
|
{
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "100.0")]
pub width: f64,
#[clap(short, long, default_value = "150.0")]
pub height: f64,
#[clap(short, long, default_value = "5.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
#[clap(short, long, default_value = "0.0")]
pub seed1: f64,
#[clap(short, long, default_value = "0.0")]
pub seed2: f64,
#[clap(short, long, default_value = "0.0")]
pub seed3: f64,
}
fn shake<R: Rng>(
path: Vec<(f64, f64)>,
scale: f64,
rng: &mut R,
) -> Vec<(f64, f64)> {
path
.iter()
.map(|&(x, y)| {
let dx = rng.gen_range(-scale, scale);
let dy = rng.gen_range(-scale, scale);
(x + dx, y + dy)
})
.collect()
}
fn eagle<R: Rng>(
origin: (f64, f64),
scale: f64,
rotation: f64,
xreverse: bool,
rng: &mut R,
) -> Vec<Vec<(f64, f64)>> {
let xmul = if xreverse { -1.0 } else { 1.0 };
let count = 2 + (scale * 3.0) as usize;
let mut routes: Vec<Vec<(f64, f64)>> = Vec::new();
let shaking = scale * 0.1;
// body
let bodyw = 5.0;
let bodyh = 1.5;
let headcompression = rng.gen_range(0.1, 0.5);
let headoff = rng.gen_range(0.1, 0.5);
for i in 0..count {
let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64);
let ybase = bodyh * yp;
let route = shake(
path_subdivide_to_curve(
vec![
(-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase),
(-0.3 * bodyw, ybase),
(0.2 * bodyw, ybase),
(0.45 * bodyw, headcompression * ybase + headoff * bodyh),
],
1,
0.8,
),
shaking,
rng,
);
routes.push(route);
}
let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize;
// wings
let wingw = 1.4;
let wingh = 8.0;
let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0);
let dx2 = if rng.gen_bool(0.8) {
-dx1
} else {
rng.gen_range(-3.0, 3.0)
};
let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0);
let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0);
let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0);
let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0);
let interp = 0.5;
let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0);
let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0);
let wing2up = rng.gen_bool(0.5);
for i in 0..count {
let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64);
let xbase = wingw * xp;
let wing1 = rng.gen_range(0.8, 1.1) * wing1m;
let wing2 =
rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 });
let route = shake(
path_subdivide_to_curve(
vec![
(
xbase * spread1 + dx1 + wingw * offset1,
-wingh * 0.5 * wing1,
),
(xbase + dx1 * interp, -wingh * 0.5 * interp * wing1),
(xbase, 0.0),
(xbase + dx2 * interp, wingh * 0.5 * interp * wing2),
(xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2),
],
2,
0.8,
),
shaking,
rng,
);
routes.push(route);
}
// scale, rotate & translate
routes
.iter()
.map(|route| {
route
.iter()
.map(|&p| {
let p = p_r(p, rotation);
(xmul * scale * p.0 + origin.0, scale * p.1 + origin.1)
})
.collect()
})
.collect()
}
fn art(opts: &Opts) -> Vec<Group> {
let height = opts.height;
let width = opts.width;
let pad = opts.pad;
let mut rng = rng_from_seed(opts.seed);
let perlin = Perlin::new();
let mut passage = Passage::new(0.5, width, height);
let passage_threshold = 5;
let min_route = 2;
let peakfactor = rng.gen_range(-0.001, 0.001)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0);
let ynoisefactor = rng.gen_range(0.02, 0.1);
let yincr = rng.gen_range(1.5, 3.0);
let amp2 = rng.gen_range(1.0, 12.0);
let precision = rng.gen_range(0.1, 0.3);
let offsetstrategy = rng.gen_range(0, 5);
let mut routes = Vec::new();
let mut cave_spawned = false;
let cave_threshold = rng.gen_range(0.5, 0.9) * height;
let mut cave_initial_pos = Vec::new();
let w = rng.gen_range(0.05, 0.2);
let mut base_y = height * 5.0;
let mut miny = height;
let stopy = rng.gen_range(0.2, 0.5) * height;
let mut height_map: Vec<f64> = Vec::new();
loop {
if miny < stopy {
break;
}
if miny < cave_threshold && !cave_spawned {
cave_spawned = true;
let xfrom = (0.5 - w / 2.0) * width;
let xto = (0.5 + w / 2.0) * width;
let yamp = rng.gen_range(8.0, 24.0);
let mut x = xfrom;
let mut route = Vec::new();
loop {
if x > xto {
break;
}
let xi = (x / precision) as usize;
let ybottom = height_map[xi].min(height - pad);
let ytop = ybottom
- yamp
* (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs()
+ 0.6
* (1.0
- (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0)));
height_map[xi] = ytop;
cave_initial_pos.push((x, ytop));
if (route.len() / 2) % 2 == 0 {
route.push((x, ybottom));
route
|
Opts
|
identifier_name
|
|
main.rs
|
.0, 1.0);
let dx2 = if rng.gen_bool(0.8)
|
else {
rng.gen_range(-3.0, 3.0)
};
let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0);
let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0);
let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0);
let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0);
let interp = 0.5;
let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0);
let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0);
let wing2up = rng.gen_bool(0.5);
for i in 0..count {
let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64);
let xbase = wingw * xp;
let wing1 = rng.gen_range(0.8, 1.1) * wing1m;
let wing2 =
rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 });
let route = shake(
path_subdivide_to_curve(
vec![
(
xbase * spread1 + dx1 + wingw * offset1,
-wingh * 0.5 * wing1,
),
(xbase + dx1 * interp, -wingh * 0.5 * interp * wing1),
(xbase, 0.0),
(xbase + dx2 * interp, wingh * 0.5 * interp * wing2),
(xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2),
],
2,
0.8,
),
shaking,
rng,
);
routes.push(route);
}
// scale, rotate & translate
routes
.iter()
.map(|route| {
route
.iter()
.map(|&p| {
let p = p_r(p, rotation);
(xmul * scale * p.0 + origin.0, scale * p.1 + origin.1)
})
.collect()
})
.collect()
}
fn art(opts: &Opts) -> Vec<Group> {
let height = opts.height;
let width = opts.width;
let pad = opts.pad;
let mut rng = rng_from_seed(opts.seed);
let perlin = Perlin::new();
let mut passage = Passage::new(0.5, width, height);
let passage_threshold = 5;
let min_route = 2;
let peakfactor = rng.gen_range(-0.001, 0.001)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0);
let ynoisefactor = rng.gen_range(0.02, 0.1);
let yincr = rng.gen_range(1.5, 3.0);
let amp2 = rng.gen_range(1.0, 12.0);
let precision = rng.gen_range(0.1, 0.3);
let offsetstrategy = rng.gen_range(0, 5);
let mut routes = Vec::new();
let mut cave_spawned = false;
let cave_threshold = rng.gen_range(0.5, 0.9) * height;
let mut cave_initial_pos = Vec::new();
let w = rng.gen_range(0.05, 0.2);
let mut base_y = height * 5.0;
let mut miny = height;
let stopy = rng.gen_range(0.2, 0.5) * height;
let mut height_map: Vec<f64> = Vec::new();
loop {
if miny < stopy {
break;
}
if miny < cave_threshold && !cave_spawned {
cave_spawned = true;
let xfrom = (0.5 - w / 2.0) * width;
let xto = (0.5 + w / 2.0) * width;
let yamp = rng.gen_range(8.0, 24.0);
let mut x = xfrom;
let mut route = Vec::new();
loop {
if x > xto {
break;
}
let xi = (x / precision) as usize;
let ybottom = height_map[xi].min(height - pad);
let ytop = ybottom
- yamp
* (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs()
+ 0.6
* (1.0
- (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0)));
height_map[xi] = ytop;
cave_initial_pos.push((x, ytop));
if (route.len() / 2) % 2 == 0 {
route.push((x, ybottom));
route.push((x, ytop));
} else {
route.push((x, ytop));
route.push((x, ybottom));
}
x += precision;
}
routes.push(route);
}
let mut route = Vec::new();
let mut x = pad;
let mut was_outside = true;
loop {
if x > width - pad {
break;
}
let xv = (4.0 - base_y / height) * (x - width / 2.);
let amp = height * 0.05;
let mut y = base_y;
if offsetstrategy == 0 {
y += amp * peakfactor * xv * xv;
}
y += -amp
* perlin
.get([
//
xv * 0.005,
y * 0.02,
77.
+ opts.seed / 7.3
+ perlin.get([
//
-opts.seed * 7.3,
8.3 + xv * 0.02,
y * 0.1,
]),
])
.abs();
if offsetstrategy == 1 {
y += amp * peakfactor * xv * xv;
}
y += amp2
* amp
* perlin.get([
//
8.3 + xv * 0.01,
88.1 + y * ynoisefactor,
opts.seed * 97.3,
]);
if offsetstrategy == 2 {
y += amp * peakfactor * xv * xv;
}
y += amp
* perlin.get([
//
opts.seed * 9.3 - 77.,
xv * 0.1,
y * 0.5,
])
* perlin
.get([
//
xv * 0.02,
88.1 + y * 0.2,
-opts.seed / 7.7,
])
.min(0.0);
if offsetstrategy == 3 {
y += amp * peakfactor * xv * xv;
}
y += 0.1
* amp
* (1.0 - miny / height)
* perlin.get([
//
66666. + opts.seed * 1.3,
88.3 + xv * 0.5,
88.1 + y * 0.5,
]);
if offsetstrategy == 4 {
y += amp * peakfactor * xv * xv;
}
if y < miny {
miny = y;
}
let mut collides = false;
let xi = (x / precision) as usize;
if xi >= height_map.len() {
height_map.push(y);
} else {
if y > height_map[xi] {
collides = true;
} else {
height_map[xi] = y;
}
}
let inside =
!collides && pad < x && x < width - pad && pad < y && y < height - pad;
if inside && passage.get((x, y)) < passage_threshold {
if was_outside {
if route.len() > min_route {
routes.push(route);
}
route = Vec::new();
}
was_out
|
{
-dx1
}
|
conditional_block
|
leetcode.rs
|
= (vec![], &self);
while let Some(n) = temp {
vec.push(n.val);
temp = &n.next;
}
vec
}
/// Build the vector of the node from the a list node.
fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> {
let (mut vec, mut current) = (vec![], self);
while let Some(v) = current.as_mut() {
// use Option::take() to take the value out of the Option, and then leaving a None in its place.
// let node = std::mem::replace(&mut v.next, None);
let node = v.next.take();
vec.push(current);
current = node;
}
vec
}
}
use std::{cell::RefCell, rc::Rc};
/// The definition of a binary tree node (`ListNode`), used by many problems.
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct
|
{
val: i32,
left: Option<Rc<RefCell<Self>>>,
right: Option<Rc<RefCell<Self>>>,
}
impl TreeNode {
#[inline]
fn new(val: i32) -> Self {
TreeNode {
val,
left: None,
right: None,
}
}
#[inline]
fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> {
val.map(|v| Rc::new(RefCell::new(Self::new(v))))
}
/**
Building binary tree from `Vec<Option<i32>>`, Some means valued node, None means empty node.
For example:
`[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to:
```html
1
/ \
2 3
/ \ /
4 5 6
```
`[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to:
```html
1
/ \
2 3
/ \ / \
4 N 5 N
/
6
```
`[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to:
```html
7
/ \
5 11
/ \ / \
4 N 8 13
/ \ / \ /
2 N N N 12
```
*/
fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> {
use std::collections::VecDeque;
let mut root = None; // save the root node
let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointer to child nodes
for v in vec {
// use the macro to deal with child node
macro_rules! update {
($node: expr) => {
if let Some(n) = &*$node {
// add the pointer of child node, use raw pointer to avoid the ownership check
// save the raw pointer of child node of new tree node dosn't need UNSAFE
nodes.push_back(&mut n.borrow_mut().left);
nodes.push_back(&mut n.borrow_mut().right);
}
};
}
let node = Self::new_option(v); // new tree node
if root.is_none() {
root = node;
update!(&root);
} else if let Some(current) = nodes.pop_front() {
unsafe {
// only dereference raw pointer should under UNSAFE
*current = node;
update!(current);
}
}
}
root
}
}
/// For `q15` and `q18`, check if the target is included in the **vec_list**.
fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool {
for old_vec in vec_list {
let mut new_vec = target.clone();
for old_val in old_vec {
for i in 0..new_vec.len() {
// check target vec if have equal element in old_vec
if old_val == &new_vec[i] {
new_vec.remove(i);
break;
}
}
}
// if all elemnets have been removed, mean the vec is duplicate
if new_vec.is_empty() {
return true;
}
}
false
}
/// For `q126` and `q127`, check if two words differ by only one character.
fn check_diff_one_char(old_word: &String, new_word: &String) -> bool {
let mut count = 0;
let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref());
for i in 0..old_u8s.len() {
if old_u8s[i] != new_u8s[i] {
count += 1;
if count > 1 {
return false;
}
}
}
count == 1
}
/// Check element content equivalence without element order.
fn check_element_eq<T>(v1: T, v2: T) -> bool
where
T: IntoIterator,
T::Item: Eq + std::hash::Hash + std::fmt::Debug,
{
use std::collections::HashMap;
let (mut length1, mut length2) = (0, 0);
let (mut content1, mut content2) = (HashMap::new(), HashMap::new());
for v in v1 {
length1 += 1;
*content1.entry(v).or_insert(0) += 1;
}
for v in v2 {
length2 += 1;
*content2.entry(v).or_insert(0) += 1;
}
let eq = content1 == content2 && length1 == length2;
if !eq {
println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}");
println!("Content 1: {content1:?}\nContent 2: {content2:?}");
}
eq
}
/**
Unlike everything else in the languages, macros will remain visible in sub-modules.
Also, unlike everything else in the language, macros are only accessible after their definition.
Or use `#[macro_export]` to export the macro, then use macro with code "crate::xxx_macro_name!".
*/
macro_rules! string_vec {
($($content:expr),*) => {{
let mut temp = vec![];
$(temp.push($content.to_string());)*
temp
}}
}
/// Provide a macro to build TreeNode which can directly use the test case syntax in LeetCode.
macro_rules! build_tree_node {
() => { None };
// macro matcher type 'tt' means "a single token tree",
// which allow a independent sub token tree for other macro usage,
// until the current rust version (1.58),
// only positive number or zero will be treated as a single token,
// a negative number won't be treated as it
($($t:tt),*) => {{
let mut temp = vec![];
$(temp.push(covert_tree_node!($t));)*
TreeNode::from(temp)
}};
}
// Use macro to transform the input content.
macro_rules! covert_tree_node {
(null) => {
None
};
($l:literal) => {
Some($l)
};
}
// normal problems
mod q1008_construct_binary_search_tree_from_preorder_traversal;
mod q102_binary_tree_level_order_traversal;
mod q103_binary_tree_zipzag_level_order_traversal;
mod q107_binary_tree_level_order_traversal_ii;
mod q10_regular_expression_matching;
mod q11_container_with_most_water;
mod q126_word_ladder_ii;
mod q127_word_ladder;
mod q12_integer_to_roman;
mod q16_three_sum_closest;
mod q17_letter_combinations_of_a_phone_number;
mod q18_four_sum;
mod q19_remove_nth_node_from_end_of_list;
mod q200_number_of_islands;
mod q208_implement_trie;
mod q212_word_search_ii;
mod q22_generate_parentheses;
mod q23_merge_k_sorted_lists;
mod q24_swap_nodes_in_pairs;
mod q25_reverse_nodes_in_k_group;
mod q29_divide_two_integers;
mod q2_add_two_numbers;
mod q30_substring_with_concatenation_of_all_words;
mod q31_next_permutation;
mod q32_longest_valid_parentheses;
mod q33_search_in_rotated_sorted_array;
mod q34_find_first_and_last_position_of_element_in_sorted_array;
mod q35_valid_sudoku;
mod q37_sudoku_solver;
mod q39_combination_sum;
mod q3_length_of_longest_substring;
mod q40
|
TreeNode
|
identifier_name
|
leetcode.rs
|
= (vec![], &self);
while let Some(n) = temp {
vec.push(n.val);
temp = &n.next;
}
vec
}
/// Build the vector of the node from the a list node.
fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> {
let (mut vec, mut current) = (vec![], self);
while let Some(v) = current.as_mut() {
// use Option::take() to take the value out of the Option, and then leaving a None in its place.
// let node = std::mem::replace(&mut v.next, None);
let node = v.next.take();
vec.push(current);
current = node;
}
vec
}
}
use std::{cell::RefCell, rc::Rc};
/// The definition of a binary tree node (`ListNode`), used by many problems.
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct TreeNode {
val: i32,
left: Option<Rc<RefCell<Self>>>,
right: Option<Rc<RefCell<Self>>>,
}
impl TreeNode {
#[inline]
fn new(val: i32) -> Self {
TreeNode {
val,
left: None,
right: None,
}
}
#[inline]
fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> {
val.map(|v| Rc::new(RefCell::new(Self::new(v))))
}
/**
Building binary tree from `Vec<Option<i32>>`, Some means valued node, None means empty node.
For example:
`[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to:
```html
1
/ \
2 3
/ \ /
4 5 6
```
`[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to:
```html
1
/ \
2 3
/ \ / \
4 N 5 N
/
6
```
`[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to:
```html
7
/ \
5 11
/ \ / \
4 N 8 13
/ \ / \ /
2 N N N 12
```
*/
fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> {
use std::collections::VecDeque;
let mut root = None; // save the root node
let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointer to child nodes
for v in vec {
// use the macro to deal with child node
macro_rules! update {
($node: expr) => {
if let Some(n) = &*$node {
// add the pointer of child node, use raw pointer to avoid the ownership check
// save the raw pointer of child node of new tree node dosn't need UNSAFE
nodes.push_back(&mut n.borrow_mut().left);
nodes.push_back(&mut n.borrow_mut().right);
}
};
}
let node = Self::new_option(v); // new tree node
if root.is_none() {
root = node;
update!(&root);
} else if let Some(current) = nodes.pop_front() {
unsafe {
// only dereference raw pointer should under UNSAFE
*current = node;
update!(current);
}
}
}
root
}
}
/// For `q15` and `q18`, check if the target is included in the **vec_list**.
fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool {
for old_vec in vec_list {
let mut new_vec = target.clone();
for old_val in old_vec {
for i in 0..new_vec.len() {
// check target vec if have equal element in old_vec
if old_val == &new_vec[i] {
new_vec.remove(i);
break;
}
}
}
// if all elemnets have been removed, mean the vec is duplicate
if new_vec.is_empty() {
return true;
}
}
false
}
/// For `q126` and `q127`, check if two words differ by only one character.
fn check_diff_one_char(old_word: &String, new_word: &String) -> bool {
let mut count = 0;
let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref());
for i in 0..old_u8s.len() {
if old_u8s[i] != new_u8s[i] {
count += 1;
if count > 1
|
}
}
count == 1
}
/// Check element content equivalence without element order.
fn check_element_eq<T>(v1: T, v2: T) -> bool
where
T: IntoIterator,
T::Item: Eq + std::hash::Hash + std::fmt::Debug,
{
use std::collections::HashMap;
let (mut length1, mut length2) = (0, 0);
let (mut content1, mut content2) = (HashMap::new(), HashMap::new());
for v in v1 {
length1 += 1;
*content1.entry(v).or_insert(0) += 1;
}
for v in v2 {
length2 += 1;
*content2.entry(v).or_insert(0) += 1;
}
let eq = content1 == content2 && length1 == length2;
if !eq {
println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}");
println!("Content 1: {content1:?}\nContent 2: {content2:?}");
}
eq
}
/**
Unlike everything else in the languages, macros will remain visible in sub-modules.
Also, unlike everything else in the language, macros are only accessible after their definition.
Or use `#[macro_export]` to export the macro, then use macro with code "crate::xxx_macro_name!".
*/
macro_rules! string_vec {
($($content:expr),*) => {{
let mut temp = vec![];
$(temp.push($content.to_string());)*
temp
}}
}
/// Provide a macro to build TreeNode which can directly use the test case syntax in LeetCode.
macro_rules! build_tree_node {
() => { None };
// macro matcher type 'tt' means "a single token tree",
// which allow a independent sub token tree for other macro usage,
// until the current rust version (1.58),
// only positive number or zero will be treated as a single token,
// a negative number won't be treated as it
($($t:tt),*) => {{
let mut temp = vec![];
$(temp.push(covert_tree_node!($t));)*
TreeNode::from(temp)
}};
}
// Use macro to transform the input content.
macro_rules! covert_tree_node {
(null) => {
None
};
($l:literal) => {
Some($l)
};
}
// normal problems
mod q1008_construct_binary_search_tree_from_preorder_traversal;
mod q102_binary_tree_level_order_traversal;
mod q103_binary_tree_zipzag_level_order_traversal;
mod q107_binary_tree_level_order_traversal_ii;
mod q10_regular_expression_matching;
mod q11_container_with_most_water;
mod q126_word_ladder_ii;
mod q127_word_ladder;
mod q12_integer_to_roman;
mod q16_three_sum_closest;
mod q17_letter_combinations_of_a_phone_number;
mod q18_four_sum;
mod q19_remove_nth_node_from_end_of_list;
mod q200_number_of_islands;
mod q208_implement_trie;
mod q212_word_search_ii;
mod q22_generate_parentheses;
mod q23_merge_k_sorted_lists;
mod q24_swap_nodes_in_pairs;
mod q25_reverse_nodes_in_k_group;
mod q29_divide_two_integers;
mod q2_add_two_numbers;
mod q30_substring_with_concatenation_of_all_words;
mod q31_next_permutation;
mod q32_longest_valid_parentheses;
mod q33_search_in_rotated_sorted_array;
mod q34_find_first_and_last_position_of_element_in_sorted_array;
mod q35_valid_sudoku;
mod q37_sudoku_solver;
mod q39_combination_sum;
mod q3_length_of_longest_substring;
mod q4
|
{
return false;
}
|
conditional_block
|
leetcode.rs
|
), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to:
```html
1
/ \
2 3
/ \ / \
4 N 5 N
/
6
```
`[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to:
```html
7
/ \
5 11
/ \ / \
4 N 8 13
/ \ / \ /
2 N N N 12
```
*/
fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> {
use std::collections::VecDeque;
let mut root = None; // save the root node
let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointer to child nodes
for v in vec {
// use the macro to deal with child node
macro_rules! update {
($node: expr) => {
if let Some(n) = &*$node {
// add the pointer of child node, use raw pointer to avoid the ownership check
// save the raw pointer of child node of new tree node dosn't need UNSAFE
nodes.push_back(&mut n.borrow_mut().left);
nodes.push_back(&mut n.borrow_mut().right);
}
};
}
let node = Self::new_option(v); // new tree node
if root.is_none() {
root = node;
update!(&root);
} else if let Some(current) = nodes.pop_front() {
unsafe {
// only dereference raw pointer should under UNSAFE
*current = node;
update!(current);
}
}
}
root
}
}
/// For `q15` and `q18`, check if the target is included in the **vec_list**.
fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool {
for old_vec in vec_list {
let mut new_vec = target.clone();
for old_val in old_vec {
for i in 0..new_vec.len() {
// check target vec if have equal element in old_vec
if old_val == &new_vec[i] {
new_vec.remove(i);
break;
}
}
}
// if all elemnets have been removed, mean the vec is duplicate
if new_vec.is_empty() {
return true;
}
}
false
}
/// For `q126` and `q127`, check if two words differ by only one character.
fn check_diff_one_char(old_word: &String, new_word: &String) -> bool {
let mut count = 0;
let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref());
for i in 0..old_u8s.len() {
if old_u8s[i] != new_u8s[i] {
count += 1;
if count > 1 {
return false;
}
}
}
count == 1
}
/// Check element content equivalence without element order.
fn check_element_eq<T>(v1: T, v2: T) -> bool
where
T: IntoIterator,
T::Item: Eq + std::hash::Hash + std::fmt::Debug,
{
use std::collections::HashMap;
let (mut length1, mut length2) = (0, 0);
let (mut content1, mut content2) = (HashMap::new(), HashMap::new());
for v in v1 {
length1 += 1;
*content1.entry(v).or_insert(0) += 1;
}
for v in v2 {
length2 += 1;
*content2.entry(v).or_insert(0) += 1;
}
let eq = content1 == content2 && length1 == length2;
if !eq {
println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}");
println!("Content 1: {content1:?}\nContent 2: {content2:?}");
}
eq
}
/**
Unlike everything else in the languages, macros will remain visible in sub-modules.
Also, unlike everything else in the language, macros are only accessible after their definition.
Or use `#[macro_export]` to export the macro, then use macro with code "crate::xxx_macro_name!".
*/
macro_rules! string_vec {
($($content:expr),*) => {{
let mut temp = vec![];
$(temp.push($content.to_string());)*
temp
}}
}
/// Provide a macro to build TreeNode which can directly use the test case syntax in LeetCode.
macro_rules! build_tree_node {
() => { None };
// macro matcher type 'tt' means "a single token tree",
// which allow a independent sub token tree for other macro usage,
// until the current rust version (1.58),
// only positive number or zero will be treated as a single token,
// a negative number won't be treated as it
($($t:tt),*) => {{
let mut temp = vec![];
$(temp.push(covert_tree_node!($t));)*
TreeNode::from(temp)
}};
}
// Use macro to transform the input content.
macro_rules! covert_tree_node {
(null) => {
None
};
($l:literal) => {
Some($l)
};
}
// normal problems
mod q1008_construct_binary_search_tree_from_preorder_traversal;
mod q102_binary_tree_level_order_traversal;
mod q103_binary_tree_zipzag_level_order_traversal;
mod q107_binary_tree_level_order_traversal_ii;
mod q10_regular_expression_matching;
mod q11_container_with_most_water;
mod q126_word_ladder_ii;
mod q127_word_ladder;
mod q12_integer_to_roman;
mod q16_three_sum_closest;
mod q17_letter_combinations_of_a_phone_number;
mod q18_four_sum;
mod q19_remove_nth_node_from_end_of_list;
mod q200_number_of_islands;
mod q208_implement_trie;
mod q212_word_search_ii;
mod q22_generate_parentheses;
mod q23_merge_k_sorted_lists;
mod q24_swap_nodes_in_pairs;
mod q25_reverse_nodes_in_k_group;
mod q29_divide_two_integers;
mod q2_add_two_numbers;
mod q30_substring_with_concatenation_of_all_words;
mod q31_next_permutation;
mod q32_longest_valid_parentheses;
mod q33_search_in_rotated_sorted_array;
mod q34_find_first_and_last_position_of_element_in_sorted_array;
mod q35_valid_sudoku;
mod q37_sudoku_solver;
mod q39_combination_sum;
mod q3_length_of_longest_substring;
mod q407_trapping_rain_water_ii;
mod q40_combination_sum_ii;
mod q41_first_missing_positive;
mod q42_trapping_rain_water;
mod q43_multiply_strings;
mod q44_wildcard_matching;
mod q454_four_sum_ii;
mod q45_jump_game_ii;
mod q46_permutations;
mod q47_permutations_ii;
mod q48_rotate_image;
mod q49_group_anagrams;
mod q4_find_median_sorted_arrays;
mod q50_pow_x_n;
mod q51_n_queens;
mod q525_contiguous_array;
mod q52_n_queens_ii;
mod q53_maximum_subarray;
mod q543_diameter_of_binary_tree;
mod q54_spiral_matrix;
mod q55_jump_game;
mod q56_merge_intervals;
mod q57_insert_interval;
mod q59_spiral_matrix_ii;
mod q5_longest_palindrome;
mod q60_permutation_sequence;
mod q61_rotate_list;
mod q62_unique_paths;
mod q63_unique_paths_ii;
mod q64_minimum_path_sum;
mod q65_valid_number;
mod q68_text_justification;
mod q6_zipzag_conversion;
mod q71_simplify_path;
mod q72_edit_distance;
mod q73_set_matrix_zeroes;
mod q74_search_a_2d_matrix;
mod q75_sort_colors;
mod q76_minimum_window_substring;
mod q77_combinations;
mod q78_subsets;
mod q79_word_search;
mod q7_reverse_integer;
mod q80_remove_duplicates_from_sorted_array_ii;
mod q81_search_in_rotated_sorted_array_ii;
mod q82_remove_duplicates_from_sorted_list_ii;
mod q844_backspace_string_compare;
|
mod q84_largest_rectangle_in_histogram;
mod q85_maximal_rectangle;
mod q86_partition_list;
|
random_line_split
|
|
lib.rs
|
/ noun when `n >= 1`, otherwise 0 names.
///
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Petnames<'a> {
pub adjectives: Words<'a>,
pub adverbs: Words<'a>,
pub names: Words<'a>,
}
#[cfg(feature = "default_dictionary")]
mod words {
include!(concat!(env!("OUT_DIR"), "/words.rs"));
}
impl<'a> Petnames<'a> {
/// Constructs a new `Petnames` from the default (small) word lists.
#[cfg(feature = "default_dictionary")]
pub fn new() -> Self {
Self::default()
}
/// Constructs a new `Petnames` from the small word lists.
#[cfg(feature = "default_dictionary")]
pub fn small() -> Self {
Self {
adjectives: Cow::from(&words::small::ADJECTIVES[..]),
adverbs: Cow::from(&words::small::ADVERBS[..]),
names: Cow::from(&words::small::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the medium word lists.
#[cfg(feature = "default_dictionary")]
pub fn medium() -> Self {
Self {
adjectives: Cow::from(&words::medium::ADJECTIVES[..]),
adverbs: Cow::from(&words::medium::ADVERBS[..]),
names: Cow::from(&words::medium::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the large word lists.
#[cfg(feature = "default_dictionary")]
pub fn large() -> Self {
Self {
adjectives: Cow::from(&words::large::ADJECTIVES[..]),
adverbs: Cow::from(&words::large::ADVERBS[..]),
names: Cow::from(&words::large::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the given word lists.
///
/// The words are extracted from the given strings by splitting on whitespace.
pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self {
Self {
adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()),
adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()),
names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()),
}
}
/// Keep words matching a predicate.
///
/// # Examples
///
/// ```rust
/// # #[cfg(feature = "default_dictionary")]
/// let mut petnames = petname::Petnames::default();
/// # #[cfg(feature = "default_dictionary")]
/// petnames.retain(|s| s.starts_with("b"));
/// # #[cfg(feature = "default_dictionary")]
/// # #[cfg(feature = "std_rng")]
/// petnames.generate_one(2, ".");
/// ```
///
/// This is merely a convenience wrapper that applies the same predicate to
/// the adjectives, adverbs, and names lists.
///
pub fn retain<F>(&mut self, mut predicate: F)
where
F: FnMut(&str) -> bool,
{
self.adjectives.to_mut().retain(|word| predicate(word));
self.adverbs.to_mut().retain(|word| predicate(word));
self.names.to_mut().retain(|word| predicate(word));
}
/// Calculate the cardinality of this `Petnames`.
///
/// If this is low, names may be repeated by the generator with a higher
/// frequency than your use-case may allow. If it is 0 (zero) the generator
/// will panic (unless `words` is also zero).
///
/// This can saturate. If the total possible combinations of words exceeds
/// `u128::MAX` then this will return `u128::MAX`.
pub fn cardinality(&self, words: u8) -> u128 {
Lists::new(words)
.map(|list| match list {
List::Adverb => self.adverbs.len() as u128,
List::Adjective => self.adjectives.len() as u128,
List::Name => self.names.len() as u128,
})
.reduce(u128::saturating_mul)
.unwrap_or(0u128)
}
/// Generate a new petname.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// petname::Petnames::default().generate(&mut rng, 7, ":");
/// ```
///
/// # Notes
///
/// This may return fewer words than you request if one or more of the word
/// lists are empty. For example, if there are no adverbs, requesting 3 or
/// more words may still yield only "doubtful-salmon".
///
pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String
where
RNG: rand::Rng,
{
Itertools::intersperse(
Lists::new(words).filter_map(|list| match list {
List::Adverb => self.adverbs.choose(rng).copied(),
List::Adjective => self.adjectives.choose(rng).copied(),
List::Name => self.names.choose(rng).copied(),
}),
separator,
)
.collect::<String>()
}
/// Generate a single new petname.
///
/// This is like `generate` but uses `rand::thread_rng` as the random
/// source. For efficiency use `generate` when creating multiple names, or
/// when you want to use a custom source of randomness.
#[cfg(feature = "std_rng")]
pub fn generate_one(&self, words: u8, separator: &str) -> String {
self.generate(&mut rand::thread_rng(), words, separator)
}
/// Iterator yielding petnames.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let petnames = petname::Petnames::default();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut iter = petnames.iter(&mut rng, 4, "_");
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// println!("name: {}", iter.next().unwrap());
/// ```
///
pub fn
|
<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG>
where
RNG: rand::Rng,
{
Names { petnames: self, rng, words, separator: separator.to_string() }
}
/// Iterator yielding unique – i.e. non-repeating – petnames.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let petnames = petname::Petnames::default();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_");
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// println!("name: {}", iter.next().unwrap());
/// ```
///
pub fn iter_non_repeating<RNG>(
&'a self,
rng: &'a mut RNG,
words: u8,
separator: &str,
) -> impl Iterator<Item = String> + 'a
where
RNG: rand::Rng,
{
let lists: Vec<&'a Words<'a>> = Lists::new(words)
.map(|list| match list {
List::Adverb => &self.adverbs,
List::Adjective => &self.adjectives,
List::Name => &self.names,
})
.collect();
NamesProduct::shuffled(&lists, rng, separator)
}
}
#[cfg(feature = "default_dictionary")]
impl<'a> Default for Petnames<'a> {
fn default() -> Self {
Self::small()
}
}
/// Enum representing which word list to use.
#[derive(Debug, PartialEq)]
enum List {
Adverb,
Adjective,
Name,
}
/// Iterator, yielding which word list to use next.
///
/// This yields the appropriate list – [adverbs][List::Adverb],
/// [adjectives][List::Adjective]s, [names][List::Name] – from which to select
/// a word when constructing a petname of `n` words. For example, if you want 4
/// words in your petname, this will first yield [List::Adverb], then
/// [List::Adverb] again, then
|
iter
|
identifier_name
|
lib.rs
|
name / noun when `n >= 1`, otherwise 0 names.
///
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Petnames<'a> {
pub adjectives: Words<'a>,
pub adverbs: Words<'a>,
pub names: Words<'a>,
}
#[cfg(feature = "default_dictionary")]
mod words {
include!(concat!(env!("OUT_DIR"), "/words.rs"));
}
impl<'a> Petnames<'a> {
/// Constructs a new `Petnames` from the default (small) word lists.
#[cfg(feature = "default_dictionary")]
pub fn new() -> Self {
Self::default()
}
/// Constructs a new `Petnames` from the small word lists.
#[cfg(feature = "default_dictionary")]
pub fn small() -> Self {
Self {
adjectives: Cow::from(&words::small::ADJECTIVES[..]),
adverbs: Cow::from(&words::small::ADVERBS[..]),
names: Cow::from(&words::small::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the medium word lists.
#[cfg(feature = "default_dictionary")]
pub fn medium() -> Self {
Self {
adjectives: Cow::from(&words::medium::ADJECTIVES[..]),
adverbs: Cow::from(&words::medium::ADVERBS[..]),
names: Cow::from(&words::medium::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the large word lists.
#[cfg(feature = "default_dictionary")]
pub fn large() -> Self {
Self {
adjectives: Cow::from(&words::large::ADJECTIVES[..]),
adverbs: Cow::from(&words::large::ADVERBS[..]),
names: Cow::from(&words::large::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the given word lists.
///
/// The words are extracted from the given strings by splitting on whitespace.
pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self {
Self {
adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()),
adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()),
names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()),
}
}
/// Keep words matching a predicate.
///
/// # Examples
///
/// ```rust
/// # #[cfg(feature = "default_dictionary")]
/// let mut petnames = petname::Petnames::default();
/// # #[cfg(feature = "default_dictionary")]
/// petnames.retain(|s| s.starts_with("b"));
/// # #[cfg(feature = "default_dictionary")]
/// # #[cfg(feature = "std_rng")]
/// petnames.generate_one(2, ".");
/// ```
///
/// This is merely a convenience wrapper that applies the same predicate to
|
{
self.adjectives.to_mut().retain(|word| predicate(word));
self.adverbs.to_mut().retain(|word| predicate(word));
self.names.to_mut().retain(|word| predicate(word));
}
/// Calculate the cardinality of this `Petnames`.
///
/// If this is low, names may be repeated by the generator with a higher
/// frequency than your use-case may allow. If it is 0 (zero) the generator
/// will panic (unless `words` is also zero).
///
/// This can saturate. If the total possible combinations of words exceeds
/// `u128::MAX` then this will return `u128::MAX`.
pub fn cardinality(&self, words: u8) -> u128 {
Lists::new(words)
.map(|list| match list {
List::Adverb => self.adverbs.len() as u128,
List::Adjective => self.adjectives.len() as u128,
List::Name => self.names.len() as u128,
})
.reduce(u128::saturating_mul)
.unwrap_or(0u128)
}
/// Generate a new petname.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// petname::Petnames::default().generate(&mut rng, 7, ":");
/// ```
///
/// # Notes
///
/// This may return fewer words than you request if one or more of the word
/// lists are empty. For example, if there are no adverbs, requesting 3 or
/// more words may still yield only "doubtful-salmon".
///
pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String
where
RNG: rand::Rng,
{
Itertools::intersperse(
Lists::new(words).filter_map(|list| match list {
List::Adverb => self.adverbs.choose(rng).copied(),
List::Adjective => self.adjectives.choose(rng).copied(),
List::Name => self.names.choose(rng).copied(),
}),
separator,
)
.collect::<String>()
}
/// Generate a single new petname.
///
/// This is like `generate` but uses `rand::thread_rng` as the random
/// source. For efficiency use `generate` when creating multiple names, or
/// when you want to use a custom source of randomness.
#[cfg(feature = "std_rng")]
pub fn generate_one(&self, words: u8, separator: &str) -> String {
self.generate(&mut rand::thread_rng(), words, separator)
}
/// Iterator yielding petnames.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let petnames = petname::Petnames::default();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut iter = petnames.iter(&mut rng, 4, "_");
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// println!("name: {}", iter.next().unwrap());
/// ```
///
pub fn iter<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG>
where
RNG: rand::Rng,
{
Names { petnames: self, rng, words, separator: separator.to_string() }
}
/// Iterator yielding unique – i.e. non-repeating – petnames.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let petnames = petname::Petnames::default();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_");
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// println!("name: {}", iter.next().unwrap());
/// ```
///
pub fn iter_non_repeating<RNG>(
&'a self,
rng: &'a mut RNG,
words: u8,
separator: &str,
) -> impl Iterator<Item = String> + 'a
where
RNG: rand::Rng,
{
let lists: Vec<&'a Words<'a>> = Lists::new(words)
.map(|list| match list {
List::Adverb => &self.adverbs,
List::Adjective => &self.adjectives,
List::Name => &self.names,
})
.collect();
NamesProduct::shuffled(&lists, rng, separator)
}
}
#[cfg(feature = "default_dictionary")]
impl<'a> Default for Petnames<'a> {
fn default() -> Self {
Self::small()
}
}
/// Enum representing which word list to use.
#[derive(Debug, PartialEq)]
enum List {
Adverb,
Adjective,
Name,
}
/// Iterator, yielding which word list to use next.
///
/// This yields the appropriate list – [adverbs][List::Adverb],
/// [adjectives][List::Adjective]s, [names][List::Name] – from which to select
/// a word when constructing a petname of `n` words. For example, if you want 4
/// words in your petname, this will first yield [List::Adverb], then
/// [List::Adverb] again, then [
|
/// the adjectives, adverbs, and names lists.
///
pub fn retain<F>(&mut self, mut predicate: F)
where
F: FnMut(&str) -> bool,
|
random_line_split
|
lib.rs
|
adjectives: Cow::from(&words::medium::ADJECTIVES[..]),
adverbs: Cow::from(&words::medium::ADVERBS[..]),
names: Cow::from(&words::medium::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the large word lists.
#[cfg(feature = "default_dictionary")]
pub fn large() -> Self {
Self {
adjectives: Cow::from(&words::large::ADJECTIVES[..]),
adverbs: Cow::from(&words::large::ADVERBS[..]),
names: Cow::from(&words::large::NAMES[..]),
}
}
/// Constructs a new `Petnames` from the given word lists.
///
/// The words are extracted from the given strings by splitting on whitespace.
pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self {
Self {
adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()),
adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()),
names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()),
}
}
/// Keep words matching a predicate.
///
/// # Examples
///
/// ```rust
/// # #[cfg(feature = "default_dictionary")]
/// let mut petnames = petname::Petnames::default();
/// # #[cfg(feature = "default_dictionary")]
/// petnames.retain(|s| s.starts_with("b"));
/// # #[cfg(feature = "default_dictionary")]
/// # #[cfg(feature = "std_rng")]
/// petnames.generate_one(2, ".");
/// ```
///
/// This is merely a convenience wrapper that applies the same predicate to
/// the adjectives, adverbs, and names lists.
///
pub fn retain<F>(&mut self, mut predicate: F)
where
F: FnMut(&str) -> bool,
{
self.adjectives.to_mut().retain(|word| predicate(word));
self.adverbs.to_mut().retain(|word| predicate(word));
self.names.to_mut().retain(|word| predicate(word));
}
/// Calculate the cardinality of this `Petnames`.
///
/// If this is low, names may be repeated by the generator with a higher
/// frequency than your use-case may allow. If it is 0 (zero) the generator
/// will panic (unless `words` is also zero).
///
/// This can saturate. If the total possible combinations of words exceeds
/// `u128::MAX` then this will return `u128::MAX`.
pub fn cardinality(&self, words: u8) -> u128 {
Lists::new(words)
.map(|list| match list {
List::Adverb => self.adverbs.len() as u128,
List::Adjective => self.adjectives.len() as u128,
List::Name => self.names.len() as u128,
})
.reduce(u128::saturating_mul)
.unwrap_or(0u128)
}
/// Generate a new petname.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// petname::Petnames::default().generate(&mut rng, 7, ":");
/// ```
///
/// # Notes
///
/// This may return fewer words than you request if one or more of the word
/// lists are empty. For example, if there are no adverbs, requesting 3 or
/// more words may still yield only "doubtful-salmon".
///
pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String
where
RNG: rand::Rng,
{
Itertools::intersperse(
Lists::new(words).filter_map(|list| match list {
List::Adverb => self.adverbs.choose(rng).copied(),
List::Adjective => self.adjectives.choose(rng).copied(),
List::Name => self.names.choose(rng).copied(),
}),
separator,
)
.collect::<String>()
}
/// Generate a single new petname.
///
/// This is like `generate` but uses `rand::thread_rng` as the random
/// source. For efficiency use `generate` when creating multiple names, or
/// when you want to use a custom source of randomness.
#[cfg(feature = "std_rng")]
pub fn generate_one(&self, words: u8, separator: &str) -> String {
self.generate(&mut rand::thread_rng(), words, separator)
}
/// Iterator yielding petnames.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let petnames = petname::Petnames::default();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut iter = petnames.iter(&mut rng, 4, "_");
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// println!("name: {}", iter.next().unwrap());
/// ```
///
pub fn iter<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG>
where
RNG: rand::Rng,
{
Names { petnames: self, rng, words, separator: separator.to_string() }
}
/// Iterator yielding unique – i.e. non-repeating – petnames.
///
/// # Examples
///
/// ```rust
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut rng = rand::thread_rng();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let petnames = petname::Petnames::default();
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_");
/// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))]
/// println!("name: {}", iter.next().unwrap());
/// ```
///
pub fn iter_non_repeating<RNG>(
&'a self,
rng: &'a mut RNG,
words: u8,
separator: &str,
) -> impl Iterator<Item = String> + 'a
where
RNG: rand::Rng,
{
let lists: Vec<&'a Words<'a>> = Lists::new(words)
.map(|list| match list {
List::Adverb => &self.adverbs,
List::Adjective => &self.adjectives,
List::Name => &self.names,
})
.collect();
NamesProduct::shuffled(&lists, rng, separator)
}
}
#[cfg(feature = "default_dictionary")]
impl<'a> Default for Petnames<'a> {
fn default() -> Self {
Self::small()
}
}
/// Enum representing which word list to use.
#[derive(Debug, PartialEq)]
enum List {
Adverb,
Adjective,
Name,
}
/// Iterator, yielding which word list to use next.
///
/// This yields the appropriate list – [adverbs][List::Adverb],
/// [adjectives][List::Adjective]s, [names][List::Name] – from which to select
/// a word when constructing a petname of `n` words. For example, if you want 4
/// words in your petname, this will first yield [List::Adverb], then
/// [List::Adverb] again, then [List::Adjective], and lastly [List::Name].
#[derive(Debug, PartialEq)]
enum Lists {
Adverb(u8),
Adjective,
Name,
Done,
}
impl Lists {
fn new(words: u8) -> Self {
match words {
0 => Self::Done,
1 => Self::Name,
2 => Self::Adjective,
n => Self::Adverb(n - 3),
}
}
fn advance(&mut self) {
*self = match self {
Self::Adverb(0) => Self::Adjective,
Self::Adverb(remaining) => Self::Adverb(*remaining - 1),
Self::Adjective => Self::Name,
Self::Name | Self::Done => Self::Done,
}
}
}
impl Iterator for Lists {
type Item = List;
fn next(&mut self) -> Option<Self::Item> {
|
let list = match self {
Self::Adjective => Some(List::Adjective),
Self::Adverb(_) => Some(List::Adverb),
Self::Name => Some(List::Name),
Self::Done => None,
};
self.advance();
list
}
fn
|
identifier_body
|
|
califa2_2.py
|
lcula a Concentracao de uma populacao, usando a definicao
de Conselice(2014) http://iopscience.iop.org/article/10.1086/375001/pdf
'''
a=1
radius=df.sort_values('raio')
r20=radius.iat[int(0.2*len(df)),-1]
r80=radius.iat[int(0.8*len(df)),-1]
Conc = 5*np.log((r80/r20))
return Conc
def Z(df0,gal,Conc,ordem):
'''definindo uma funcao para ordenar a propridade de interesse
dividindo-o em bins de igual tamanho e calculando alguns parametros'''
df_Z = pd.DataFrame()
propr = []
err_prop = []
raio = []
err_raio = []
halpha = []
err_halpha = []
dens = []
err_dens = []
idade = []
err_age = []
semia = []
err_semia = []
conc = []
df = df0.sort_values(by=ordem)
df = df.reset_index()
del df['index']
cx, cy = mom.centro_mass(df)
delta = len(df)/50 #Quantidade de bins
j=0
for i in range(0,(len(df)), delta):
df1 = df.ix[i:i+delta,:]
propr.append(df1[ordem].mean())
err_prop.append(df1[ordem].std())
raio.append(df1['raio'].mean())
err_raio.append(df1['raio'].std())
halpha.append(df1['halpha'].mean())
err_halpha.append(df1['halpha'].std())
dens.append(df1['mass'].mean())
err_dens.append(df1['mass'].std())
idade.append(df1['age'].mean())
err_age.append(df1['age'].std())
semia.append(df1['a'].mean())
err_semia.append(df1['a'].std())
conc.append(C(df1))
j=j+1
df_Z[ordem] = propr
df_Z['erro'] = err_prop
df_Z['raio_m'] = raio
df_Z['err_raio'] = err_raio
df_Z['age_m'] = idade
df_Z['err_age'] = err_age
df_Z['mass_m'] = dens
df_Z['err_mass'] = err_dens
df_Z['halpha_m'] = halpha
df_Z['err_halpha'] = err_halpha
df_Z['a_m'] = semia
df_Z['err_a'] = err_semia
df_Z[Conc] = conc
return df_Z
def obtendo_dados(img,param):
'''função para leitura do arquivo fits, criando um dataframe com os dados'''
df = pd.DataFrame()
nrows, ncols = img.shape
xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )
table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() ))
temp = pd.DataFrame(table, columns=['x','y',param])
df = pd.concat([df,temp], axis=1)
return(df)
def plots(df,param1,param2,param3,diretorio):
'''Função para plotar os gráficos'''
plt.figure()
incr = param3*(df.ix[:,0].mean())
plt.xlim([(df.ix[:,0].min()-(incr)),(df.ix[:,0].max()+(incr))])
plt.scatter(df.ix[:,0], df.ix[:,12])
plt.title(gal+' '+tipo, fontsize=30)
plt.ylabel('Concentraction', fontsize=30)
plt.xlabel(param2, fontsize=30)
plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2))
plt.close()
# plt.figure()
# plt.title('Distribuicao C(%s)- %s' %(param2,param1))
# df.ix[:,0].hist(bins=100)
# plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2))
# plt.close()
data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew'
age = pd.read_csv('Paty_at_flux__yx/age.csv')
mass = pd.read_csv('PatImages/mass.csv')
halpha = pd.read_csv('Hamaps/halpha.csv')
#halpha = pd.read_csv('Hamaps/teste.csv')
hu1 = []
hu2 = []
hu3 = []
hu4 = []
hu5 = []
hu6 = []
hu7 = []
hugal = []
hutype = []
df_hu = pd.DataFrame()
for i_gal in range(len(halpha)):
#for i_gal in range(0,2):
print(bcolors.FAIL +'-'*79+ bcolors.ENDC)
print(bcolors.FAIL + '-'*33 + 'OBJETO: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC)
print(bcolors.FAIL +'-'*79+ bcolors.ENDC)
plt.close()
image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal]))
img = get_image(image_ha)
#plotando a imagem fits
plt.figure(1)
plt.clf()
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
plt.axis([0,77,0,72])
plt.xlabel('X',fontweight='bold')
plt.ylabel('Y',fontweight='bold')
imgplot = plt.imshow(100*np.log10(img/255), cmap=cx)
titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal]
plt.title(titulo)
#plt.colorbar()
figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal]
plt.savefig(figura)
#obtendo os dados de Halpha da imagem fits
df_ha = obtendo_dados(img,'halpha')
#obtendo os dados de densidade de massa da imagem fits
image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_mass)
df_mass = obtendo_dados(img, 'mass')
#obtendo os dados de idade da imagem fits
image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_age)
df_age = obtendo_dados(img, 'age')
#selecionando apenas os dados de idade > 0 e mass > 0
df0 = pd.merge(df_age,df_mass)
df1 = pd.merge(df0,df_ha, how='inner')
df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)]
Re = mom.equivalent_radius(df)
cx, cy = mom.centro_mass(df)
tetha, exc, a, b = mom.param_elipse(df)
df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2)
acres = math.radians(180)
d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2
e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2
df['a'] = np.sqrt(d + e/((1-exc)**2))
gal = halpha['num_gal'][i_gal]
tipo = halpha['type'][i_gal]
age_test = Z(df,gal,'conc_age','age')
mass_test = Z(df,gal,'conc_mass','mass')
ha_test = Z(df,gal,'conc_ha','halpha')
raio_test = Z(df,gal,'conc_raio', 'raio')
a_test = Z(df,gal, 'conc_a', 'a')
plots(age_test,gal,'Age',0,'concentracao')
plots(mass_test,gal,'Mass_density',1,'concentracao')
plots(ha_test,gal,'Halpha',1,'concentracao')
#perfis circulares
plt.figure(1)
plt.title(gal)
ax1 = plt.subplot(311)
plt.title('%s - %s' %(gal,
|
as imagens fits'''
img = f_sdss[0].data
return img
def C(df):
'''funcao que ca
|
identifier_body
|
|
califa2_2.py
|
plt.axis([0,77,0,72])
plt.xlabel('X',fontweight='bold')
plt.ylabel('Y',fontweight='bold')
imgplot = plt.imshow(100*np.log10(img/255), cmap=cx)
titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal]
plt.title(titulo)
#plt.colorbar()
figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal]
plt.savefig(figura)
#obtendo os dados de Halpha da imagem fits
df_ha = obtendo_dados(img,'halpha')
#obtendo os dados de densidade de massa da imagem fits
image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_mass)
df_mass = obtendo_dados(img, 'mass')
#obtendo os dados de idade da imagem fits
image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_age)
df_age = obtendo_dados(img, 'age')
#selecionando apenas os dados de idade > 0 e mass > 0
df0 = pd.merge(df_age,df_mass)
df1 = pd.merge(df0,df_ha, how='inner')
df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)]
Re = mom.equivalent_radius(df)
cx, cy = mom.centro_mass(df)
tetha, exc, a, b = mom.param_elipse(df)
df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2)
acres = math.radians(180)
d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2
e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2
df['a'] = np.sqrt(d + e/((1-exc)**2))
gal = halpha['num_gal'][i_gal]
tipo = halpha['type'][i_gal]
age_test = Z(df,gal,'conc_age','age')
mass_test = Z(df,gal,'conc_mass','mass')
ha_test = Z(df,gal,'conc_ha','halpha')
raio_test = Z(df,gal,'conc_raio', 'raio')
a_test = Z(df,gal, 'conc_a', 'a')
plots(age_test,gal,'Age',0,'concentracao')
plots(mass_test,gal,'Mass_density',1,'concentracao')
plots(ha_test,gal,'Halpha',1,'concentracao')
#perfis circulares
plt.figure(1)
plt.title(gal)
ax1 = plt.subplot(311)
plt.title('%s - %s' %(gal, tipo))
ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o')
plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean Age')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(312, sharex=ax1)
plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)),
(raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))])
ax2.errorbar(raio_test.raio_m/Re, raio_test.halpha_m, yerr=raio_test.err_halpha, fmt='o')
plt.plot(raio_test.raio_m/Re, raio_test.halpha_m, color='#7e2601',linewidth=1)
plt.ylabel(r'Mean $H\alpha$')
plt.setp(ax2.get_xticklabels(), visible=False)
ax3 = plt.subplot(313, sharex=ax2)
ax3.errorbar(raio_test.raio_m/Re, raio_test.mass_m, yerr=raio_test.err_mass, fmt='.')
plt.plot(raio_test.raio_m/Re, raio_test.mass_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean mass density')
plt.xlabel('Radius/Re')
plt.savefig('figures/perfis_circular/gal%s_perfis_circ' %(gal))
plt.close(1)
plt.figure()
plt.scatter(raio_test.raio_m/Re, raio_test.conc_raio)
plt.plot(raio_test.raio_m/Re, raio_test.conc_raio, color='#7e2601',linewidth=1)
plt.title(gal)
plt.ylabel('Concentraction')
plt.xlabel('Raio/Re')
plt.savefig('figures/perfis_circular/gal%s_perfil_concentracao_circ' %(gal))
plt.close()
#perfis elipticos
plt.figure(1)
plt.title(gal)
ax1 = plt.subplot(311)
plt.title('%s - %s' %(gal, tipo))
ax1.errorbar(a_test.a_m/Re, a_test.age_m, yerr=a_test.err_age, fmt='o')
plt.scatter(a_test.a_m/Re, a_test.age_m)
plt.plot(a_test.a_m/Re, a_test.age_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean Age')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(312, sharex=ax1)
plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)),
(a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))])
ax2.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o')
plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean Halpha')
plt.setp(ax2.get_xticklabels(), visible=False)
ax3 = plt.subplot(313, sharex=ax2)
ax3.errorbar(a_test.a_m/Re, a_test.mass_m, yerr=a_test.err_mass, fmt='.')
plt.plot(a_test.a_m/Re, a_test.mass_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean mass density')
plt.xlabel('Semi-eixo a/Re')
plt.savefig('figures/perfis_eliptico/gal%s_perfis_elip' %(gal))
plt.close(1)
plt.figure()
plt.scatter(a_test.a_m/Re, a_test.conc_a)
plt.plot(a_test.a_m/Re, a_test.conc_a, color='#7e2601',linewidth=1)
plt.title(gal)
plt.ylabel('Concentraction')
plt.xlabel('Semi-eixo a/Re')
plt.savefig('figures/perfis_eliptico/gal%s_perfil_concentracao_elip' %(gal))
plt.close()
mean = [cx,cy]
width = 2*a
height = 2*b
angle = math.degrees(tetha)
ell = mpl.patches.Ellipse(xy=mean, width=width, height=height, angle = 180+angle, alpha=0.2, color='black')
fig, ax = plt.subplots()
ax.add_patch(ell)
ax.autoscale()
df2 = df.ix[(df.a > a/3) & (df.a < (a/3 + 2))]
df3 = df.ix[(df.a > a/2) & (df.a < (a/2 + 2))]
df4 = df.ix[(df.a > a) & (df.a < (a + 2))]
plt.scatter(df.x,df.y, c='red', s=10, alpha=0.7)
plt.scatter(df2.x,df2.y, c='blue')
plt.scatter(df3.x,df3.y, c='purple')
plt.scatter(df4.x, df4.y, c='green')
plt.savefig('figures/ajuste_elipse/gal%s_elipses' %(gal))
plt.close()
|
print('excentricidade = %f' %exc)
print('inclinacao = %f' %(math.degrees(tetha)))
print('#%d' %i_gal)
|
random_line_split
|
|
califa2_2.py
|
0,gal,Conc,ordem):
'''definindo uma funcao para ordenar a propridade de interesse
dividindo-o em bins de igual tamanho e calculando alguns parametros'''
df_Z = pd.DataFrame()
propr = []
err_prop = []
raio = []
err_raio = []
halpha = []
err_halpha = []
dens = []
err_dens = []
idade = []
err_age = []
semia = []
err_semia = []
conc = []
df = df0.sort_values(by=ordem)
df = df.reset_index()
del df['index']
cx, cy = mom.centro_mass(df)
delta = len(df)/50 #Quantidade de bins
j=0
for i in range(0,(len(df)), delta):
df1 = df.ix[i:i+delta,:]
propr.append(df1[ordem].mean())
err_prop.append(df1[ordem].std())
raio.append(df1['raio'].mean())
err_raio.append(df1['raio'].std())
halpha.append(df1['halpha'].mean())
err_halpha.append(df1['halpha'].std())
dens.append(df1['mass'].mean())
err_dens.append(df1['mass'].std())
idade.append(df1['age'].mean())
err_age.append(df1['age'].std())
semia.append(df1['a'].mean())
err_semia.append(df1['a'].std())
conc.append(C(df1))
j=j+1
df_Z[ordem] = propr
df_Z['erro'] = err_prop
df_Z['raio_m'] = raio
df_Z['err_raio'] = err_raio
df_Z['age_m'] = idade
df_Z['err_age'] = err_age
df_Z['mass_m'] = dens
df_Z['err_mass'] = err_dens
df_Z['halpha_m'] = halpha
df_Z['err_halpha'] = err_halpha
df_Z['a_m'] = semia
df_Z['err_a'] = err_semia
df_Z[Conc] = conc
return df_Z
def obtendo_dados(img,param):
'''
|
eitura do arquivo fits, criando um dataframe com os dados'''
df = pd.DataFrame()
nrows, ncols = img.shape
xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )
table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() ))
temp = pd.DataFrame(table, columns=['x','y',param])
df = pd.concat([df,temp], axis=1)
return(df)
def plots(df,param1,param2,param3,diretorio):
'''Função para plotar os gráficos'''
plt.figure()
incr = param3*(df.ix[:,0].mean())
plt.xlim([(df.ix[:,0].min()-(incr)),(df.ix[:,0].max()+(incr))])
plt.scatter(df.ix[:,0], df.ix[:,12])
plt.title(gal+' '+tipo, fontsize=30)
plt.ylabel('Concentraction', fontsize=30)
plt.xlabel(param2, fontsize=30)
plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2))
plt.close()
# plt.figure()
# plt.title('Distribuicao C(%s)- %s' %(param2,param1))
# df.ix[:,0].hist(bins=100)
# plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2))
# plt.close()
data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew'
age = pd.read_csv('Paty_at_flux__yx/age.csv')
mass = pd.read_csv('PatImages/mass.csv')
halpha = pd.read_csv('Hamaps/halpha.csv')
#halpha = pd.read_csv('Hamaps/teste.csv')
hu1 = []
hu2 = []
hu3 = []
hu4 = []
hu5 = []
hu6 = []
hu7 = []
hugal = []
hutype = []
df_hu = pd.DataFrame()
for i_gal in range(len(halpha)):
#for i_gal in range(0,2):
print(bcolors.FAIL +'-'*79+ bcolors.ENDC)
print(bcolors.FAIL + '-'*33 + 'OBJETO: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC)
print(bcolors.FAIL +'-'*79+ bcolors.ENDC)
plt.close()
image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal]))
img = get_image(image_ha)
#plotando a imagem fits
plt.figure(1)
plt.clf()
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
plt.axis([0,77,0,72])
plt.xlabel('X',fontweight='bold')
plt.ylabel('Y',fontweight='bold')
imgplot = plt.imshow(100*np.log10(img/255), cmap=cx)
titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal]
plt.title(titulo)
#plt.colorbar()
figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal]
plt.savefig(figura)
#obtendo os dados de Halpha da imagem fits
df_ha = obtendo_dados(img,'halpha')
#obtendo os dados de densidade de massa da imagem fits
image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_mass)
df_mass = obtendo_dados(img, 'mass')
#obtendo os dados de idade da imagem fits
image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_age)
df_age = obtendo_dados(img, 'age')
#selecionando apenas os dados de idade > 0 e mass > 0
df0 = pd.merge(df_age,df_mass)
df1 = pd.merge(df0,df_ha, how='inner')
df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)]
Re = mom.equivalent_radius(df)
cx, cy = mom.centro_mass(df)
tetha, exc, a, b = mom.param_elipse(df)
df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2)
acres = math.radians(180)
d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2
e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2
df['a'] = np.sqrt(d + e/((1-exc)**2))
gal = halpha['num_gal'][i_gal]
tipo = halpha['type'][i_gal]
age_test = Z(df,gal,'conc_age','age')
mass_test = Z(df,gal,'conc_mass','mass')
ha_test = Z(df,gal,'conc_ha','halpha')
raio_test = Z(df,gal,'conc_raio', 'raio')
a_test = Z(df,gal, 'conc_a', 'a')
plots(age_test,gal,'Age',0,'concentracao')
plots(mass_test,gal,'Mass_density',1,'concentracao')
plots(ha_test,gal,'Halpha',1,'concentracao')
#perfis circulares
plt.figure(1)
plt.title(gal)
ax1 = plt.subplot(311)
plt.title('%s - %s' %(gal, tipo))
ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o')
plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean Age')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(312, sharex=ax1)
plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)),
(raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))])
|
função para l
|
identifier_name
|
califa2_2.py
|
,gal,Conc,ordem):
'''definindo uma funcao para ordenar a propridade de interesse
dividindo-o em bins de igual tamanho e calculando alguns parametros'''
df_Z = pd.DataFrame()
propr = []
err_prop = []
raio = []
err_raio = []
halpha = []
err_halpha = []
dens = []
err_dens = []
idade = []
err_age = []
semia = []
err_semia = []
conc = []
df = df0.sort_values(by=ordem)
df = df.reset_index()
del df['index']
cx, cy = mom.centro_mass(df)
delta = len(df)/50 #Quantidade de bins
j=0
for i in range(0,(len(df)), delta):
df1 = df.ix[i:i+delta,:]
propr.append(df1[ordem].mean())
err_prop.append(df1[ordem].std())
raio.append(df1['raio'].mean())
err_raio.append(df1['raio'].std())
halpha.append(df1['halpha'].mean())
err_halpha.append(df1['halpha'].std())
dens.append(df1['mass'].mean())
err_dens.append(df1['mass'].std())
idade.append(df1['age'].mean())
err_age.append(df1['age'].std())
semia.append(df1['a'].mean())
err_semia.append(df1['a'].std())
conc.append(C(df1))
j=j+1
df_Z[ordem] = propr
df_Z['erro'] = err_prop
df_Z['raio_m'] = raio
df_Z['err_raio'] = err_raio
df_Z['age_m'] = idade
df_Z['err_age'] = err_age
df_Z['mass_m'] = dens
df_Z['err_mass'] = err_dens
df_Z['halpha_m'] = halpha
df_Z['err_halpha'] = err_halpha
df_Z['a_m'] = semia
df_Z['err_a'] = err_semia
df_Z[Conc] = conc
return df_Z
def obtendo_dados(img,param):
'''função para leitura do arquivo fits, criando um dataframe com os dados'''
df = pd.DataFrame()
nrows, ncols = img.shape
xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )
table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() ))
temp = pd.DataFrame(table, columns=['x','y',param])
df = pd.concat([df,temp], axis=1)
return(df)
def plots(df,param1,param2,param3,diretorio):
'''Função para plotar os gráficos'''
plt.figure()
incr = param3*(df.ix[:,0].mean())
plt.xlim([(df.ix[:,0].min()-(incr)),(df.ix[:,0].max()+(incr))])
plt.scatter(df.ix[:,0], df.ix[:,12])
plt.title(gal+' '+tipo, fontsize=30)
plt.ylabel('Concentraction', fontsize=30)
plt.xlabel(param2, fontsize=30)
plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2))
plt.close()
# plt.figure()
# plt.title('Distribuicao C(%s)- %s' %(param2,param1))
# df.ix[:,0].hist(bins=100)
# plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2))
# plt.close()
data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew'
age = pd.read_csv('Paty_at_flux__yx/age.csv')
mass = pd.read_csv('PatImages/mass.csv')
halpha = pd.read_csv('Hamaps/halpha.csv')
#halpha = pd.read_csv('Hamaps/teste.csv')
hu1 = []
hu2 = []
hu3 = []
hu4 = []
hu5 = []
hu6 = []
hu7 = []
hugal = []
hutype = []
df_hu = pd.DataFrame()
for i_gal in range(len(halpha)):
#for i_gal in range(0,2):
print(bcolors.FAIL +'-'*79+ bcolors.EN
|
#obtendo os dados de Halpha da imagem fits
df_ha = obtendo_dados(img,'halpha')
#obtendo os dados de densidade de massa da imagem fits
image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_mass)
df_mass = obtendo_dados(img, 'mass')
#obtendo os dados de idade da imagem fits
image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal])
img = get_image(image_age)
df_age = obtendo_dados(img, 'age')
#selecionando apenas os dados de idade > 0 e mass > 0
df0 = pd.merge(df_age,df_mass)
df1 = pd.merge(df0,df_ha, how='inner')
df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)]
Re = mom.equivalent_radius(df)
cx, cy = mom.centro_mass(df)
tetha, exc, a, b = mom.param_elipse(df)
df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2)
acres = math.radians(180)
d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2
e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2
df['a'] = np.sqrt(d + e/((1-exc)**2))
gal = halpha['num_gal'][i_gal]
tipo = halpha['type'][i_gal]
age_test = Z(df,gal,'conc_age','age')
mass_test = Z(df,gal,'conc_mass','mass')
ha_test = Z(df,gal,'conc_ha','halpha')
raio_test = Z(df,gal,'conc_raio', 'raio')
a_test = Z(df,gal, 'conc_a', 'a')
plots(age_test,gal,'Age',0,'concentracao')
plots(mass_test,gal,'Mass_density',1,'concentracao')
plots(ha_test,gal,'Halpha',1,'concentracao')
#perfis circulares
plt.figure(1)
plt.title(gal)
ax1 = plt.subplot(311)
plt.title('%s - %s' %(gal, tipo))
ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o')
plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1)
plt.ylabel('Mean Age')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(312, sharex=ax1)
plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)),
(raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))])
ax
|
DC)
print(bcolors.FAIL + '-'*33 + 'OBJETO: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC)
print(bcolors.FAIL +'-'*79+ bcolors.ENDC)
plt.close()
image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal]))
img = get_image(image_ha)
#plotando a imagem fits
plt.figure(1)
plt.clf()
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
plt.axis([0,77,0,72])
plt.xlabel('X',fontweight='bold')
plt.ylabel('Y',fontweight='bold')
imgplot = plt.imshow(100*np.log10(img/255), cmap=cx)
titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal]
plt.title(titulo)
#plt.colorbar()
figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal]
plt.savefig(figura)
|
conditional_block
|
modbase.py
|
url += '&' + i +'='+ search_options[i]
# the dataset
# if not 'dataset' in search_options.keys() and dataset:
if dataset:
url += '&dataset=' + dataset
# go get the results
print 'obtaining model results from:\n\t' + url
raw_stream = urllib2.urlopen( url + '&type=model' )
print 'finished downloading models, summarizing the results...'
# parse the results
results = xml_parse( raw_stream )
# check if empty
if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!!
print 'no models exist in ModBase for this protein...'
return {}
# get the ids
#ids = get_str_from_xml_tag( results , 'model_id' )
# no need, in the header of the model
# get the models
models = get_str_from_xml_tag( results , 'content' )
# extract the details
details , text = get_modbase_model_details( models , display or write_summary , export = True )
# defaults for writing files
if not root_filename:
root_filename = 'modbase_' + query
# optionally write the models
if out_directory:
create_directory( out_directory , ' to store the models as PDB files' )
print 'writing the downloaded models to ' + out_directory
count = 1
filenames = []
for i in models:
# write it
filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb'
filenames.append( os.path.abspath( filename ) )
# write the alignment
f = open( filename , 'w' )
f.write( i.strip() )
f.close()
count += 1
# change this in this case
models = filenames
# SOOO HACKY!!!!
# for later safety...
out_directory += '/'
# optionally grab the alignment too
if get_alignment:
print 'also downloading the alignments...'
raw_aln_stream = urllib2.urlopen( url + '&type=alignment' )
# parse the results
aln_results = xml_parse( raw_aln_stream )
# get the files
aln_results = aln_results.getElementsByTagName( 'alignmentfile' )
# ...for now, just get the text itself
# don't worry about the other details in the XML file
print 'writing the alignments as PIR files...'
count = 1
for i in aln_results:
i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir'
f = open( filename , 'w' )
f.write( i )
f.close()
# convert them?
# doesn't seem to load these "pir" files...? :(
# save in the details?
details[count - 1]['alignment'] = i
count += 1
# put the models (filenames) into details...cleaner output, just 1 dict
for i in xrange( len( models ) ):
details[i]['coordinates'] = models[i]
# find the "best" model
temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. target length'
print temp
text += temp +'\n'
best_score = max( [i['sequence identity'] for i in details] )
matches = [i for i in details if i['sequence identity'] == best_score]
if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ):
# find the best model score
best_score = max( [i['model score'] for i in details] )
matches = [i for i in details if i['model score'] == best_score]
if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ):
|
# debug output
if len( matches ) > 1:
temp = 'multiple models are \"equally the best\":'
print temp
text += temp +'\n'
for i in matches:
temp = '\t'+ i['coordinates']
print temp
text += temp +'\n'
temp = 'copying the first on to best_model.pdb'
print temp
text += temp +'\n'
else:
temp = 'best model: ' + matches[0]['coordinates']
print temp
text += temp
# move it to a indicative filename
copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' )
# optionally write a summary file
if write_summary:
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_summary.txt'
f = open( filename , 'w' )
f.write( text )
f.close()
# just the details, has everything else...
return details
# very hacky wrapper
def get_str_from_xml_tag( xml_object , tag ):
"""
So...I don't have time to learn proper XML parsing with the Python "xml"
library right now and this approach works...so yeah
simply return a list of str for the target <tag> in <xml_object>
"""
# get it
results = xml_object.getElementsByTagName( tag )
# convert to string
L = len( tag )
results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results]
return results
# useful simple text parsing
def extract_model_details_from_modbase_header( modbase_model_text ):
"""
Returns a dict of the model details from <modbase_model_text>
this includes the PDB template, coverage details (always continuous),
and alignment/modeling details
"""
# setup defaults, cleaner display
details = {
'model' : '?' ,
'organism' : '?' ,
'experiment' : '?' ,
'method' : '?' ,
'program' : '?' ,
'sequence identity' : 0 ,
'model score' : 0 ,
'evalue' : 0 ,
'template' : '?' ,
'template chain' : '?' ,
'template coverage' : [] ,
'target length record' : 0 ,
'target coverage' : [] ,
'template length' : 0 ,
'target length' : 0 ,
'ModPipe run' : '?' ,
'modelID' : '?' ,
'alignmentID' : '?'
}
# over the lines
for i in modbase_model_text.split( '\n' ):
if i[:4] == 'ATOM':
# done! end of the header
break
elif i[:6] == 'HEADER':
details['model'] = str( i.replace( 'HEADER' , '' ).strip() )
#elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless...
elif i[:6] == 'SOURCE':
details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() )
#elif i[:6] == 'AUTHOR': # don't care about authors for now...
elif i[:10] == 'REMARK 220':
i = str( i.replace( 'REMARK 220' , '' ).strip() )
# keep sorting...
if i[:16] == 'EXPERIMENT TYPE:':
details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize()
elif i[:7] == 'METHOD:':
details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize()
elif i[:8] == 'PROGRAM:':
details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() )
elif i[:18] == 'SEQUENCE IDENTITY:':
# as fraction please
details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100
elif i[:12] == 'MODEL SCORE:':
# as float
details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() )
elif i[:7] == 'EVALUE:':
# as float
details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() )
elif i[:13] == 'TEMPLATE PDB:':
details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip
|
best_score = max( [i['target length'] for i in details] )
matches = [i for i in details if i['target length'] == best_score]
|
conditional_block
|
modbase.py
|
( dir_name , tagline = ' to sort the data' ):
"""
Creates the directory <dir_name>
WARNING: this will delete the directory and its contents if it already
exists!
Optionally output something special in <tagline>
"""
# check if it exists
print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline
if os.path.isdir( dir_name ):
print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...'
shutil.rmtree( dir_name )
os.mkdir( dir_name )
# copy helper
def copy_file( filename , destination , display = False ):
"""
Copy <filename> to/into <destination>
just a cp wrapper...what?
"""
if display: # optional
if os.path.isdir( destination ):
print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory'
elif os.path.isfile( destination ):
print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination )
shutil.copy( filename , destination )
################################################################################
# METHODS
# woohoo!
def download_models_from_modbase( query ,
out_directory = 'modbase_models' , root_filename = '' ,
dataset = '' , get_alignment = True , write_summary = True ,
display = True ):
"""
REQUIRES INTERNET CONNECTION
Returns "details" on the models for <query> in ModBase
write results to <out_directory> with the base <root_filename>
Optionally request models from a specific <dataset>
Optionally <get_alingment> too (as PIR file)
Optionally <display> a summary of the results
Optionally <write_summary> of the models (human readable, also displays)
ModBase documentation claims that the interface can accept:
databaseID database ID, let's use UniProt
dataset a particular ModBase run?
modelID same?
seqID same?
dataset the ModWeb JobID...
type "model" or "alignment", this method handles this
and that any of the first 4 is enough to identify the target (?)
...for simplicity, let's just look using UniProt IDs as "databaseIDs"
apparently to use "non-public" access additional work must be done
(something about a "cookies.txt" file, though this seems specific to "wget",
may be able to pass in user/password as "modbase_user" and "modbase_passwd")
uses xml.dom.minidom to parse the HTML returned...this may not be kosher...
but it works...and is easier than using htmllib or sgmllib...(?)
"""
# url
url = 'http://salilab.org/modbase/retrieve/modbase'
# format the search query
print 'searching modbase for \"' + query +'\"'
url += '?databaseID=' + query
# currently unused...so why put it here?
#for i in search_options.keys():
# url += '&' + i +'='+ search_options[i]
# the dataset
# if not 'dataset' in search_options.keys() and dataset:
if dataset:
url += '&dataset=' + dataset
# go get the results
print 'obtaining model results from:\n\t' + url
raw_stream = urllib2.urlopen( url + '&type=model' )
print 'finished downloading models, summarizing the results...'
# parse the results
results = xml_parse( raw_stream )
# check if empty
if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!!
print 'no models exist in ModBase for this protein...'
return {}
# get the ids
#ids = get_str_from_xml_tag( results , 'model_id' )
# no need, in the header of the model
# get the models
models = get_str_from_xml_tag( results , 'content' )
# extract the details
details , text = get_modbase_model_details( models , display or write_summary , export = True )
# defaults for writing files
if not root_filename:
root_filename = 'modbase_' + query
# optionally write the models
if out_directory:
create_directory( out_directory , ' to store the models as PDB files' )
print 'writing the downloaded models to ' + out_directory
count = 1
filenames = []
for i in models:
# write it
filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb'
filenames.append( os.path.abspath( filename ) )
# write the alignment
f = open( filename , 'w' )
f.write( i.strip() )
f.close()
count += 1
# change this in this case
models = filenames
# SOOO HACKY!!!!
# for later safety...
out_directory += '/'
# optionally grab the alignment too
if get_alignment:
print 'also downloading the alignments...'
raw_aln_stream = urllib2.urlopen( url + '&type=alignment' )
# parse the results
aln_results = xml_parse( raw_aln_stream )
# get the files
aln_results = aln_results.getElementsByTagName( 'alignmentfile' )
# ...for now, just get the text itself
# don't worry about the other details in the XML file
print 'writing the alignments as PIR files...'
count = 1
for i in aln_results:
i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir'
f = open( filename , 'w' )
f.write( i )
f.close()
# convert them?
# doesn't seem to load these "pir" files...? :(
# save in the details?
details[count - 1]['alignment'] = i
count += 1
# put the models (filenames) into details...cleaner output, just 1 dict
for i in xrange( len( models ) ):
details[i]['coordinates'] = models[i]
# find the "best" model
temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. target length'
print temp
text += temp +'\n'
best_score = max( [i['sequence identity'] for i in details] )
matches = [i for i in details if i['sequence identity'] == best_score]
if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ):
# find the best model score
best_score = max( [i['model score'] for i in details] )
matches = [i for i in details if i['model score'] == best_score]
if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ):
best_score = max( [i['target length'] for i in details] )
matches = [i for i in details if i['target length'] == best_score]
# debug output
if len( matches ) > 1:
temp = 'multiple models are \"equally the best\":'
print temp
text += temp +'\n'
for i in matches:
temp = '\t'+ i['coordinates']
print temp
text += temp +'\n'
temp = 'copying the first on to best_model.pdb'
print temp
text += temp +'\n'
else:
temp = 'best model: ' + matches[0]['coordinates']
print temp
text += temp
# move it to a indicative filename
copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' )
# optionally write a summary file
if write_summary:
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_summary.txt'
f = open( filename , 'w' )
f.write( text )
f.close()
# just the details, has everything else...
return details
# very hacky wrapper
def get_str_from_xml_tag( xml_object , tag ):
"""
So...I don't have time to learn proper XML parsing with the Python "xml"
library right now and this approach works...so yeah
simply return a list of str for the target <tag> in <xml_object>
"""
# get it
results = xml_object.getElementsByTagName( tag )
# convert to string
L = len( tag )
results = [i.toxml()[L +
|
create_directory
|
identifier_name
|
|
modbase.py
|
# url += '&' + i +'='+ search_options[i]
# the dataset
# if not 'dataset' in search_options.keys() and dataset:
if dataset:
url += '&dataset=' + dataset
# go get the results
print 'obtaining model results from:\n\t' + url
raw_stream = urllib2.urlopen( url + '&type=model' )
print 'finished downloading models, summarizing the results...'
# parse the results
results = xml_parse( raw_stream )
# check if empty
if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!!
print 'no models exist in ModBase for this protein...'
return {}
# get the ids
#ids = get_str_from_xml_tag( results , 'model_id' )
# no need, in the header of the model
# get the models
models = get_str_from_xml_tag( results , 'content' )
# extract the details
details , text = get_modbase_model_details( models , display or write_summary , export = True )
# defaults for writing files
if not root_filename:
root_filename = 'modbase_' + query
# optionally write the models
if out_directory:
create_directory( out_directory , ' to store the models as PDB files' )
print 'writing the downloaded models to ' + out_directory
count = 1
filenames = []
for i in models:
# write it
filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb'
filenames.append( os.path.abspath( filename ) )
# write the alignment
f = open( filename , 'w' )
f.write( i.strip() )
f.close()
count += 1
# change this in this case
models = filenames
# SOOO HACKY!!!!
# for later safety...
out_directory += '/'
# optionally grab the alignment too
if get_alignment:
print 'also downloading the alignments...'
raw_aln_stream = urllib2.urlopen( url + '&type=alignment' )
# parse the results
aln_results = xml_parse( raw_aln_stream )
# get the files
aln_results = aln_results.getElementsByTagName( 'alignmentfile' )
# ...for now, just get the text itself
# don't worry about the other details in the XML file
print 'writing the alignments as PIR files...'
count = 1
for i in aln_results:
i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir'
f = open( filename , 'w' )
f.write( i )
f.close()
# convert them?
# doesn't seem to load these "pir" files...? :(
# save in the details?
details[count - 1]['alignment'] = i
count += 1
# put the models (filenames) into details...cleaner output, just 1 dict
for i in xrange( len( models ) ):
details[i]['coordinates'] = models[i]
# find the "best" model
temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. target length'
print temp
text += temp +'\n'
best_score = max( [i['sequence identity'] for i in details] )
matches = [i for i in details if i['sequence identity'] == best_score]
if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ):
# find the best model score
best_score = max( [i['model score'] for i in details] )
matches = [i for i in details if i['model score'] == best_score]
if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ):
best_score = max( [i['target length'] for i in details] )
matches = [i for i in details if i['target length'] == best_score]
# debug output
if len( matches ) > 1:
temp = 'multiple models are \"equally the best\":'
print temp
text += temp +'\n'
for i in matches:
temp = '\t'+ i['coordinates']
print temp
text += temp +'\n'
temp = 'copying the first on to best_model.pdb'
print temp
text += temp +'\n'
else:
temp = 'best model: ' + matches[0]['coordinates']
print temp
text += temp
# move it to a indicative filename
copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' )
# optionally write a summary file
if write_summary:
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_summary.txt'
f = open( filename , 'w' )
f.write( text )
f.close()
# just the details, has everything else...
return details
# very hacky wrapper
def get_str_from_xml_tag( xml_object , tag ):
"""
So...I don't have time to learn proper XML parsing with the Python "xml"
library right now and this approach works...so yeah
simply return a list of str for the target <tag> in <xml_object>
"""
# get it
results = xml_object.getElementsByTagName( tag )
# convert to string
L = len( tag )
results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results]
return results
# useful simple text parsing
def extract_model_details_from_modbase_header( modbase_model_text ):
"""
Returns a dict of the model details from <modbase_model_text>
this includes the PDB template, coverage details (always continuous),
and alignment/modeling details
"""
# setup defaults, cleaner display
details = {
'model' : '?' ,
'organism' : '?' ,
'experiment' : '?' ,
'method' : '?' ,
'program' : '?' ,
'sequence identity' : 0 ,
'model score' : 0 ,
'evalue' : 0 ,
'template' : '?' ,
'template chain' : '?' ,
'template coverage' : [] ,
'target length record' : 0 ,
'target coverage' : [] ,
'template length' : 0 ,
'target length' : 0 ,
'ModPipe run' : '?' ,
'modelID' : '?' ,
'alignmentID' : '?'
}
# over the lines
for i in modbase_model_text.split( '\n' ):
if i[:4] == 'ATOM':
# done! end of the header
break
elif i[:6] == 'HEADER':
details['model'] = str( i.replace( 'HEADER' , '' ).strip() )
#elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless...
elif i[:6] == 'SOURCE':
details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() )
#elif i[:6] == 'AUTHOR': # don't care about authors for now...
elif i[:10] == 'REMARK 220':
i = str( i.replace( 'REMARK 220' , '' ).strip() )
# keep sorting...
if i[:16] == 'EXPERIMENT TYPE:':
details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize()
elif i[:7] == 'METHOD:':
details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize()
elif i[:8] == 'PROGRAM:':
details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() )
elif i[:18] == 'SEQUENCE IDENTITY:':
# as fraction please
details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100
elif i[:12] == 'MODEL SCORE:':
# as float
|
# as float
details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() )
elif i[:13] == 'TEMPLATE PDB:':
details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip
|
details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() )
elif i[:7] == 'EVALUE:':
|
random_line_split
|
modbase.py
|
_from_xml_tag( i , 'content' )[0] # just 1, always the first
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir'
f = open( filename , 'w' )
f.write( i )
f.close()
# convert them?
# doesn't seem to load these "pir" files...? :(
# save in the details?
details[count - 1]['alignment'] = i
count += 1
# put the models (filenames) into details...cleaner output, just 1 dict
for i in xrange( len( models ) ):
details[i]['coordinates'] = models[i]
# find the "best" model
temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. target length'
print temp
text += temp +'\n'
best_score = max( [i['sequence identity'] for i in details] )
matches = [i for i in details if i['sequence identity'] == best_score]
if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ):
# find the best model score
best_score = max( [i['model score'] for i in details] )
matches = [i for i in details if i['model score'] == best_score]
if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ):
best_score = max( [i['target length'] for i in details] )
matches = [i for i in details if i['target length'] == best_score]
# debug output
if len( matches ) > 1:
temp = 'multiple models are \"equally the best\":'
print temp
text += temp +'\n'
for i in matches:
temp = '\t'+ i['coordinates']
print temp
text += temp +'\n'
temp = 'copying the first on to best_model.pdb'
print temp
text += temp +'\n'
else:
temp = 'best model: ' + matches[0]['coordinates']
print temp
text += temp
# move it to a indicative filename
copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' )
# optionally write a summary file
if write_summary:
# if out_directory is empty...this will just do as we want
filename = out_directory + root_filename + '_summary.txt'
f = open( filename , 'w' )
f.write( text )
f.close()
# just the details, has everything else...
return details
# very hacky wrapper
def get_str_from_xml_tag( xml_object , tag ):
"""
So...I don't have time to learn proper XML parsing with the Python "xml"
library right now and this approach works...so yeah
simply return a list of str for the target <tag> in <xml_object>
"""
# get it
results = xml_object.getElementsByTagName( tag )
# convert to string
L = len( tag )
results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results]
return results
# useful simple text parsing
def extract_model_details_from_modbase_header( modbase_model_text ):
"""
Returns a dict of the model details from <modbase_model_text>
this includes the PDB template, coverage details (always continuous),
and alignment/modeling details
"""
# setup defaults, cleaner display
details = {
'model' : '?' ,
'organism' : '?' ,
'experiment' : '?' ,
'method' : '?' ,
'program' : '?' ,
'sequence identity' : 0 ,
'model score' : 0 ,
'evalue' : 0 ,
'template' : '?' ,
'template chain' : '?' ,
'template coverage' : [] ,
'target length record' : 0 ,
'target coverage' : [] ,
'template length' : 0 ,
'target length' : 0 ,
'ModPipe run' : '?' ,
'modelID' : '?' ,
'alignmentID' : '?'
}
# over the lines
for i in modbase_model_text.split( '\n' ):
if i[:4] == 'ATOM':
# done! end of the header
break
elif i[:6] == 'HEADER':
details['model'] = str( i.replace( 'HEADER' , '' ).strip() )
#elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless...
elif i[:6] == 'SOURCE':
details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() )
#elif i[:6] == 'AUTHOR': # don't care about authors for now...
elif i[:10] == 'REMARK 220':
i = str( i.replace( 'REMARK 220' , '' ).strip() )
# keep sorting...
if i[:16] == 'EXPERIMENT TYPE:':
details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize()
elif i[:7] == 'METHOD:':
details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize()
elif i[:8] == 'PROGRAM:':
details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() )
elif i[:18] == 'SEQUENCE IDENTITY:':
# as fraction please
details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100
elif i[:12] == 'MODEL SCORE:':
# as float
details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() )
elif i[:7] == 'EVALUE:':
# as float
details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() )
elif i[:13] == 'TEMPLATE PDB:':
details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip().upper() )
elif i[:15] == 'TEMPLATE CHAIN:':
details['template chain'] = str( i.replace( 'TEMPLATE CHAIN:' , '' ).strip() )
elif i[:15] == 'TEMPLATE BEGIN:':
details['template coverage'].append( int( i.replace( 'TEMPLATE BEGIN:' , '' ).strip() ) )
elif i[:13] == 'TEMPLATE END:':
details['template coverage'].append( int( i.replace( 'TEMPLATE END:' , '' ).strip() ) )
elif i[:14] == 'TARGET LENGTH:':
details['target length record'] = int( i.replace( 'TARGET LENGTH:' , '' ).strip() )
elif i[:13] == 'TARGET BEGIN:':
details['target coverage'].append( int( i.replace( 'TARGET BEGIN:' , '' ).strip() ) )
elif i[:11] == 'TARGET END:':
details['target coverage'].append( int( i.replace( 'TARGET END:' , '' ).strip() ) )
elif i[:12] == 'MODPIPE RUN:':
details['ModPipe run'] = str( i.replace( 'MODPIPE RUN:' , '' ).strip() )
elif i[:17] == 'MODPIPE MODEL ID:':
details['modelID'] = str( i.replace( 'MODPIPE MODEL ID:' , '' ).strip() )
elif i[:21] == 'MODPIPE ALIGNMENT ID:':
details['alignmentID'] = str( i.replace( 'MODPIPE ALIGNMENT ID:' , '' ).strip() )
# for own sanity
details['template coverage'].sort()
details['template length'] = details['template coverage'][1] - details['template coverage'][0] + 1
details['target coverage'].sort()
details['target length'] = details['target coverage'][1] - details['target coverage'][0] + 1
return details
# silly interactive method
def display_modbase_model_details( details , include_run_details = False , export = False ):
|
"""
Displays a summary of the ModBase model <details>
Optionally <include_run_details>
Optionally <export> the summary text
"""
# check the input
if isinstance( details , str ):
# assume it just needs to be parsed out
details = extract_model_details_from_modbase_header( details )
# exit condition
if 'FAIL' in details.keys():
text = details['model'] +'\n'
text += 'FAILED: ' + details['FAIL'] +'\n'
print text[:-1]
return text[:-1]
|
identifier_body
|
|
dir.go
|
d.cache.NoteDir(now, name)
}
// Return everything we learned.
out = append(out, filteredSlice...)
return
}
////////////////////////////////////////////////////////////////////////
// Public interface
////////////////////////////////////////////////////////////////////////
func (d *dirInode) Lock() {
d.mu.Lock()
}
func (d *dirInode) Unlock() {
d.mu.Unlock()
}
func (d *dirInode) ID() fuseops.InodeID {
return d.id
}
func (d *dirInode) Name() Name {
return d.name
}
// LOCKS_REQUIRED(d)
func (d *dirInode) IncrementLookupCount() {
d.lc.Inc()
}
// LOCKS_REQUIRED(d)
func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) {
destroy = d.lc.Dec(n)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) Destroy() (err error) {
// Nothing interesting to do.
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) Attributes(
ctx context.Context) (attrs fuseops.InodeAttributes, err error) {
// Set up basic attributes.
attrs = d.attrs
attrs.Nlink = 1
return
}
func (d *dirInode) Bucket() gcsx.SyncerBucket {
return d.bucket
}
// A suffix that can be used to unambiguously tag a file system name.
// (Unambiguous because U+000A is not allowed in GCS object names.) This is
// used to refer to the file/symlink in a (file/symlink, directory) pair with
// conflicting object names.
//
// See also the notes on DirInode.LookUpChild.
const ConflictingFileNameSuffix = "\n"
// LOCKS_REQUIRED(d)
func (d *dirInode) LookUpChild(
ctx context.Context,
name string) (result LookUpResult, err error) {
// Consult the cache about the type of the child. This may save us work
// below.
now := d.cacheClock.Now()
cacheSaysFile := d.cache.IsFile(now, name)
cacheSaysDir := d.cache.IsDir(now, name)
// Is this a conflict marker name?
if strings.HasSuffix(name, ConflictingFileNameSuffix) {
result, err = d.lookUpConflicting(ctx, name)
return
}
// Stat the child as a file, unless the cache has told us it's a directory
// but not a file.
b := syncutil.NewBundle(ctx)
var fileResult LookUpResult
if !(cacheSaysDir && !cacheSaysFile) {
b.Add(func(ctx context.Context) (err error) {
fileResult, err = d.lookUpChildFile(ctx, name)
return
})
}
// Stat the child as a directory, unless the cache has told us it's a file
// but not a directory.
var dirResult LookUpResult
if !(cacheSaysFile && !cacheSaysDir) {
b.Add(func(ctx context.Context) (err error) {
dirResult, err = d.lookUpChildDir(ctx, name)
return
})
}
// Wait for both.
err = b.Join()
if err != nil {
return
}
// Prefer directories over files.
switch {
case dirResult.Exists():
result = dirResult
case fileResult.Exists():
result = fileResult
}
// Update the cache.
now = d.cacheClock.Now()
if fileResult.Exists() {
d.cache.NoteFile(now, name)
}
if dirResult.Exists() {
d.cache.NoteDir(now, name)
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) ReadEntries(
ctx context.Context,
tok string) (entries []fuseutil.Dirent, newTok string, err error) {
// Ask the bucket to list some objects.
req := &gcs.ListObjectsRequest{
Delimiter: "/",
Prefix: d.Name().GcsObjectName(),
ContinuationToken: tok,
}
listing, err := d.bucket.ListObjects(ctx, req)
if err != nil {
err = fmt.Errorf("ListObjects: %v", err)
return
}
// Convert objects to entries for files or symlinks.
for _, o := range listing.Objects {
// Skip the entry for the backing object itself, which of course has its
// own name as a prefix but which we don't wan to appear to contain itself.
if o.Name == d.Name().GcsObjectName() {
continue
}
e := fuseutil.Dirent{
Name: path.Base(o.Name),
Type: fuseutil.DT_File,
}
if IsSymlink(o) {
e.Type = fuseutil.DT_Link
}
entries = append(entries, e)
}
// Extract directory names from the collapsed runs.
var dirNames []string
for _, p := range listing.CollapsedRuns {
dirNames = append(dirNames, path.Base(p))
}
// Filter the directory names according to our implicit directory settings.
dirNames, err = d.filterMissingChildDirs(ctx, dirNames)
if err != nil {
err = fmt.Errorf("filterMissingChildDirs: %v", err)
return
}
// Return entries for directories.
for _, name := range dirNames {
e := fuseutil.Dirent{
Name: name,
Type: fuseutil.DT_Directory,
}
entries = append(entries, e)
}
// Return an appropriate continuation token, if any.
newTok = listing.ContinuationToken
// Update the type cache with everything we learned.
now := d.cacheClock.Now()
for _, e := range entries {
switch e.Type {
case fuseutil.DT_File:
d.cache.NoteFile(now, e.Name)
case fuseutil.DT_Directory:
d.cache.NoteDir(now, e.Name)
}
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildFile(
ctx context.Context,
name string) (fn Name, o *gcs.Object, err error) {
metadata := map[string]string{
FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano),
}
fn = NewFileName(d.Name(), name)
o, err = d.createNewObject(ctx, fn, metadata)
if err != nil {
return
}
d.cache.NoteFile(d.cacheClock.Now(), name)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CloneToChildFile(
ctx context.Context,
name string,
src *gcs.Object) (fn Name, o *gcs.Object, err error) {
// Erase any existing type information for this name.
d.cache.Erase(name)
fn = NewFileName(d.Name(), name)
// Clone over anything that might already exist for the name.
o, err = d.bucket.CopyObject(
ctx,
&gcs.CopyObjectRequest{
SrcName: src.Name,
SrcGeneration: src.Generation,
SrcMetaGenerationPrecondition: &src.MetaGeneration,
DstName: fn.GcsObjectName(),
})
if err != nil {
return
}
// Update the type cache.
d.cache.NoteFile(d.cacheClock.Now(), name)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildSymlink(
ctx context.Context,
name string,
target string) (fn Name, o *gcs.Object, err error) {
fn = NewFileName(d.Name(), name)
metadata := map[string]string{
SymlinkMetadataKey: target,
}
o, err = d.createNewObject(ctx, fn, metadata)
if err != nil {
return
}
d.cache.NoteFile(d.cacheClock.Now(), name)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildDir(
ctx context.Context,
name string) (fn Name, o *gcs.Object, err error) {
fn = NewDirName(d.Name(), name)
o, err = d.createNewObject(ctx, fn, nil)
if err != nil {
return
}
d.cache.NoteDir(d.cacheClock.Now(), name)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) DeleteChildFile(
ctx context.Context,
name string,
generation int64,
metaGeneration *int64) (err error) {
d.cache.Erase(name)
childName := NewFileName(d.Name(), name)
err = d.bucket.DeleteObject(
ctx,
&gcs.DeleteObjectRequest{
Name: childName.GcsObjectName(),
Generation: generation,
MetaGenerationPrecondition: metaGeneration,
})
if err != nil {
err = fmt.Errorf("DeleteObject: %v", err)
return
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) DeleteChildDir(
ctx context.Context,
name string) (err error) {
d.cache.Erase(name)
childName := NewDirName(d.Name(), name)
// Delete the backing object. Unfortunately we have no way to precondition
// this on the directory being empty.
err = d.bucket.DeleteObject(
ctx,
&gcs.DeleteObjectRequest{
Name: childName.GcsObjectName(),
})
if err != nil {
|
err = fmt.Errorf("DeleteObject: %v", err)
return
}
return
|
random_line_split
|
|
dir.go
|
gcs.Object
// Stat the placeholder.
o, err = statObjectMayNotExist(
ctx,
bucket,
NewDirName(dirName, name),
)
if err != nil {
err = fmt.Errorf("statObjectMayNotExist: %v", err)
return
}
// Should we pass on this name?
if o == nil {
continue
}
select {
case <-ctx.Done():
err = ctx.Err()
return
case filtered <- name:
}
}
return
}
// Given a list of child names that appear to be directories according to
// d.bucket.ListObjects (which always behaves as if implicit directories are
// enabled), filter out the ones for which a placeholder object does not
// actually exist. If implicit directories are enabled, simply return them all.
//
// LOCKS_REQUIRED(d)
func (d *dirInode) filterMissingChildDirs(
ctx context.Context,
in []string) (out []string, err error) {
// Do we need to do anything?
if d.implicitDirs {
out = in
return
}
b := syncutil.NewBundle(ctx)
// First add any names that we already know are directories according to our
// cache, removing them from the input.
now := d.cacheClock.Now()
var tmp []string
for _, name := range in {
if d.cache.IsDir(now, name) {
out = append(out, name)
} else {
tmp = append(tmp, name)
}
}
in = tmp
// Feed names into a channel.
unfiltered := make(chan string, 100)
b.Add(func(ctx context.Context) (err error) {
defer close(unfiltered)
for _, name := range in {
select {
case <-ctx.Done():
err = ctx.Err()
return
case unfiltered <- name:
}
}
return
})
// Stat the placeholder object for each, filtering out placeholders that are
// not found. Use some parallelism.
const statWorkers = 32
filtered := make(chan string, 100)
var wg sync.WaitGroup
for i := 0; i < statWorkers; i++ {
wg.Add(1)
b.Add(func(ctx context.Context) (err error) {
defer wg.Done()
err = filterMissingChildDirNames(
ctx,
d.bucket,
d.Name(),
unfiltered,
filtered)
return
})
}
go func() {
wg.Wait()
close(filtered)
}()
// Accumulate into a slice.
var filteredSlice []string
b.Add(func(ctx context.Context) (err error) {
for name := range filtered {
filteredSlice = append(filteredSlice, name)
}
return
})
// Wait for everything to complete.
err = b.Join()
// Update the cache with everything we learned.
now = d.cacheClock.Now()
for _, name := range filteredSlice {
d.cache.NoteDir(now, name)
}
// Return everything we learned.
out = append(out, filteredSlice...)
return
}
////////////////////////////////////////////////////////////////////////
// Public interface
////////////////////////////////////////////////////////////////////////
func (d *dirInode) Lock() {
d.mu.Lock()
}
func (d *dirInode) Unlock() {
d.mu.Unlock()
}
func (d *dirInode) ID() fuseops.InodeID {
return d.id
}
func (d *dirInode) Name() Name {
return d.name
}
// LOCKS_REQUIRED(d)
func (d *dirInode) IncrementLookupCount() {
d.lc.Inc()
}
// LOCKS_REQUIRED(d)
func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) {
destroy = d.lc.Dec(n)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) Destroy() (err error) {
// Nothing interesting to do.
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) Attributes(
ctx context.Context) (attrs fuseops.InodeAttributes, err error) {
// Set up basic attributes.
attrs = d.attrs
attrs.Nlink = 1
return
}
func (d *dirInode) Bucket() gcsx.SyncerBucket {
return d.bucket
}
// A suffix that can be used to unambiguously tag a file system name.
// (Unambiguous because U+000A is not allowed in GCS object names.) This is
// used to refer to the file/symlink in a (file/symlink, directory) pair with
// conflicting object names.
//
// See also the notes on DirInode.LookUpChild.
const ConflictingFileNameSuffix = "\n"
// LOCKS_REQUIRED(d)
func (d *dirInode) LookUpChild(
ctx context.Context,
name string) (result LookUpResult, err error) {
// Consult the cache about the type of the child. This may save us work
// below.
now := d.cacheClock.Now()
cacheSaysFile := d.cache.IsFile(now, name)
cacheSaysDir := d.cache.IsDir(now, name)
// Is this a conflict marker name?
if strings.HasSuffix(name, ConflictingFileNameSuffix) {
result, err = d.lookUpConflicting(ctx, name)
return
}
// Stat the child as a file, unless the cache has told us it's a directory
// but not a file.
b := syncutil.NewBundle(ctx)
var fileResult LookUpResult
if !(cacheSaysDir && !cacheSaysFile) {
b.Add(func(ctx context.Context) (err error) {
fileResult, err = d.lookUpChildFile(ctx, name)
return
})
}
// Stat the child as a directory, unless the cache has told us it's a file
// but not a directory.
var dirResult LookUpResult
if !(cacheSaysFile && !cacheSaysDir) {
b.Add(func(ctx context.Context) (err error) {
dirResult, err = d.lookUpChildDir(ctx, name)
return
})
}
// Wait for both.
err = b.Join()
if err != nil {
return
}
// Prefer directories over files.
switch {
case dirResult.Exists():
result = dirResult
case fileResult.Exists():
result = fileResult
}
// Update the cache.
now = d.cacheClock.Now()
if fileResult.Exists() {
d.cache.NoteFile(now, name)
}
if dirResult.Exists() {
d.cache.NoteDir(now, name)
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) ReadEntries(
ctx context.Context,
tok string) (entries []fuseutil.Dirent, newTok string, err error) {
// Ask the bucket to list some objects.
req := &gcs.ListObjectsRequest{
Delimiter: "/",
Prefix: d.Name().GcsObjectName(),
ContinuationToken: tok,
}
listing, err := d.bucket.ListObjects(ctx, req)
if err != nil {
err = fmt.Errorf("ListObjects: %v", err)
return
}
// Convert objects to entries for files or symlinks.
for _, o := range listing.Objects {
// Skip the entry for the backing object itself, which of course has its
// own name as a prefix but which we don't wan to appear to contain itself.
if o.Name == d.Name().GcsObjectName() {
continue
}
e := fuseutil.Dirent{
Name: path.Base(o.Name),
Type: fuseutil.DT_File,
}
if IsSymlink(o) {
e.Type = fuseutil.DT_Link
}
entries = append(entries, e)
}
// Extract directory names from the collapsed runs.
var dirNames []string
for _, p := range listing.CollapsedRuns {
dirNames = append(dirNames, path.Base(p))
}
// Filter the directory names according to our implicit directory settings.
dirNames, err = d.filterMissingChildDirs(ctx, dirNames)
if err != nil {
err = fmt.Errorf("filterMissingChildDirs: %v", err)
return
}
// Return entries for directories.
for _, name := range dirNames {
e := fuseutil.Dirent{
Name: name,
Type: fuseutil.DT_Directory,
}
entries = append(entries, e)
}
// Return an appropriate continuation token, if any.
newTok = listing.ContinuationToken
// Update the type cache with everything we learned.
now := d.cacheClock.Now()
for _, e := range entries {
switch e.Type {
case fuseutil.DT_File:
d.cache.NoteFile(now, e.Name)
case fuseutil.DT_Directory:
d.cache.NoteDir(now, e.Name)
}
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildFile(
ctx context.Context,
name string) (fn Name, o *gcs.Object, err error) {
metadata := map[string]string{
FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano),
}
fn = NewFileName(d.Name(), name)
o, err = d.createNewObject(ctx, fn, metadata)
if err != nil
|
{
return
}
|
conditional_block
|
|
dir.go
|
(now, name) {
out = append(out, name)
} else {
tmp = append(tmp, name)
}
}
in = tmp
// Feed names into a channel.
unfiltered := make(chan string, 100)
b.Add(func(ctx context.Context) (err error) {
defer close(unfiltered)
for _, name := range in {
select {
case <-ctx.Done():
err = ctx.Err()
return
case unfiltered <- name:
}
}
return
})
// Stat the placeholder object for each, filtering out placeholders that are
// not found. Use some parallelism.
const statWorkers = 32
filtered := make(chan string, 100)
var wg sync.WaitGroup
for i := 0; i < statWorkers; i++ {
wg.Add(1)
b.Add(func(ctx context.Context) (err error) {
defer wg.Done()
err = filterMissingChildDirNames(
ctx,
d.bucket,
d.Name(),
unfiltered,
filtered)
return
})
}
go func() {
wg.Wait()
close(filtered)
}()
// Accumulate into a slice.
var filteredSlice []string
b.Add(func(ctx context.Context) (err error) {
for name := range filtered {
filteredSlice = append(filteredSlice, name)
}
return
})
// Wait for everything to complete.
err = b.Join()
// Update the cache with everything we learned.
now = d.cacheClock.Now()
for _, name := range filteredSlice {
d.cache.NoteDir(now, name)
}
// Return everything we learned.
out = append(out, filteredSlice...)
return
}
////////////////////////////////////////////////////////////////////////
// Public interface
////////////////////////////////////////////////////////////////////////
func (d *dirInode) Lock() {
d.mu.Lock()
}
func (d *dirInode) Unlock() {
d.mu.Unlock()
}
func (d *dirInode) ID() fuseops.InodeID {
return d.id
}
func (d *dirInode) Name() Name {
return d.name
}
// LOCKS_REQUIRED(d)
func (d *dirInode) IncrementLookupCount() {
d.lc.Inc()
}
// LOCKS_REQUIRED(d)
func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) {
destroy = d.lc.Dec(n)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) Destroy() (err error) {
// Nothing interesting to do.
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) Attributes(
ctx context.Context) (attrs fuseops.InodeAttributes, err error) {
// Set up basic attributes.
attrs = d.attrs
attrs.Nlink = 1
return
}
func (d *dirInode) Bucket() gcsx.SyncerBucket {
return d.bucket
}
// A suffix that can be used to unambiguously tag a file system name.
// (Unambiguous because U+000A is not allowed in GCS object names.) This is
// used to refer to the file/symlink in a (file/symlink, directory) pair with
// conflicting object names.
//
// See also the notes on DirInode.LookUpChild.
const ConflictingFileNameSuffix = "\n"
// LOCKS_REQUIRED(d)
func (d *dirInode) LookUpChild(
ctx context.Context,
name string) (result LookUpResult, err error) {
// Consult the cache about the type of the child. This may save us work
// below.
now := d.cacheClock.Now()
cacheSaysFile := d.cache.IsFile(now, name)
cacheSaysDir := d.cache.IsDir(now, name)
// Is this a conflict marker name?
if strings.HasSuffix(name, ConflictingFileNameSuffix) {
result, err = d.lookUpConflicting(ctx, name)
return
}
// Stat the child as a file, unless the cache has told us it's a directory
// but not a file.
b := syncutil.NewBundle(ctx)
var fileResult LookUpResult
if !(cacheSaysDir && !cacheSaysFile) {
b.Add(func(ctx context.Context) (err error) {
fileResult, err = d.lookUpChildFile(ctx, name)
return
})
}
// Stat the child as a directory, unless the cache has told us it's a file
// but not a directory.
var dirResult LookUpResult
if !(cacheSaysFile && !cacheSaysDir) {
b.Add(func(ctx context.Context) (err error) {
dirResult, err = d.lookUpChildDir(ctx, name)
return
})
}
// Wait for both.
err = b.Join()
if err != nil {
return
}
// Prefer directories over files.
switch {
case dirResult.Exists():
result = dirResult
case fileResult.Exists():
result = fileResult
}
// Update the cache.
now = d.cacheClock.Now()
if fileResult.Exists() {
d.cache.NoteFile(now, name)
}
if dirResult.Exists() {
d.cache.NoteDir(now, name)
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) ReadEntries(
ctx context.Context,
tok string) (entries []fuseutil.Dirent, newTok string, err error) {
// Ask the bucket to list some objects.
req := &gcs.ListObjectsRequest{
Delimiter: "/",
Prefix: d.Name().GcsObjectName(),
ContinuationToken: tok,
}
listing, err := d.bucket.ListObjects(ctx, req)
if err != nil {
err = fmt.Errorf("ListObjects: %v", err)
return
}
// Convert objects to entries for files or symlinks.
for _, o := range listing.Objects {
// Skip the entry for the backing object itself, which of course has its
// own name as a prefix but which we don't wan to appear to contain itself.
if o.Name == d.Name().GcsObjectName() {
continue
}
e := fuseutil.Dirent{
Name: path.Base(o.Name),
Type: fuseutil.DT_File,
}
if IsSymlink(o) {
e.Type = fuseutil.DT_Link
}
entries = append(entries, e)
}
// Extract directory names from the collapsed runs.
var dirNames []string
for _, p := range listing.CollapsedRuns {
dirNames = append(dirNames, path.Base(p))
}
// Filter the directory names according to our implicit directory settings.
dirNames, err = d.filterMissingChildDirs(ctx, dirNames)
if err != nil {
err = fmt.Errorf("filterMissingChildDirs: %v", err)
return
}
// Return entries for directories.
for _, name := range dirNames {
e := fuseutil.Dirent{
Name: name,
Type: fuseutil.DT_Directory,
}
entries = append(entries, e)
}
// Return an appropriate continuation token, if any.
newTok = listing.ContinuationToken
// Update the type cache with everything we learned.
now := d.cacheClock.Now()
for _, e := range entries {
switch e.Type {
case fuseutil.DT_File:
d.cache.NoteFile(now, e.Name)
case fuseutil.DT_Directory:
d.cache.NoteDir(now, e.Name)
}
}
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildFile(
ctx context.Context,
name string) (fn Name, o *gcs.Object, err error) {
metadata := map[string]string{
FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano),
}
fn = NewFileName(d.Name(), name)
o, err = d.createNewObject(ctx, fn, metadata)
if err != nil {
return
}
d.cache.NoteFile(d.cacheClock.Now(), name)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CloneToChildFile(
ctx context.Context,
name string,
src *gcs.Object) (fn Name, o *gcs.Object, err error) {
// Erase any existing type information for this name.
d.cache.Erase(name)
fn = NewFileName(d.Name(), name)
// Clone over anything that might already exist for the name.
o, err = d.bucket.CopyObject(
ctx,
&gcs.CopyObjectRequest{
SrcName: src.Name,
SrcGeneration: src.Generation,
SrcMetaGenerationPrecondition: &src.MetaGeneration,
DstName: fn.GcsObjectName(),
})
if err != nil {
return
}
// Update the type cache.
d.cache.NoteFile(d.cacheClock.Now(), name)
return
}
// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildSymlink(
ctx context.Context,
name string,
target string) (fn Name, o *gcs.Object, err error)
|
{
fn = NewFileName(d.Name(), name)
metadata := map[string]string{
SymlinkMetadataKey: target,
}
o, err = d.createNewObject(ctx, fn, metadata)
if err != nil {
return
}
d.cache.NoteFile(d.cacheClock.Now(), name)
return
}
|
identifier_body
|
|
dir.go
|
// exists in GCS.
// Return the full name of the child and the GCS object it backs up.
CreateChildSymlink(
ctx context.Context,
name string,
target string) (fn Name, o *gcs.Object, err error)
// Create a backing object for a child directory with the supplied (relative)
// name, failing with *gcs.PreconditionError if a backing object already
// exists in GCS.
// Return the full name of the child and the GCS object it backs up.
CreateChildDir(
ctx context.Context,
name string) (fn Name, o *gcs.Object, err error)
// Delete the backing object for the child file or symlink with the given
// (relative) name and generation number, where zero means the latest
// generation. If the object/generation doesn't exist, no error is returned.
//
// metaGeneration may be set to a non-nil pointer giving a meta-generation
// precondition, but need not be.
DeleteChildFile(
ctx context.Context,
name string,
generation int64,
metaGeneration *int64) (err error)
// Delete the backing object for the child directory with the given
// (relative) name.
DeleteChildDir(
ctx context.Context,
name string) (err error)
}
type dirInode struct {
/////////////////////////
// Dependencies
/////////////////////////
bucket gcsx.SyncerBucket
mtimeClock timeutil.Clock
cacheClock timeutil.Clock
/////////////////////////
// Constant data
/////////////////////////
id fuseops.InodeID
implicitDirs bool
// INVARIANT: name.IsDir()
name Name
attrs fuseops.InodeAttributes
/////////////////////////
// Mutable state
/////////////////////////
// A mutex that must be held when calling certain methods. See documentation
// for each method.
mu syncutil.InvariantMutex
// GUARDED_BY(mu)
lc lookupCount
// cache.CheckInvariants() does not panic.
//
// GUARDED_BY(mu)
cache typeCache
}
var _ DirInode = &dirInode{}
// Create a directory inode for the name, representing the directory containing
// the objects for which it is an immediate prefix. For the root directory,
// this is the empty string.
//
// If implicitDirs is set, LookUpChild will use ListObjects to find child
// directories that are "implicitly" defined by the existence of their own
// descendents. For example, if there is an object named "foo/bar/baz" and this
// is the directory "foo", a child directory named "bar" will be implied.
//
// If typeCacheTTL is non-zero, a cache from child name to information about
// whether that name exists as a file/symlink and/or directory will be
// maintained. This may speed up calls to LookUpChild, especially when combined
// with a stat-caching GCS bucket, but comes at the cost of consistency: if the
// child is removed and recreated with a different type before the expiration,
// we may fail to find it.
//
// The initial lookup count is zero.
//
// REQUIRES: IsDirName(name)
func NewDirInode(
id fuseops.InodeID,
name Name,
attrs fuseops.InodeAttributes,
implicitDirs bool,
typeCacheTTL time.Duration,
bucket gcsx.SyncerBucket,
mtimeClock timeutil.Clock,
cacheClock timeutil.Clock) (d DirInode) {
if !name.IsDir() {
panic(fmt.Sprintf("Unexpected name: %s", name))
}
// Set up the struct.
const typeCacheCapacity = 1 << 16
typed := &dirInode{
bucket: bucket,
mtimeClock: mtimeClock,
cacheClock: cacheClock,
id: id,
implicitDirs: implicitDirs,
name: name,
attrs: attrs,
cache: newTypeCache(typeCacheCapacity/2, typeCacheTTL),
}
typed.lc.Init(id)
// Set up invariant checking.
typed.mu = syncutil.NewInvariantMutex(typed.checkInvariants)
d = typed
return
}
////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////
func (d *dirInode) checkInvariants() {
// INVARIANT: d.name.IsDir()
if !d.name.IsDir() {
panic(fmt.Sprintf("Unexpected name: %s", d.name))
}
// cache.CheckInvariants() does not panic.
d.cache.CheckInvariants()
}
func (d *dirInode) lookUpChildFile(
ctx context.Context,
name string) (result LookUpResult, err error) {
result.Bucket = d.Bucket()
result.FullName = NewFileName(d.Name(), name)
result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName)
if err != nil {
err = fmt.Errorf("statObjectMayNotExist: %v", err)
return
}
return
}
func (d *dirInode) lookUpChildDir(
ctx context.Context,
dirName string) (result LookUpResult, err error) {
b := syncutil.NewBundle(ctx)
childName := NewDirName(d.Name(), dirName)
// Stat the placeholder object.
b.Add(func(ctx context.Context) (err error) {
result.Bucket = d.Bucket()
result.FullName = childName
result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName)
if err != nil {
err = fmt.Errorf("statObjectMayNotExist: %v", err)
return
}
return
})
// If implicit directories are enabled, find out whether the child name is
// implicitly defined.
if d.implicitDirs {
b.Add(func(ctx context.Context) (err error) {
result.ImplicitDir, err = objectNamePrefixNonEmpty(
ctx,
d.bucket,
childName.GcsObjectName())
if err != nil {
err = fmt.Errorf("objectNamePrefixNonEmpty: %v", err)
return
}
return
})
}
// Wait for both.
err = b.Join()
if err != nil {
return
}
return
}
// Look up the file for a (file, dir) pair with conflicting names, overriding
// the default behavior. If the file doesn't exist, return a nil record with a
// nil error. If the directory doesn't exist, pretend the file doesn't exist.
//
// REQUIRES: strings.HasSuffix(name, ConflictingFileNameSuffix)
func (d *dirInode) lookUpConflicting(
ctx context.Context,
name string) (result LookUpResult, err error) {
strippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix)
// In order to a marked name to be accepted, we require the conflicting
// directory to exist.
var dirResult LookUpResult
dirResult, err = d.lookUpChildDir(ctx, strippedName)
if err != nil {
err = fmt.Errorf("lookUpChildDir for stripped name: %v", err)
return
}
if !dirResult.Exists() {
return
}
// The directory name exists. Find the conflicting file.
result, err = d.lookUpChildFile(ctx, strippedName)
if err != nil {
err = fmt.Errorf("lookUpChildFile for stripped name: %v", err)
return
}
return
}
// List the supplied object name prefix to find out whether it is non-empty.
func objectNamePrefixNonEmpty(
ctx context.Context,
bucket gcs.Bucket,
prefix string) (nonEmpty bool, err error) {
req := &gcs.ListObjectsRequest{
Prefix: prefix,
MaxResults: 1,
}
listing, err := bucket.ListObjects(ctx, req)
if err != nil {
err = fmt.Errorf("ListObjects: %v", err)
return
}
nonEmpty = len(listing.Objects) != 0
return
}
// Stat the object with the given name, returning (nil, nil) if the object
// doesn't exist rather than failing.
func
|
(
ctx context.Context,
bucket gcs.Bucket,
name Name) (o *gcs.Object, err error) {
// Call the bucket.
req := &gcs.StatObjectRequest{
Name: name.GcsObjectName(),
}
o, err = bucket.StatObject(ctx, req)
// Suppress "not found" errors.
if _, ok := err.(*gcs.NotFoundError); ok {
err = nil
}
// Annotate others.
if err != nil {
err = fmt.Errorf("StatObject: %v", err)
return
}
return
}
// Fail if the name already exists. Pass on errors directly.
func (d *dirInode) createNewObject(
ctx context.Context,
name Name,
metadata map[string]string) (o *gcs.Object, err error) {
// Create an empty backing object for the child, failing if it already
// exists.
var precond int64
createReq := &gcs.CreateObjectRequest{
Name: name.GcsObjectName(),
Contents: strings.NewReader(""),
GenerationPrecondition: &precond,
Metadata: metadata,
}
o, err = d.bucket.CreateObject(ctx, createReq)
if err != nil {
return
}
return
}
// An implementation detail fo filterMissing
|
statObjectMayNotExist
|
identifier_name
|
forwarder_test.go
|
hmap := map[string]interface{}{
"/ping": func(ctx json.Context, ping *Ping) (*Pong, error) {
return &Pong{"Hello, world!", address, ctx.Headers()}, nil
},
"/error": func(ctx json.Context, ping *Ping) (*Pong, error) {
return nil, errors.New("remote error")
},
}
s.Require().NoError(json.Register(channel, hmap, func(ctx context.Context, err error) {}))
thriftHandler := &pingpong.MockTChanPingPong{}
// successful request with context
thriftHandler.On("Ping", mock.MatchedBy(
func(c thrift.Context) bool {
return true
}), &pingpong.Ping{
Key: "ctxTest",
}).Return(&pingpong.Pong{
Source: address,
}, nil)
// successful request
thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{
Key: "success",
}).Return(&pingpong.Pong{
Source: address,
}, nil)
// error request
thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{
Key: "error",
}).Return(nil, &pingpong.PingError{})
server := thrift.NewServer(channel)
server.Register(pingpong.NewTChanPingPongServer(thriftHandler))
}
func (s *ForwarderTestSuite) SetupSuite() {
channel, err := tchannel.NewChannel("test", nil)
s.Require().NoError(err, "channel must be created successfully")
s.channel = channel
peer, err := tchannel.NewChannel("test", nil)
s.Require().NoError(err, "channel must be created successfully")
s.registerPong("correct pinging host", peer)
s.Require().NoError(peer.ListenAndServe("127.0.0.1:0"), "channel must listen")
sender := &MockSender{}
sender.On("Lookup", "me").Return("192.0.2.1:1", nil)
sender.On("WhoAmI").Return("192.0.2.1:1", nil)
// processes can not listen on port 0 so it is safe to assume that this address is failing immediatly, preventing the timeout path to kick in.
sender.On("Lookup", "immediate fail").Return("127.0.0.1:0", nil)
sender.On("Lookup", "reachable").Return(peer.PeerInfo().HostPort, nil)
sender.On("Lookup", "unreachable").Return("192.0.2.128:1", nil)
sender.On("Lookup", "error").Return("", errors.New("lookup error"))
s.sender = sender
s.peer = peer
s.forwarder = NewForwarder(s.sender, s.channel.GetSubChannel("forwarder"))
}
func (s *ForwarderTestSuite) TearDownSuite()
|
func (s *ForwarderTestSuite) TestForwardJSON() {
var ping Ping
var pong Pong
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
headerBytes := []byte(`{"hdr1": "val1"}`)
res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"},
tchannel.JSON, &Options{Headers: headerBytes})
s.NoError(err, "expected request to be forwarded")
s.NoError(json2.Unmarshal(res, &pong))
s.Equal("correct pinging host", pong.From)
s.Equal("Hello, world!", pong.Message)
s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers)
}
func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() {
var ping Ping
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"},
tchannel.JSON, nil)
s.EqualError(err, "remote error")
}
func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() {
var ping Ping
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RetrySchedule: []time.Duration{
100 * time.Millisecond,
},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestForwardThrift() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "success",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "ctxTest",
},
}
bytes1, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
k := ContextKey("key")
ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val"))
res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, &Options{
Ctx: ctx,
})
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "error",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.NotNil(response.PingError, "expected a pingerror")
}
func (s *ForwarderTestSuite) TestMaxRetries() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestLookupErrorInRetry() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
// lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestKeysDiverged() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
// no keys should result in destinations length of 0 during retry, causing abortion of request
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestRequestTimesOut() {
var ping Ping
dest, err := s.sender.Lookup("unreachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{
// By providing a negative timeout the context will directly return with
// a DeadlineExceeded error
Timeout: -1,
})
s.EqualError(err, "request timed out")
}
func (s *ForwarderTestSuite) TestRequest
|
{
s.channel.Close()
s.peer.Close()
}
|
identifier_body
|
forwarder_test.go
|
request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "ctxTest",
},
}
bytes1, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
k := ContextKey("key")
ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val"))
res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, &Options{
Ctx: ctx,
})
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "error",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.NotNil(response.PingError, "expected a pingerror")
}
func (s *ForwarderTestSuite) TestMaxRetries() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestLookupErrorInRetry() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
// lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestKeysDiverged() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
// no keys should result in destinations length of 0 during retry, causing abortion of request
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestRequestTimesOut() {
var ping Ping
dest, err := s.sender.Lookup("unreachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{
// By providing a negative timeout the context will directly return with
// a DeadlineExceeded error
Timeout: -1,
})
s.EqualError(err, "request timed out")
}
func (s *ForwarderTestSuite) TestRequestRerouted() {
var ping Ping
var pong Pong
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RerouteRetries: true,
RetrySchedule: []time.Duration{time.Millisecond},
})
s.NoError(err, "expected request to be rerouted")
s.NoError(json2.Unmarshal(res, &pong))
s.Equal("correct pinging host", pong.From)
s.Equal("Hello, world!", pong.Message)
}
func (s *ForwarderTestSuite) TestRequestNoReroutes() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RetrySchedule: []time.Duration{time.Millisecond},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestInvalidInflightDecrement() {
var wg sync.WaitGroup
wg.Add(1)
listener := &events.EventListener{}
listener.On("HandleEvent", mock.AnythingOfTypeArgument("forward.InflightRequestsMiscountEvent")).Run(func(args mock.Arguments) {
wg.Done()
}).Return()
s.forwarder.inflight = 0
s.forwarder.AddListener(listener)
defer s.forwarder.RemoveListener(listener)
s.forwarder.decrementInflight()
s.Assertions.Equal(int64(0), s.forwarder.inflight, "Expected inflight to stay at 0 when decremented at 0")
// wait for HandleEvent with forward.InflightRequestsMiscountEvent being called
wg.Wait()
}
func TestForwarderTestSuite(t *testing.T) {
suite.Run(t, new(ForwarderTestSuite))
}
func TestSetForwardedHeader(t *testing.T) {
// empty keys array test
ctx, _ := thrift.NewContext(0 * time.Second)
ctx = SetForwardedHeader(ctx, nil)
assert.Equal(t, "[]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set and be an empty array instead of null for the nil pointer")
// preserve existing headers
ctx, _ = thrift.NewContext(0 * time.Second)
ctx = thrift.WithHeaders(ctx, map[string]string{
"keep": "this key",
})
ctx = SetForwardedHeader(ctx, []string{"foo"})
assert.Equal(t, "[\"foo\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set to a serialized array of keys used in forwarding")
assert.Equal(t, "this key", ctx.Headers()["keep"], "expected the header set before the forwarding header to still exist")
// multiple keys encoded in the header
ctx, _ = thrift.NewContext(0 * time.Second)
ctx = SetForwardedHeader(ctx, []string{"key1", "key2"})
assert.Equal(t, "[\"key1\",\"key2\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set with both keys encoded")
}
func TestDeleteForwardedHeader(t *testing.T) {
ctx, _ := thrift.NewContext(0 * time.Second)
if DeleteForwardedHeader(ctx) {
t.Errorf("ringpop claimed that the forwarded header was set before it was set")
}
ctx = SetForwardedHeader(ctx, nil)
if !DeleteForwardedHeader(ctx) {
t.Errorf("ringpop was not able to identify that the forwarded header was set")
}
ctx, _ = thrift.NewContext(0 * time.Second)
ctx = thrift.WithHeaders(ctx, map[string]string{
"keep": "this key",
})
if DeleteForwardedHeader(ctx) {
t.Errorf("ringpop claimed that the forwarded header was set before it was set in the case of alread present headers")
}
ctx = SetForwardedHeader(ctx, nil)
if !DeleteForwardedHeader(ctx) {
t.Errorf("ringpop was not able to identify that the forwarded header was set in the case of alread present headers")
}
}
// SerializeThrift takes a thrift struct and returns the serialized bytes
// of that struct using the thrift binary protocol. This is a temporary
// measure before frames can be forwarded directly past the endpoint to the proper
// destinaiton.
func SerializeThrift(ctx context.Context, s athrift.TStruct) ([]byte, error) {
var b []byte
var buffer = bytes.NewBuffer(b)
transport := athrift.NewStreamTransportW(buffer)
if err := s.Write(ctx, athrift.NewTBinaryProtocolTransport(transport)); err != nil {
return nil, err
}
if err := transport.Flush(ctx); err != nil {
return nil, err
}
return buffer.Bytes(), nil
}
// DeserializeThrift takes a byte slice and attempts to write it into the
// given thrift struct using the thrift binary protocol. This is a temporary
// measure before frames can be forwarded directly past the endpoint to the proper
// destinaiton.
func
|
DeserializeThrift
|
identifier_name
|
|
forwarder_test.go
|
{
hmap := map[string]interface{}{
"/ping": func(ctx json.Context, ping *Ping) (*Pong, error) {
return &Pong{"Hello, world!", address, ctx.Headers()}, nil
},
"/error": func(ctx json.Context, ping *Ping) (*Pong, error) {
return nil, errors.New("remote error")
},
}
s.Require().NoError(json.Register(channel, hmap, func(ctx context.Context, err error) {}))
thriftHandler := &pingpong.MockTChanPingPong{}
// successful request with context
thriftHandler.On("Ping", mock.MatchedBy(
func(c thrift.Context) bool {
return true
}), &pingpong.Ping{
Key: "ctxTest",
}).Return(&pingpong.Pong{
Source: address,
}, nil)
// successful request
thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{
Key: "success",
}).Return(&pingpong.Pong{
Source: address,
}, nil)
// error request
thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{
Key: "error",
}).Return(nil, &pingpong.PingError{})
server := thrift.NewServer(channel)
server.Register(pingpong.NewTChanPingPongServer(thriftHandler))
}
func (s *ForwarderTestSuite) SetupSuite() {
channel, err := tchannel.NewChannel("test", nil)
s.Require().NoError(err, "channel must be created successfully")
s.channel = channel
peer, err := tchannel.NewChannel("test", nil)
s.Require().NoError(err, "channel must be created successfully")
s.registerPong("correct pinging host", peer)
s.Require().NoError(peer.ListenAndServe("127.0.0.1:0"), "channel must listen")
sender := &MockSender{}
sender.On("Lookup", "me").Return("192.0.2.1:1", nil)
sender.On("WhoAmI").Return("192.0.2.1:1", nil)
// processes can not listen on port 0 so it is safe to assume that this address is failing immediatly, preventing the timeout path to kick in.
sender.On("Lookup", "immediate fail").Return("127.0.0.1:0", nil)
sender.On("Lookup", "reachable").Return(peer.PeerInfo().HostPort, nil)
sender.On("Lookup", "unreachable").Return("192.0.2.128:1", nil)
sender.On("Lookup", "error").Return("", errors.New("lookup error"))
s.sender = sender
s.peer = peer
s.forwarder = NewForwarder(s.sender, s.channel.GetSubChannel("forwarder"))
}
func (s *ForwarderTestSuite) TearDownSuite() {
s.channel.Close()
s.peer.Close()
}
func (s *ForwarderTestSuite) TestForwardJSON() {
var ping Ping
var pong Pong
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
headerBytes := []byte(`{"hdr1": "val1"}`)
res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"},
tchannel.JSON, &Options{Headers: headerBytes})
s.NoError(err, "expected request to be forwarded")
s.NoError(json2.Unmarshal(res, &pong))
s.Equal("correct pinging host", pong.From)
s.Equal("Hello, world!", pong.Message)
s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers)
}
func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() {
var ping Ping
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"},
tchannel.JSON, nil)
s.EqualError(err, "remote error")
}
func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() {
var ping Ping
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RetrySchedule: []time.Duration{
100 * time.Millisecond,
},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestForwardThrift() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "success",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "ctxTest",
},
}
bytes1, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
k := ContextKey("key")
ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val"))
res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, &Options{
Ctx: ctx,
})
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
|
Key: "error",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.NotNil(response.PingError, "expected a pingerror")
}
func (s *ForwarderTestSuite) TestMaxRetries() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestLookupErrorInRetry() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
// lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestKeysDiverged() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
// no keys should result in destinations length of 0 during retry, causing abortion of request
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestRequestTimesOut() {
var ping Ping
dest, err := s.sender.Lookup("unreachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{
// By providing a negative timeout the context will directly return with
// a DeadlineExceeded error
Timeout: -1,
})
s.EqualError(err, "request timed out")
}
func (s *ForwarderTestSuite) TestRequest
|
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
|
random_line_split
|
forwarder_test.go
|
(res, &pong))
s.Equal("correct pinging host", pong.From)
s.Equal("Hello, world!", pong.Message)
s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers)
}
func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() {
var ping Ping
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"},
tchannel.JSON, nil)
s.EqualError(err, "remote error")
}
func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() {
var ping Ping
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RetrySchedule: []time.Duration{
100 * time.Millisecond,
},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestForwardThrift() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "success",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "ctxTest",
},
}
bytes1, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
k := ContextKey("key")
ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val"))
res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, &Options{
Ctx: ctx,
})
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.Equal("correct pinging host", response.Success.Source)
}
func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() {
dest, err := s.sender.Lookup("reachable")
s.NoError(err)
request := &pingpong.PingPongPingArgs{
Request: &pingpong.Ping{
Key: "error",
},
}
bytes, err := SerializeThrift(context.Background(), request)
s.NoError(err, "expected ping to be serialized")
res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"},
tchannel.Thrift, nil)
s.NoError(err, "expected request to be forwarded")
var response pingpong.PingPongPingResult
err = DeserializeThrift(context.Background(), res, &response)
s.NoError(err)
s.NotNil(response.PingError, "expected a pingerror")
}
func (s *ForwarderTestSuite) TestMaxRetries() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestLookupErrorInRetry() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"},
tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
// lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestKeysDiverged() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
// no keys should result in destinations length of 0 during retry, causing abortion of request
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{
MaxRetries: 2,
RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond},
})
s.EqualError(err, "key destinations have diverged")
}
func (s *ForwarderTestSuite) TestRequestTimesOut() {
var ping Ping
dest, err := s.sender.Lookup("unreachable")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{
// By providing a negative timeout the context will directly return with
// a DeadlineExceeded error
Timeout: -1,
})
s.EqualError(err, "request timed out")
}
func (s *ForwarderTestSuite) TestRequestRerouted() {
var ping Ping
var pong Pong
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RerouteRetries: true,
RetrySchedule: []time.Duration{time.Millisecond},
})
s.NoError(err, "expected request to be rerouted")
s.NoError(json2.Unmarshal(res, &pong))
s.Equal("correct pinging host", pong.From)
s.Equal("Hello, world!", pong.Message)
}
func (s *ForwarderTestSuite) TestRequestNoReroutes() {
var ping Ping
dest, err := s.sender.Lookup("immediate fail")
s.NoError(err)
_, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"},
tchannel.JSON, &Options{
MaxRetries: 1,
RetrySchedule: []time.Duration{time.Millisecond},
})
s.EqualError(err, "max retries exceeded")
}
func (s *ForwarderTestSuite) TestInvalidInflightDecrement() {
var wg sync.WaitGroup
wg.Add(1)
listener := &events.EventListener{}
listener.On("HandleEvent", mock.AnythingOfTypeArgument("forward.InflightRequestsMiscountEvent")).Run(func(args mock.Arguments) {
wg.Done()
}).Return()
s.forwarder.inflight = 0
s.forwarder.AddListener(listener)
defer s.forwarder.RemoveListener(listener)
s.forwarder.decrementInflight()
s.Assertions.Equal(int64(0), s.forwarder.inflight, "Expected inflight to stay at 0 when decremented at 0")
// wait for HandleEvent with forward.InflightRequestsMiscountEvent being called
wg.Wait()
}
func TestForwarderTestSuite(t *testing.T) {
suite.Run(t, new(ForwarderTestSuite))
}
func TestSetForwardedHeader(t *testing.T) {
// empty keys array test
ctx, _ := thrift.NewContext(0 * time.Second)
ctx = SetForwardedHeader(ctx, nil)
assert.Equal(t, "[]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set and be an empty array instead of null for the nil pointer")
// preserve existing headers
ctx, _ = thrift.NewContext(0 * time.Second)
ctx = thrift.WithHeaders(ctx, map[string]string{
"keep": "this key",
})
ctx = SetForwardedHeader(ctx, []string{"foo"})
assert.Equal(t, "[\"foo\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set to a serialized array of keys used in forwarding")
assert.Equal(t, "this key", ctx.Headers()["keep"], "expected the header set before the forwarding header to still exist")
// multiple keys encoded in the header
ctx, _ = thrift.NewContext(0 * time.Second)
ctx = SetForwardedHeader(ctx, []string{"key1", "key2"})
assert.Equal(t, "[\"key1\",\"key2\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set with both keys encoded")
}
func TestDeleteForwardedHeader(t *testing.T) {
ctx, _ := thrift.NewContext(0 * time.Second)
if DeleteForwardedHeader(ctx)
|
{
t.Errorf("ringpop claimed that the forwarded header was set before it was set")
}
|
conditional_block
|
|
myrule2.js
|
Date() {
var date = new Date();
var seperator1 = "-";
var seperator2 = ":";
var month = date.getMonth() + 1;
var strDate = date.getDate();
if (month >= 1 && month <= 9) {
month = "0" + month;
}
if (strDate >= 0 && strDate <= 9) {
strDate = "0" + strDate;
}
var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate
+ " " + date.getHours() + seperator2 + date.getMinutes()
+ seperator2 + date.getSeconds();
return currentdate;
}
var insertData = function(db, callback) {
//连接到表 site
var collection = db.collection('site');
//插入数据
data = articleLinkArr;
collection.insert(data, function(err, result) {
if(err)
{
console.log('Error:'+ err);
return;
}
callback(result);
});
};
module.exports = {
token: Date.now(),
summary: "哈工大(威海) 基于中间人攻击的微信公众号爬虫",
getNextChunk: function (url, delay, nonce) {
if (nonce) {
var next = '<script nonce="' + nonce + '" type="text/javascript">';
} else {
var next = '<script type="text/javascript">';
}
next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');';
next += 'setTimeout(function(){window.location.href="' + url + '";},10000);';
next += '</script>';
return next;
},
getNotification: function () {
return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' +
'哈工大(威海)提示:10秒后没有自动刷新请手动刷新</h1>';
},
getNextUrl: function (currentUrl, rawList, appmsg_token) {
console.log("开始捕获下一页历史消息、、、、、、");
if (!rawList) {
return '';
}
var currentUrlArr = currentUrl.split("&");
var nextHistoryPageArr = [];
for(var item in currentUrlArr){
//console.log(currentUrlArr[item]);
if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"||
currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){
nextHistoryPageArr.push(currentUrlArr[item]);
}
}
nextHistoryPageUrl = nextHistoryPageArr.join('&');
nextHistoryPageUrl += "&f=json";
//偏移量将在主函数中由函数自动修改 这里不必更改
nextHistoryPageUrl += "&offset=10";
nextHistoryPageUrl += "&count=10&is_ok=1";
nextHistoryPageUrl += "&uin=777&key=777";
nextHistoryPageUrl += "&wxtoken=";
nextHistoryPageUrl += "&appmsg_token=";
nextHistoryPageUrl += appmsg_token;
nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl;
nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg");
console.log("this is raw!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
console.log(nextHistoryPageUrl);
firstLink = nextHistoryPageUrl;
return nextHistoryPageUrl;
},
replaceResponseHeader: function(req,res,header){
header = header || {};
console.log("开始:报头由json改为html")
if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) {
console.log("we have re[.p;ace1");
header['content-type'] = "text/html; charset=UTF-8";
}
console.log("成功结束:报头由json改为html")
return header;
},
//入口函数
replaceServerResDataAsync: function (req, res, serverResData, callback) {
console.log("抓捕到数据包。。。");
// console.log(articleLinkArr.size);
if(/mp\/profile_ext\?action=home/i.test(req.url)){
try{
var historyHomePage = /var msgList = \'(.*?)\';/;
var historyHomePageList = historyHomePage.exec(serverResData.toString());
if(!historyHomePageList){
callback(serverResData);
console.log("抓捕到空包!!");
return;
}
historyHomePageList[1] = historyHomePageList[1].replace(/"/g, "'");
var historyHomePageObj = eval("("+historyHomePageList[1]+")");
|
//问题:抓包显示有8个介绍界面,但只在historyHomePageList中获取到4个,原因是正则匹配时有问题??
//解决:列表中间的一个historyHomePageObj['list'][item]["app_msg_ext_info"]为undefined, 异常阻止了其他
// 介绍页面的获取!
for(var item in historyHomePageObj['list']){
console.log(item);
if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){
continue;
}
console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]);
var title = historyHomePageObj.list[item].app_msg_ext_info.title;
var author = historyHomePageObj.list[item].app_msg_ext_info.author;
var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"];
var datetime = historyHomePageObj.list[item].comm_msg_info.datetime;
var id = historyHomePageObj.list[item].comm_msg_info.id;
console.log(title);
//公众号名称
var nickname_pattern = /var nickname = \"(.*?)\"/;
var nickname = nickname_pattern.exec(serverResData.toString())[1];
console.log("公众号的名字是————————", nickname);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
// articleLinkArr.push(nickname);
articleLinkArr.push(articleJson);
}
console.log(result);
/*
MongoClient.connect(DB_CONN_STR, function(err, db) {
console.log("连接MongoDB成功!");
insertData(db, function(result) {
console.log(result);
db.close();
articleLinkArr = [];
});
});
*/
var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/;
var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1];
var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token);
firstContent = serverResData;
//注入跳转下一历史页面的js
var next = this.getNextChunk(nextHistoryPageUrl, 6000);
var note = this.getNotification();
serverResData = note + serverResData + next;
nowOffset = 0;
console.log("成功获取到第一页历史消息页面666666666666666666666__end");
callback(serverResData);
}
catch (e){
callback(serverResData);
}
}
else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){
try {
if(!serverResData){
console.log("抓取公众号全部历史文章结束!");
return;
}
nowOffset += 10;
firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString());
//注入跳转再下一页的js
var note = this.getNotification();
var next = this.getNextChunk(firstLink, 6000);
var newContent = note + firstContent + next;
var newData = serverResData;
var ResDataobj = JSON.parse(newData.toString());
var general_msg_list = ResDataobj['general_msg_list'];
var listJson = JSON.parse(general_msg_list);
for(var artileIndex in listJson.list){
try {
var title = listJson.list[artileIndex].app_msg_ext_info.title;
var author = listJson.list[artileIndex].app_msg_ext_info.author;
var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url;
var datetime = listJson.list[artileIndex].comm_msg_info.datetime;
var id = listJson.list[artileIndex].comm_msg_info.id;
console.log(title);
console.log(content_url);
console.log(id);
console.log(datetime);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
console.log("__________", articleLinkArr);
articleLinkArr.push(articleJson);
console.log("________
|
random_line_split
|
|
myrule2.js
|
() {
var date = new Date();
var seperator1 = "-";
var seperator2 = ":";
var month = date.getMonth() + 1;
var strDate = date.getDate();
if (month >= 1 && month <= 9) {
month = "0" + month;
}
if (strDate >= 0 && strDate <= 9) {
strDate = "0" + strDate;
}
var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate
+ " " + date.getHours() + seperator2 + date.getMinutes()
+ seperator2 + date.getSeconds();
return currentdate;
}
var insertData = function(db, callback) {
//连接到表 site
var collection = db.collection('site');
//插入数据
data = articleLinkArr;
collection.insert(data, function(err, result) {
if(err)
{
console.log('Error:'+ err);
return;
}
callback(result);
});
};
module.exports = {
token: Date.now(),
summary: "哈工大(威海) 基于中间人攻击的微信公众号爬虫",
getNextChunk: function (url, delay, nonce) {
if (nonce) {
var next = '<script nonce="' + nonce + '" type="text/javas
|
}
next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');';
next += 'setTimeout(function(){window.location.href="' + url + '";},10000);';
next += '</script>';
return next;
},
getNotification: function () {
return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' +
'哈工大(威海)提示:10秒后没有自动刷新请手动刷新</h1>';
},
getNextUrl: function (currentUrl, rawList, appmsg_token) {
console.log("开始捕获下一页历史消息、、、、、、");
if (!rawList) {
return '';
}
var currentUrlArr = currentUrl.split("&");
var nextHistoryPageArr = [];
for(var item in currentUrlArr){
//console.log(currentUrlArr[item]);
if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"||
currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){
nextHistoryPageArr.push(currentUrlArr[item]);
}
}
nextHistoryPageUrl = nextHistoryPageArr.join('&');
nextHistoryPageUrl += "&f=json";
//偏移量将在主函数中由函数自动修改 这里不必更改
nextHistoryPageUrl += "&offset=10";
nextHistoryPageUrl += "&count=10&is_ok=1";
nextHistoryPageUrl += "&uin=777&key=777";
nextHistoryPageUrl += "&wxtoken=";
nextHistoryPageUrl += "&appmsg_token=";
nextHistoryPageUrl += appmsg_token;
nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl;
nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg");
console.log("this is raw!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
console.log(nextHistoryPageUrl);
firstLink = nextHistoryPageUrl;
return nextHistoryPageUrl;
},
replaceResponseHeader: function(req,res,header){
header = header || {};
console.log("开始:报头由json改为html")
if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) {
console.log("we have re[.p;ace1");
header['content-type'] = "text/html; charset=UTF-8";
}
console.log("成功结束:报头由json改为html")
return header;
},
//入口函数
replaceServerResDataAsync: function (req, res, serverResData, callback) {
console.log("抓捕到数据包。。。");
// console.log(articleLinkArr.size);
if(/mp\/profile_ext\?action=home/i.test(req.url)){
try{
var historyHomePage = /var msgList = \'(.*?)\';/;
var historyHomePageList = historyHomePage.exec(serverResData.toString());
if(!historyHomePageList){
callback(serverResData);
console.log("抓捕到空包!!");
return;
}
historyHomePageList[1] = historyHomePageList[1].replace(/"/g, "'");
var historyHomePageObj = eval("("+historyHomePageList[1]+")");
//问题:抓包显示有8个介绍界面,但只在historyHomePageList中获取到4个,原因是正则匹配时有问题??
//解决:列表中间的一个historyHomePageObj['list'][item]["app_msg_ext_info"]为undefined, 异常阻止了其他
// 介绍页面的获取!
for(var item in historyHomePageObj['list']){
console.log(item);
if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){
continue;
}
console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]);
var title = historyHomePageObj.list[item].app_msg_ext_info.title;
var author = historyHomePageObj.list[item].app_msg_ext_info.author;
var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"];
var datetime = historyHomePageObj.list[item].comm_msg_info.datetime;
var id = historyHomePageObj.list[item].comm_msg_info.id;
console.log(title);
//公众号名称
var nickname_pattern = /var nickname = \"(.*?)\"/;
var nickname = nickname_pattern.exec(serverResData.toString())[1];
console.log("公众号的名字是————————", nickname);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
// articleLinkArr.push(nickname);
articleLinkArr.push(articleJson);
}
console.log(result);
/*
MongoClient.connect(DB_CONN_STR, function(err, db) {
console.log("连接MongoDB成功!");
insertData(db, function(result) {
console.log(result);
db.close();
articleLinkArr = [];
});
});
*/
var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/;
var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1];
var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token);
firstContent = serverResData;
//注入跳转下一历史页面的js
var next = this.getNextChunk(nextHistoryPageUrl, 6000);
var note = this.getNotification();
serverResData = note + serverResData + next;
nowOffset = 0;
console.log("成功获取到第一页历史消息页面666666666666666666666__end");
callback(serverResData);
}
catch (e){
callback(serverResData);
}
}
else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){
try {
if(!serverResData){
console.log("抓取公众号全部历史文章结束!");
return;
}
nowOffset += 10;
firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString());
//注入跳转再下一页的js
var note = this.getNotification();
var next = this.getNextChunk(firstLink, 6000);
var newContent = note + firstContent + next;
var newData = serverResData;
var ResDataobj = JSON.parse(newData.toString());
var general_msg_list = ResDataobj['general_msg_list'];
var listJson = JSON.parse(general_msg_list);
for(var artileIndex in listJson.list){
try {
var title = listJson.list[artileIndex].app_msg_ext_info.title;
var author = listJson.list[artileIndex].app_msg_ext_info.author;
var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url;
var datetime = listJson.list[artileIndex].comm_msg_info.datetime;
var id = listJson.list[artileIndex].comm_msg_info.id;
console.log(title);
console.log(content_url);
console.log(id);
console.log(datetime);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
console.log("__________", articleLinkArr);
articleLinkArr.push(articleJson);
console.log("
|
cript">';
} else {
var next = '<script type="text/javascript">';
|
conditional_block
|
myrule2.js
|
Date() {
var date = new
|
function(db, callback) {
//连接到表 site
var collection = db.collection('site');
//插入数据
data = articleLinkArr;
collection.insert(data, function(err, result) {
if(err)
{
console.log('Error:'+ err);
return;
}
callback(result);
});
};
module.exports = {
token: Date.now(),
summary: "哈工大(威海) 基于中间人攻击的微信公众号爬虫",
getNextChunk: function (url, delay, nonce) {
if (nonce) {
var next = '<script nonce="' + nonce + '" type="text/javascript">';
} else {
var next = '<script type="text/javascript">';
}
next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');';
next += 'setTimeout(function(){window.location.href="' + url + '";},10000);';
next += '</script>';
return next;
},
getNotification: function () {
return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' +
'哈工大(威海)提示:10秒后没有自动刷新请手动刷新</h1>';
},
getNextUrl: function (currentUrl, rawList, appmsg_token) {
console.log("开始捕获下一页历史消息、、、、、、");
if (!rawList) {
return '';
}
var currentUrlArr = currentUrl.split("&");
var nextHistoryPageArr = [];
for(var item in currentUrlArr){
//console.log(currentUrlArr[item]);
if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"||
currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){
nextHistoryPageArr.push(currentUrlArr[item]);
}
}
nextHistoryPageUrl = nextHistoryPageArr.join('&');
nextHistoryPageUrl += "&f=json";
//偏移量将在主函数中由函数自动修改 这里不必更改
nextHistoryPageUrl += "&offset=10";
nextHistoryPageUrl += "&count=10&is_ok=1";
nextHistoryPageUrl += "&uin=777&key=777";
nextHistoryPageUrl += "&wxtoken=";
nextHistoryPageUrl += "&appmsg_token=";
nextHistoryPageUrl += appmsg_token;
nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl;
nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg");
console.log("this is raw!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
console.log(nextHistoryPageUrl);
firstLink = nextHistoryPageUrl;
return nextHistoryPageUrl;
},
replaceResponseHeader: function(req,res,header){
header = header || {};
console.log("开始:报头由json改为html")
if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) {
console.log("we have re[.p;ace1");
header['content-type'] = "text/html; charset=UTF-8";
}
console.log("成功结束:报头由json改为html")
return header;
},
//入口函数
replaceServerResDataAsync: function (req, res, serverResData, callback) {
console.log("抓捕到数据包。。。");
// console.log(articleLinkArr.size);
if(/mp\/profile_ext\?action=home/i.test(req.url)){
try{
var historyHomePage = /var msgList = \'(.*?)\';/;
var historyHomePageList = historyHomePage.exec(serverResData.toString());
if(!historyHomePageList){
callback(serverResData);
console.log("抓捕到空包!!");
return;
}
historyHomePageList[1] = historyHomePageList[1].replace(/"/g, "'");
var historyHomePageObj = eval("("+historyHomePageList[1]+")");
//问题:抓包显示有8个介绍界面,但只在historyHomePageList中获取到4个,原因是正则匹配时有问题??
//解决:列表中间的一个historyHomePageObj['list'][item]["app_msg_ext_info"]为undefined, 异常阻止了其他
// 介绍页面的获取!
for(var item in historyHomePageObj['list']){
console.log(item);
if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){
continue;
}
console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]);
var title = historyHomePageObj.list[item].app_msg_ext_info.title;
var author = historyHomePageObj.list[item].app_msg_ext_info.author;
var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"];
var datetime = historyHomePageObj.list[item].comm_msg_info.datetime;
var id = historyHomePageObj.list[item].comm_msg_info.id;
console.log(title);
//公众号名称
var nickname_pattern = /var nickname = \"(.*?)\"/;
var nickname = nickname_pattern.exec(serverResData.toString())[1];
console.log("公众号的名字是————————", nickname);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
// articleLinkArr.push(nickname);
articleLinkArr.push(articleJson);
}
console.log(result);
/*
MongoClient.connect(DB_CONN_STR, function(err, db) {
console.log("连接MongoDB成功!");
insertData(db, function(result) {
console.log(result);
db.close();
articleLinkArr = [];
});
});
*/
var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/;
var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1];
var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token);
firstContent = serverResData;
//注入跳转下一历史页面的js
var next = this.getNextChunk(nextHistoryPageUrl, 6000);
var note = this.getNotification();
serverResData = note + serverResData + next;
nowOffset = 0;
console.log("成功获取到第一页历史消息页面666666666666666666666__end");
callback(serverResData);
}
catch (e){
callback(serverResData);
}
}
else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){
try {
if(!serverResData){
console.log("抓取公众号全部历史文章结束!");
return;
}
nowOffset += 10;
firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString());
//注入跳转再下一页的js
var note = this.getNotification();
var next = this.getNextChunk(firstLink, 6000);
var newContent = note + firstContent + next;
var newData = serverResData;
var ResDataobj = JSON.parse(newData.toString());
var general_msg_list = ResDataobj['general_msg_list'];
var listJson = JSON.parse(general_msg_list);
for(var artileIndex in listJson.list){
try {
var title = listJson.list[artileIndex].app_msg_ext_info.title;
var author = listJson.list[artileIndex].app_msg_ext_info.author;
var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url;
var datetime = listJson.list[artileIndex].comm_msg_info.datetime;
var id = listJson.list[artileIndex].comm_msg_info.id;
console.log(title);
console.log(content_url);
console.log(id);
console.log(datetime);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
console.log("__________", articleLinkArr);
articleLinkArr.push(articleJson);
console.log("__________
|
Date();
var seperator1 = "-";
var seperator2 = ":";
var month = date.getMonth() + 1;
var strDate = date.getDate();
if (month >= 1 && month <= 9) {
month = "0" + month;
}
if (strDate >= 0 && strDate <= 9) {
strDate = "0" + strDate;
}
var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate
+ " " + date.getHours() + seperator2 + date.getMinutes()
+ seperator2 + date.getSeconds();
return currentdate;
}
var insertData =
|
identifier_body
|
myrule2.js
|
Date() {
|
new Date();
var seperator1 = "-";
var seperator2 = ":";
var month = date.getMonth() + 1;
var strDate = date.getDate();
if (month >= 1 && month <= 9) {
month = "0" + month;
}
if (strDate >= 0 && strDate <= 9) {
strDate = "0" + strDate;
}
var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate
+ " " + date.getHours() + seperator2 + date.getMinutes()
+ seperator2 + date.getSeconds();
return currentdate;
}
var insertData = function(db, callback) {
//连接到表 site
var collection = db.collection('site');
//插入数据
data = articleLinkArr;
collection.insert(data, function(err, result) {
if(err)
{
console.log('Error:'+ err);
return;
}
callback(result);
});
};
module.exports = {
token: Date.now(),
summary: "哈工大(威海) 基于中间人攻击的微信公众号爬虫",
getNextChunk: function (url, delay, nonce) {
if (nonce) {
var next = '<script nonce="' + nonce + '" type="text/javascript">';
} else {
var next = '<script type="text/javascript">';
}
next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');';
next += 'setTimeout(function(){window.location.href="' + url + '";},10000);';
next += '</script>';
return next;
},
getNotification: function () {
return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' +
'哈工大(威海)提示:10秒后没有自动刷新请手动刷新</h1>';
},
getNextUrl: function (currentUrl, rawList, appmsg_token) {
console.log("开始捕获下一页历史消息、、、、、、");
if (!rawList) {
return '';
}
var currentUrlArr = currentUrl.split("&");
var nextHistoryPageArr = [];
for(var item in currentUrlArr){
//console.log(currentUrlArr[item]);
if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"||
currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){
nextHistoryPageArr.push(currentUrlArr[item]);
}
}
nextHistoryPageUrl = nextHistoryPageArr.join('&');
nextHistoryPageUrl += "&f=json";
//偏移量将在主函数中由函数自动修改 这里不必更改
nextHistoryPageUrl += "&offset=10";
nextHistoryPageUrl += "&count=10&is_ok=1";
nextHistoryPageUrl += "&uin=777&key=777";
nextHistoryPageUrl += "&wxtoken=";
nextHistoryPageUrl += "&appmsg_token=";
nextHistoryPageUrl += appmsg_token;
nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl;
nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg");
console.log("this is raw!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
console.log(nextHistoryPageUrl);
firstLink = nextHistoryPageUrl;
return nextHistoryPageUrl;
},
replaceResponseHeader: function(req,res,header){
header = header || {};
console.log("开始:报头由json改为html")
if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) {
console.log("we have re[.p;ace1");
header['content-type'] = "text/html; charset=UTF-8";
}
console.log("成功结束:报头由json改为html")
return header;
},
//入口函数
replaceServerResDataAsync: function (req, res, serverResData, callback) {
console.log("抓捕到数据包。。。");
// console.log(articleLinkArr.size);
if(/mp\/profile_ext\?action=home/i.test(req.url)){
try{
var historyHomePage = /var msgList = \'(.*?)\';/;
var historyHomePageList = historyHomePage.exec(serverResData.toString());
if(!historyHomePageList){
callback(serverResData);
console.log("抓捕到空包!!");
return;
}
historyHomePageList[1] = historyHomePageList[1].replace(/"/g, "'");
var historyHomePageObj = eval("("+historyHomePageList[1]+")");
//问题:抓包显示有8个介绍界面,但只在historyHomePageList中获取到4个,原因是正则匹配时有问题??
//解决:列表中间的一个historyHomePageObj['list'][item]["app_msg_ext_info"]为undefined, 异常阻止了其他
// 介绍页面的获取!
for(var item in historyHomePageObj['list']){
console.log(item);
if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){
continue;
}
console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]);
var title = historyHomePageObj.list[item].app_msg_ext_info.title;
var author = historyHomePageObj.list[item].app_msg_ext_info.author;
var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"];
var datetime = historyHomePageObj.list[item].comm_msg_info.datetime;
var id = historyHomePageObj.list[item].comm_msg_info.id;
console.log(title);
//公众号名称
var nickname_pattern = /var nickname = \"(.*?)\"/;
var nickname = nickname_pattern.exec(serverResData.toString())[1];
console.log("公众号的名字是————————", nickname);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
// articleLinkArr.push(nickname);
articleLinkArr.push(articleJson);
}
console.log(result);
/*
MongoClient.connect(DB_CONN_STR, function(err, db) {
console.log("连接MongoDB成功!");
insertData(db, function(result) {
console.log(result);
db.close();
articleLinkArr = [];
});
});
*/
var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/;
var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1];
var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token);
firstContent = serverResData;
//注入跳转下一历史页面的js
var next = this.getNextChunk(nextHistoryPageUrl, 6000);
var note = this.getNotification();
serverResData = note + serverResData + next;
nowOffset = 0;
console.log("成功获取到第一页历史消息页面666666666666666666666__end");
callback(serverResData);
}
catch (e){
callback(serverResData);
}
}
else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){
try {
if(!serverResData){
console.log("抓取公众号全部历史文章结束!");
return;
}
nowOffset += 10;
firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString());
//注入跳转再下一页的js
var note = this.getNotification();
var next = this.getNextChunk(firstLink, 6000);
var newContent = note + firstContent + next;
var newData = serverResData;
var ResDataobj = JSON.parse(newData.toString());
var general_msg_list = ResDataobj['general_msg_list'];
var listJson = JSON.parse(general_msg_list);
for(var artileIndex in listJson.list){
try {
var title = listJson.list[artileIndex].app_msg_ext_info.title;
var author = listJson.list[artileIndex].app_msg_ext_info.author;
var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url;
var datetime = listJson.list[artileIndex].comm_msg_info.datetime;
var id = listJson.list[artileIndex].comm_msg_info.id;
console.log(title);
console.log(content_url);
console.log(id);
console.log(datetime);
//当前历史页的文章各种信息
var getdatetime = getNowFormatDate();
var articleJson = {
"title": title,
"author": author,
"content_url": content_url,
"datetime": datetime,
"id": id,
"getdatetime": getdatetime
};
console.log("__________", articleLinkArr);
articleLinkArr.push(articleJson);
console.log("
|
var date =
|
identifier_name
|
main.go
|
Config()
sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig)
dbConfig := globalsessionkeeper.ChompConfig.DbConfig
fmt.Printf("\n\n\nIn init, new manager\n")
fmt.Printf("In init, new manager\n")
fmt.Printf("In init, new manager\n\n\n\n")
globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig))
if err != nil {
fmt.Printf("Coud not start session..Error: %v\n", err.Error())
os.Exit(-1)
}
err = errors.New("")
fmt.Printf("Opening DB connection\n")
// Connection string looks as the following
//MyDb, err = sql.Open("service", "user@tcp(ip:port)/database")
connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db)
fmt.Printf("ConnString = %s\n", connString)
MyDb, err = sql.Open("mysql", connString)
if err != nil {
// return err
fmt.Printf("Error = %v\n", err)
panic(fmt.Sprintf("%v", err))
}
globalsessionkeeper.GlobalSessions.SetSecure(true)
go globalsessionkeeper.GlobalSessions.GC()
}
func BasicAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
fmt.Println("made it to basic auth")
fmt.Printf("Headers = %v\n", r.Header)
fmt.Printf("Len = %v\n", len(r.Header))
if len(r.Header["Authorization"]) <= 0 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
auth := strings.SplitN(r.Header["Authorization"][0], " ", 2)
fmt.Printf("auth = %v", auth)
if len(auth) != 2 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
} else if auth[0] != "Basic" {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
payload, _ := base64.StdEncoding.DecodeString(auth[1])
pair := strings.SplitN(string(payload), ":", 2)
if len(pair) != 2 || !Validate(pair[0], pair[1]) {
http.Error(w, "authorization failed", http.StatusUnauthorized)
return
}
pass(w, r)
}
}
func (ah AppHandler) SessionAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
cookie := globalsessionkeeper.GetCookie(r)
if cookie == "" {
//need logging here instead of print
fmt.Println("Session Auth Cookie = %v", cookie)
query := mux.Vars(r)
fmt.Printf("Query here.. %v\n", query)
if query["token"] != "" {
fmt.Printf("Error not nil, updating error instacode %v\n", query["token"])
cookie = query["token"]
} else {
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"})
return
}
}
sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie)
if err != nil {
//need logging here instead of print
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
defer sessionStore.SessionRelease(w)
ah.appContext.SessionStore = sessionStore
sessionUser := sessionStore.Get("username")
fmt.Printf("Session Auth SessionUser = %v\n", sessionUser)
if sessionUser == nil {
//need logging here instead of print
fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser)
userInfo := new(db.UserInfo)
userInfo.Username = reflect.ValueOf(sessionUser).String()
err = userInfo.GetUserInfo(MyDb)
if err != nil {
//need logging here instead of print
fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
pass(w, r)
}
}
func GetConfig() error {
configFile, err := ioutil.ReadFile("./chomp_private/config.json")
if err != nil {
return err
}
err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig)
if err != nil {
fmt.Printf("Err = %v", err)
return err
}
return nil
}
func Validate(username, password string) bool {
fmt.Println("Made it to validate..")
for _, e := range globalsessionkeeper.ChompConfig.Authorized {
if e.User == username && e.Pass == password {
return true
}
}
return false
}
type AppHandler struct {
appContext *globalsessionkeeper.AppContext
h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error)
}
func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse)
|
func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) {
fmt.Printf("AH Context = %v\n", ah.appContext)
err := ah.h(ah.appContext, w, r)
if err != nil {
// log.Printf("HTTP %d: %q", status, err)
status := err.(globalsessionkeeper.ErrorResponse).Code
switch status {
case http.StatusNotFound:
fmt.Printf("Error: Page not found\n")
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
case http.StatusInternalServerError:
fmt.Printf("Error: %v\n", http.StatusInternalServerError)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
default:
fmt.Printf("Error: %v\n", err)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
}
}
}
func main() {
defer MyDb.Close()
router := mux.NewRouter().StrictSlash(true)
context := &globalsessionkeeper.AppContext{DB: MyDb}
fmt.Printf("Context = %v\n", context)
router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp)
router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp)
router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp)
router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp))
router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp))
router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp))
router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp))
//this is how you write a query parameter capture uri
router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp))
router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp))
router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp))
router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp))
router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp))
router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context,
|
{
fmt.Printf("Going out as: %v\n", errorResponse)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errorResponse.Code)
json.NewEncoder(w).Encode(errorResponse)
}
|
identifier_body
|
main.go
|
Config()
sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig)
dbConfig := globalsessionkeeper.ChompConfig.DbConfig
fmt.Printf("\n\n\nIn init, new manager\n")
fmt.Printf("In init, new manager\n")
fmt.Printf("In init, new manager\n\n\n\n")
globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig))
if err != nil {
fmt.Printf("Coud not start session..Error: %v\n", err.Error())
os.Exit(-1)
}
err = errors.New("")
fmt.Printf("Opening DB connection\n")
// Connection string looks as the following
//MyDb, err = sql.Open("service", "user@tcp(ip:port)/database")
connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db)
fmt.Printf("ConnString = %s\n", connString)
MyDb, err = sql.Open("mysql", connString)
if err != nil {
// return err
fmt.Printf("Error = %v\n", err)
panic(fmt.Sprintf("%v", err))
}
globalsessionkeeper.GlobalSessions.SetSecure(true)
go globalsessionkeeper.GlobalSessions.GC()
}
func BasicAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
fmt.Println("made it to basic auth")
fmt.Printf("Headers = %v\n", r.Header)
fmt.Printf("Len = %v\n", len(r.Header))
if len(r.Header["Authorization"]) <= 0 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
auth := strings.SplitN(r.Header["Authorization"][0], " ", 2)
fmt.Printf("auth = %v", auth)
if len(auth) != 2 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
} else if auth[0] != "Basic" {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
payload, _ := base64.StdEncoding.DecodeString(auth[1])
pair := strings.SplitN(string(payload), ":", 2)
if len(pair) != 2 || !Validate(pair[0], pair[1]) {
http.Error(w, "authorization failed", http.StatusUnauthorized)
return
}
pass(w, r)
}
}
func (ah AppHandler) SessionAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
cookie := globalsessionkeeper.GetCookie(r)
if cookie == "" {
//need logging here instead of print
fmt.Println("Session Auth Cookie = %v", cookie)
query := mux.Vars(r)
fmt.Printf("Query here.. %v\n", query)
if query["token"] != "" {
fmt.Printf("Error not nil, updating error instacode %v\n", query["token"])
cookie = query["token"]
} else {
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"})
return
}
}
sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie)
if err != nil {
//need logging here instead of print
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
defer sessionStore.SessionRelease(w)
ah.appContext.SessionStore = sessionStore
sessionUser := sessionStore.Get("username")
fmt.Printf("Session Auth SessionUser = %v\n", sessionUser)
if sessionUser == nil {
//need logging here instead of print
fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser)
userInfo := new(db.UserInfo)
userInfo.Username = reflect.ValueOf(sessionUser).String()
err = userInfo.GetUserInfo(MyDb)
if err != nil {
//need logging here instead of print
fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
pass(w, r)
}
}
func GetConfig() error {
configFile, err := ioutil.ReadFile("./chomp_private/config.json")
if err != nil {
return err
}
err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig)
if err != nil {
fmt.Printf("Err = %v", err)
return err
}
return nil
}
func Validate(username, password string) bool {
fmt.Println("Made it to validate..")
for _, e := range globalsessionkeeper.ChompConfig.Authorized {
if e.User == username && e.Pass == password {
return true
}
}
return false
}
type AppHandler struct {
appContext *globalsessionkeeper.AppContext
h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error)
}
func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse) {
fmt.Printf("Going out as: %v\n", errorResponse)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errorResponse.Code)
json.NewEncoder(w).Encode(errorResponse)
}
func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) {
|
status := err.(globalsessionkeeper.ErrorResponse).Code
switch status {
case http.StatusNotFound:
fmt.Printf("Error: Page not found\n")
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
case http.StatusInternalServerError:
fmt.Printf("Error: %v\n", http.StatusInternalServerError)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
default:
fmt.Printf("Error: %v\n", err)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
}
}
}
func main() {
defer MyDb.Close()
router := mux.NewRouter().StrictSlash(true)
context := &globalsessionkeeper.AppContext{DB: MyDb}
fmt.Printf("Context = %v\n", context)
router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp)
router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp)
router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp)
router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp))
router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp))
router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp))
router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp))
//this is how you write a query parameter capture uri
router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp))
router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp))
router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp))
router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp))
router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp))
router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h
|
fmt.Printf("AH Context = %v\n", ah.appContext)
err := ah.h(ah.appContext, w, r)
if err != nil {
// log.Printf("HTTP %d: %q", status, err)
|
random_line_split
|
main.go
|
Config()
sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig)
dbConfig := globalsessionkeeper.ChompConfig.DbConfig
fmt.Printf("\n\n\nIn init, new manager\n")
fmt.Printf("In init, new manager\n")
fmt.Printf("In init, new manager\n\n\n\n")
globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig))
if err != nil {
fmt.Printf("Coud not start session..Error: %v\n", err.Error())
os.Exit(-1)
}
err = errors.New("")
fmt.Printf("Opening DB connection\n")
// Connection string looks as the following
//MyDb, err = sql.Open("service", "user@tcp(ip:port)/database")
connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db)
fmt.Printf("ConnString = %s\n", connString)
MyDb, err = sql.Open("mysql", connString)
if err != nil {
// return err
fmt.Printf("Error = %v\n", err)
panic(fmt.Sprintf("%v", err))
}
globalsessionkeeper.GlobalSessions.SetSecure(true)
go globalsessionkeeper.GlobalSessions.GC()
}
func BasicAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
fmt.Println("made it to basic auth")
fmt.Printf("Headers = %v\n", r.Header)
fmt.Printf("Len = %v\n", len(r.Header))
if len(r.Header["Authorization"]) <= 0 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
auth := strings.SplitN(r.Header["Authorization"][0], " ", 2)
fmt.Printf("auth = %v", auth)
if len(auth) != 2
|
else if auth[0] != "Basic" {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
payload, _ := base64.StdEncoding.DecodeString(auth[1])
pair := strings.SplitN(string(payload), ":", 2)
if len(pair) != 2 || !Validate(pair[0], pair[1]) {
http.Error(w, "authorization failed", http.StatusUnauthorized)
return
}
pass(w, r)
}
}
func (ah AppHandler) SessionAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
cookie := globalsessionkeeper.GetCookie(r)
if cookie == "" {
//need logging here instead of print
fmt.Println("Session Auth Cookie = %v", cookie)
query := mux.Vars(r)
fmt.Printf("Query here.. %v\n", query)
if query["token"] != "" {
fmt.Printf("Error not nil, updating error instacode %v\n", query["token"])
cookie = query["token"]
} else {
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"})
return
}
}
sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie)
if err != nil {
//need logging here instead of print
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
defer sessionStore.SessionRelease(w)
ah.appContext.SessionStore = sessionStore
sessionUser := sessionStore.Get("username")
fmt.Printf("Session Auth SessionUser = %v\n", sessionUser)
if sessionUser == nil {
//need logging here instead of print
fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser)
userInfo := new(db.UserInfo)
userInfo.Username = reflect.ValueOf(sessionUser).String()
err = userInfo.GetUserInfo(MyDb)
if err != nil {
//need logging here instead of print
fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
pass(w, r)
}
}
func GetConfig() error {
configFile, err := ioutil.ReadFile("./chomp_private/config.json")
if err != nil {
return err
}
err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig)
if err != nil {
fmt.Printf("Err = %v", err)
return err
}
return nil
}
func Validate(username, password string) bool {
fmt.Println("Made it to validate..")
for _, e := range globalsessionkeeper.ChompConfig.Authorized {
if e.User == username && e.Pass == password {
return true
}
}
return false
}
type AppHandler struct {
appContext *globalsessionkeeper.AppContext
h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error)
}
func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse) {
fmt.Printf("Going out as: %v\n", errorResponse)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errorResponse.Code)
json.NewEncoder(w).Encode(errorResponse)
}
func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) {
fmt.Printf("AH Context = %v\n", ah.appContext)
err := ah.h(ah.appContext, w, r)
if err != nil {
// log.Printf("HTTP %d: %q", status, err)
status := err.(globalsessionkeeper.ErrorResponse).Code
switch status {
case http.StatusNotFound:
fmt.Printf("Error: Page not found\n")
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
case http.StatusInternalServerError:
fmt.Printf("Error: %v\n", http.StatusInternalServerError)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
default:
fmt.Printf("Error: %v\n", err)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
}
}
}
func main() {
defer MyDb.Close()
router := mux.NewRouter().StrictSlash(true)
context := &globalsessionkeeper.AppContext{DB: MyDb}
fmt.Printf("Context = %v\n", context)
router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp)
router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp)
router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp)
router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp))
router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp))
router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp))
router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp))
//this is how you write a query parameter capture uri
router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp))
router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp))
router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp))
router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp))
router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp))
router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h
|
{
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
|
conditional_block
|
main.go
|
Config()
sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig)
dbConfig := globalsessionkeeper.ChompConfig.DbConfig
fmt.Printf("\n\n\nIn init, new manager\n")
fmt.Printf("In init, new manager\n")
fmt.Printf("In init, new manager\n\n\n\n")
globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig))
if err != nil {
fmt.Printf("Coud not start session..Error: %v\n", err.Error())
os.Exit(-1)
}
err = errors.New("")
fmt.Printf("Opening DB connection\n")
// Connection string looks as the following
//MyDb, err = sql.Open("service", "user@tcp(ip:port)/database")
connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db)
fmt.Printf("ConnString = %s\n", connString)
MyDb, err = sql.Open("mysql", connString)
if err != nil {
// return err
fmt.Printf("Error = %v\n", err)
panic(fmt.Sprintf("%v", err))
}
globalsessionkeeper.GlobalSessions.SetSecure(true)
go globalsessionkeeper.GlobalSessions.GC()
}
func BasicAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
fmt.Println("made it to basic auth")
fmt.Printf("Headers = %v\n", r.Header)
fmt.Printf("Len = %v\n", len(r.Header))
if len(r.Header["Authorization"]) <= 0 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
auth := strings.SplitN(r.Header["Authorization"][0], " ", 2)
fmt.Printf("auth = %v", auth)
if len(auth) != 2 {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
} else if auth[0] != "Basic" {
http.Error(w, "bad syntax", http.StatusBadRequest)
return
}
payload, _ := base64.StdEncoding.DecodeString(auth[1])
pair := strings.SplitN(string(payload), ":", 2)
if len(pair) != 2 || !Validate(pair[0], pair[1]) {
http.Error(w, "authorization failed", http.StatusUnauthorized)
return
}
pass(w, r)
}
}
func (ah AppHandler)
|
(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
cookie := globalsessionkeeper.GetCookie(r)
if cookie == "" {
//need logging here instead of print
fmt.Println("Session Auth Cookie = %v", cookie)
query := mux.Vars(r)
fmt.Printf("Query here.. %v\n", query)
if query["token"] != "" {
fmt.Printf("Error not nil, updating error instacode %v\n", query["token"])
cookie = query["token"]
} else {
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"})
return
}
}
sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie)
if err != nil {
//need logging here instead of print
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
defer sessionStore.SessionRelease(w)
ah.appContext.SessionStore = sessionStore
sessionUser := sessionStore.Get("username")
fmt.Printf("Session Auth SessionUser = %v\n", sessionUser)
if sessionUser == nil {
//need logging here instead of print
fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser)
userInfo := new(db.UserInfo)
userInfo.Username = reflect.ValueOf(sessionUser).String()
err = userInfo.GetUserInfo(MyDb)
if err != nil {
//need logging here instead of print
fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore)
HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"})
return
}
pass(w, r)
}
}
func GetConfig() error {
configFile, err := ioutil.ReadFile("./chomp_private/config.json")
if err != nil {
return err
}
err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig)
if err != nil {
fmt.Printf("Err = %v", err)
return err
}
return nil
}
func Validate(username, password string) bool {
fmt.Println("Made it to validate..")
for _, e := range globalsessionkeeper.ChompConfig.Authorized {
if e.User == username && e.Pass == password {
return true
}
}
return false
}
type AppHandler struct {
appContext *globalsessionkeeper.AppContext
h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error)
}
func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse) {
fmt.Printf("Going out as: %v\n", errorResponse)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errorResponse.Code)
json.NewEncoder(w).Encode(errorResponse)
}
func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) {
fmt.Printf("AH Context = %v\n", ah.appContext)
err := ah.h(ah.appContext, w, r)
if err != nil {
// log.Printf("HTTP %d: %q", status, err)
status := err.(globalsessionkeeper.ErrorResponse).Code
switch status {
case http.StatusNotFound:
fmt.Printf("Error: Page not found\n")
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
case http.StatusInternalServerError:
fmt.Printf("Error: %v\n", http.StatusInternalServerError)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
default:
fmt.Printf("Error: %v\n", err)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse))
}
}
}
func main() {
defer MyDb.Close()
router := mux.NewRouter().StrictSlash(true)
context := &globalsessionkeeper.AppContext{DB: MyDb}
fmt.Printf("Context = %v\n", context)
router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp)
router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp)
router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp)
router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp))
router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp))
router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp))
router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp))
//this is how you write a query parameter capture uri
router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp))
router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp))
router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp))
router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp))
router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp))
router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp))
router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp))
router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h
|
SessionAuth
|
identifier_name
|
predicates.go
|
}
return ok, reasons, err
}
func getReasonsString(reasons []core.PredicateFailureReason) string {
if len(reasons) == 0 {
return ""
}
ss := make([]string, 0, len(reasons))
for _, reason := range reasons {
ss = append(ss, reason.GetReason())
}
return strings.Join(ss, ", ")
}
func NewPredicateHelper(pre core.FitPredicate, unit *core.Unit, candi core.Candidater) *PredicateHelper {
h := &PredicateHelper{
predicate: pre,
capacity: core.EmptyCapacity,
predicateFails: []core.PredicateFailureReason{},
Unit: unit,
Candidate: candi,
}
return h
}
func (h *PredicateHelper) GetFailedResult(err error) (bool, []core.PredicateFailureReason, error) {
return false, nil, err
}
func (h *PredicateHelper) AppendPredicateFail(reason core.PredicateFailureReason) {
h.predicateFails = append(h.predicateFails, reason)
}
type predicateFailure struct {
err core.PredicateFailureError
eType string
}
func (f predicateFailure) GetReason() string {
return f.err.GetReason()
}
func (f predicateFailure) GetType() string {
return f.eType
}
func (h *PredicateHelper) AppendPredicateFailMsg(reason string) {
h.AppendPredicateFailMsgWithType(reason, h.predicate.Name())
}
func (h *PredicateHelper) AppendPredicateFailMsgWithType(reason string, eType string) {
err := NewUnexceptedResourceError(reason)
h.AppendPredicateFail(&predicateFailure{err: err, eType: eType})
}
func (h *PredicateHelper) AppendInsufficientResourceError(req, total, free int64) {
h.AppendPredicateFail(
&predicateFailure{
err: NewInsufficientResourceError(h.Candidate.Getter().Name(), req, total, free),
eType: h.predicate.Name(),
})
}
// SetCapacity returns the current resource capacity calculated by a filter.
// And 'capacity' default is -1.
func (h *PredicateHelper) SetCapacity(capacity int64) {
if capacity < 0 {
capacity = 0
}
h.SetCapacityCounter(core.NewNormalCounter(capacity))
}
func (h *PredicateHelper) SetCapacityCounter(counter core.Counter) {
capacity := counter.GetCount()
if capacity < core.EmptyCapacity {
capacity = core.EmptyCapacity
}
h.capacity = capacity
h.Unit.SetCapacity(h.Candidate.IndexKey(), h.predicate.Name(), counter)
}
func (h *PredicateHelper) SetSelectPriority(sp int) {
if sp < 0 {
sp = 0
}
h.Unit.SetSelectPriorityWithLock(h.Candidate.IndexKey(), h.predicate.Name(), core.SSelectPriorityValue(sp))
}
func (h *PredicateHelper) Exclude(reason string) {
h.SetCapacity(0)
h.AppendPredicateFailMsg(reason)
}
func (h *PredicateHelper) ExcludeByErrors(errs []core.PredicateFailureReason) {
h.SetCapacity(0)
for _, err := range errs {
h.AppendPredicateFail(err)
}
}
func (h *PredicateHelper) Exclude2(predicateName string, current, expected interface{}) {
h.Exclude(fmt.Sprintf("%s is '%v', expected '%v'", predicateName, current, expected))
}
// UseReserved check whether the unit can use guest reserved resource
func (h *PredicateHelper) UseReserved() bool {
usable := false
data := h.Unit.SchedData()
isoDevs := data.IsolatedDevices
if len(isoDevs) > 0 {
usable = true
}
return usable
}
type PredicatedSchedtagResource struct {
ISchedtagCandidateResource
PreferTags []computeapi.SchedtagConfig
AvoidTags []computeapi.SchedtagConfig
}
type SchedtagInputResourcesMap map[int][]*PredicatedSchedtagResource
func (m SchedtagInputResourcesMap) getAllTags(isPrefer bool) []computeapi.SchedtagConfig {
ret := make([]computeapi.SchedtagConfig, 0)
for _, ss := range m {
for _, s := range ss {
var tags []computeapi.SchedtagConfig
if isPrefer {
tags = s.PreferTags
} else {
tags = s.AvoidTags
}
ret = append(ret, tags...)
}
}
return ret
}
func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig
|
func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig {
return m.getAllTags(false)
}
type CandidateInputResourcesMap struct {
*sync.Map // map[string]SchedtagInputResourcesMap
}
type ISchedtagCandidateResource interface {
GetName() string
GetId() string
Keyword() string
GetSchedtags() []models.SSchedtag
GetSchedtagJointManager() models.ISchedtagJointManager
GetDynamicConditionInput() *jsonutils.JSONDict
}
type ISchedtagPredicateInstance interface {
core.FitPredicate
OnPriorityEnd(u *core.Unit, c core.Candidater)
OnSelectEnd(u *core.Unit, c core.Candidater, count int64)
GetInputs(u *core.Unit) []ISchedtagCustomer
GetResources(c core.Candidater) []ISchedtagCandidateResource
IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool
IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason
DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource
AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource)
GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64
}
type BaseSchedtagPredicate struct {
BasePredicate
plugin.BasePlugin
CandidateInputResources *CandidateInputResourcesMap
Hypervisor string
}
func NewBaseSchedtagPredicate() *BaseSchedtagPredicate {
return &BaseSchedtagPredicate{
CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap),
}
}
func (p *PredicatedSchedtagResource) isNoTag() bool {
return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0
}
func (p *PredicatedSchedtagResource) hasPreferTags() bool {
return len(p.PreferTags) != 0
}
func (p *PredicatedSchedtagResource) hasAvoidTags() bool {
return len(p.AvoidTags) != 0
}
type ISchedtagCustomer interface {
JSON(interface{}) *jsonutils.JSONDict
Keyword() string
IsSpecifyResource() bool
GetSchedtags() []*computeapi.SchedtagConfig
ResourceKeyword() string
}
type SchedtagResourceW struct {
candidater ISchedtagCandidateResource
input ISchedtagCustomer
}
func (w SchedtagResourceW) IndexKey() string {
return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId())
}
func (w SchedtagResourceW) ResourceType() string {
return getSchedtagResourceType(w.candidater)
}
func getSchedtagResourceType(candidater ISchedtagCandidateResource) string {
return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural()
}
func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag {
return w.candidater.GetSchedtags()
}
func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict {
ret := jsonutils.NewDict()
resSchedDesc := w.candidater.GetDynamicConditionInput()
inputSchedDesc := w.input.JSON(w.input)
ret.Add(resSchedDesc, w.candidater.Keyword())
ret.Add(inputSchedDesc, w.input.Keyword())
return ret
}
func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver {
return models.GetDriver(p.Hypervisor)
}
func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) {
allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate))
if err != nil {
return nil, err
}
tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags)
shouldExec := u.ShouldExecuteSchedtagFilter(c.Getter().Id())
res := &PredicatedSchedtagResource{
ISchedtagCandidateResource: candidate,
}
if shouldExec && !input.IsSpecifyResource() {
if err := tagPredicate.Check(
SchedtagResourceW{
candidater: candidate,
input: input,
},
); err != nil {
return nil, err
}
res.PreferTags = tagPredicate.GetPreferTags()
res.AvoidTags = tagPredicate.GetAvoidTags()
}
return res, nil
}
func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []
|
{
return m.getAllTags(true)
}
|
identifier_body
|
predicates.go
|
() []computeapi.SchedtagConfig {
return m.getAllTags(false)
}
type CandidateInputResourcesMap struct {
*sync.Map // map[string]SchedtagInputResourcesMap
}
type ISchedtagCandidateResource interface {
GetName() string
GetId() string
Keyword() string
GetSchedtags() []models.SSchedtag
GetSchedtagJointManager() models.ISchedtagJointManager
GetDynamicConditionInput() *jsonutils.JSONDict
}
type ISchedtagPredicateInstance interface {
core.FitPredicate
OnPriorityEnd(u *core.Unit, c core.Candidater)
OnSelectEnd(u *core.Unit, c core.Candidater, count int64)
GetInputs(u *core.Unit) []ISchedtagCustomer
GetResources(c core.Candidater) []ISchedtagCandidateResource
IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool
IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason
DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource
AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource)
GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64
}
type BaseSchedtagPredicate struct {
BasePredicate
plugin.BasePlugin
CandidateInputResources *CandidateInputResourcesMap
Hypervisor string
}
func NewBaseSchedtagPredicate() *BaseSchedtagPredicate {
return &BaseSchedtagPredicate{
CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap),
}
}
func (p *PredicatedSchedtagResource) isNoTag() bool {
return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0
}
func (p *PredicatedSchedtagResource) hasPreferTags() bool {
return len(p.PreferTags) != 0
}
func (p *PredicatedSchedtagResource) hasAvoidTags() bool {
return len(p.AvoidTags) != 0
}
type ISchedtagCustomer interface {
JSON(interface{}) *jsonutils.JSONDict
Keyword() string
IsSpecifyResource() bool
GetSchedtags() []*computeapi.SchedtagConfig
ResourceKeyword() string
}
type SchedtagResourceW struct {
candidater ISchedtagCandidateResource
input ISchedtagCustomer
}
func (w SchedtagResourceW) IndexKey() string {
return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId())
}
func (w SchedtagResourceW) ResourceType() string {
return getSchedtagResourceType(w.candidater)
}
func getSchedtagResourceType(candidater ISchedtagCandidateResource) string {
return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural()
}
func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag {
return w.candidater.GetSchedtags()
}
func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict {
ret := jsonutils.NewDict()
resSchedDesc := w.candidater.GetDynamicConditionInput()
inputSchedDesc := w.input.JSON(w.input)
ret.Add(resSchedDesc, w.candidater.Keyword())
ret.Add(inputSchedDesc, w.input.Keyword())
return ret
}
func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver {
return models.GetDriver(p.Hypervisor)
}
func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) {
allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate))
if err != nil {
return nil, err
}
tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags)
shouldExec := u.ShouldExecuteSchedtagFilter(c.Getter().Id())
res := &PredicatedSchedtagResource{
ISchedtagCandidateResource: candidate,
}
if shouldExec && !input.IsSpecifyResource() {
if err := tagPredicate.Check(
SchedtagResourceW{
candidater: candidate,
input: input,
},
); err != nil {
return nil, err
}
res.PreferTags = tagPredicate.GetPreferTags()
res.AvoidTags = tagPredicate.GetAvoidTags()
}
return res, nil
}
func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) {
errs := make([]error, 0)
ret := make([]*PredicatedSchedtagResource, 0)
for _, res := range ress {
ps, err := p.check(input, res, u, c)
if err != nil {
// append err, resource not suit input customer
errs = append(errs, err)
continue
}
ret = append(ret, ps)
}
if len(ret) == 0 {
return nil, errors.NewAggregate(errs)
}
return ret, nil
}
func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap {
ret, ok := p.CandidateInputResources.Load(candidateId)
if !ok {
ret = make(map[int][]*PredicatedSchedtagResource)
p.CandidateInputResources.Store(candidateId, ret)
}
return ret.(map[int][]*PredicatedSchedtagResource)
}
func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) {
input := sp.GetInputs(u)
if len(input) == 0 {
return false, nil
}
p.Hypervisor = u.GetHypervisor()
// always do select step
u.AppendSelectPlugin(sp)
return true, nil
}
func (p *BaseSchedtagPredicate) Execute(
sp ISchedtagPredicateInstance,
u *core.Unit,
c core.Candidater,
) (bool, []core.PredicateFailureReason, error) {
inputs := sp.GetInputs(u)
resources := sp.GetResources(c)
h := NewPredicateHelper(sp, u, c)
inputRes := p.GetInputResourcesMap(c.IndexKey())
filterErrs := make([]core.PredicateFailureReason, 0)
for idx, input := range inputs {
fitResources := make([]ISchedtagCandidateResource, 0)
errs := make([]core.PredicateFailureReason, 0)
matchedRes := make([]ISchedtagCandidateResource, 0)
for _, r := range resources {
if sp.IsResourceMatchInput(input, r) {
matchedRes = append(matchedRes, r)
}
}
if len(matchedRes) == 0 {
errs = append(errs, &FailReason{
Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()),
Type: fmt.Sprintf("%s_match", input.ResourceKeyword()),
})
}
for _, res := range matchedRes {
if err := sp.IsResourceFitInput(u, c, res, input); err == nil {
fitResources = append(fitResources, res)
} else {
errs = append(errs, err)
}
}
if len(fitResources) == 0 {
h.ExcludeByErrors(errs)
break
}
if len(errs) > 0 {
filterErrs = append(filterErrs, errs...)
}
matchedResources, err := p.checkResources(input, fitResources, u, c)
if err != nil {
if len(filterErrs) > 0 {
h.ExcludeByErrors(filterErrs)
}
errMsg := fmt.Sprintf("schedtag: %v", err.Error())
h.Exclude(errMsg)
}
inputRes[idx] = matchedResources
}
return h.GetResult()
}
func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) {
resTags := []models.SSchedtag{}
for _, res := range sp.GetResources(c) {
resTags = append(resTags, res.GetSchedtags()...)
}
inputRes := p.GetInputResourcesMap(c.IndexKey())
avoidTags := inputRes.GetAvoidTags()
preferTags := inputRes.GetPreferTags()
avoidCountMap := GetSchedtagCount(avoidTags, resTags, api.AggregateStrategyAvoid)
preferCountMap := GetSchedtagCount(preferTags, resTags, api.AggregateStrategyPrefer)
setScore := SetCandidateScoreBySchedtag
setScore(u, c, preferCountMap, true)
setScore(u, c, avoidCountMap, false)
}
func (p *BaseSchedtagPredicate)
|
OnSelectEnd
|
identifier_name
|
|
predicates.go
|
([]computeapi.SchedtagConfig, 0)
for _, ss := range m {
for _, s := range ss {
var tags []computeapi.SchedtagConfig
if isPrefer {
tags = s.PreferTags
} else {
tags = s.AvoidTags
}
ret = append(ret, tags...)
}
}
return ret
}
func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig {
return m.getAllTags(true)
}
func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig {
return m.getAllTags(false)
}
type CandidateInputResourcesMap struct {
*sync.Map // map[string]SchedtagInputResourcesMap
}
type ISchedtagCandidateResource interface {
GetName() string
GetId() string
Keyword() string
GetSchedtags() []models.SSchedtag
GetSchedtagJointManager() models.ISchedtagJointManager
GetDynamicConditionInput() *jsonutils.JSONDict
}
type ISchedtagPredicateInstance interface {
core.FitPredicate
OnPriorityEnd(u *core.Unit, c core.Candidater)
OnSelectEnd(u *core.Unit, c core.Candidater, count int64)
GetInputs(u *core.Unit) []ISchedtagCustomer
GetResources(c core.Candidater) []ISchedtagCandidateResource
IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool
IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason
DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource
AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource)
GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64
}
type BaseSchedtagPredicate struct {
BasePredicate
plugin.BasePlugin
CandidateInputResources *CandidateInputResourcesMap
Hypervisor string
}
func NewBaseSchedtagPredicate() *BaseSchedtagPredicate {
return &BaseSchedtagPredicate{
CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap),
}
}
func (p *PredicatedSchedtagResource) isNoTag() bool {
return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0
}
func (p *PredicatedSchedtagResource) hasPreferTags() bool {
return len(p.PreferTags) != 0
}
func (p *PredicatedSchedtagResource) hasAvoidTags() bool {
return len(p.AvoidTags) != 0
}
type ISchedtagCustomer interface {
JSON(interface{}) *jsonutils.JSONDict
Keyword() string
IsSpecifyResource() bool
GetSchedtags() []*computeapi.SchedtagConfig
ResourceKeyword() string
}
type SchedtagResourceW struct {
candidater ISchedtagCandidateResource
input ISchedtagCustomer
}
func (w SchedtagResourceW) IndexKey() string {
return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId())
}
func (w SchedtagResourceW) ResourceType() string {
return getSchedtagResourceType(w.candidater)
}
func getSchedtagResourceType(candidater ISchedtagCandidateResource) string {
return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural()
}
func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag {
return w.candidater.GetSchedtags()
}
func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict {
ret := jsonutils.NewDict()
resSchedDesc := w.candidater.GetDynamicConditionInput()
inputSchedDesc := w.input.JSON(w.input)
ret.Add(resSchedDesc, w.candidater.Keyword())
ret.Add(inputSchedDesc, w.input.Keyword())
return ret
}
func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver {
return models.GetDriver(p.Hypervisor)
}
func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) {
allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate))
if err != nil {
return nil, err
}
tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags)
shouldExec := u.ShouldExecuteSchedtagFilter(c.Getter().Id())
res := &PredicatedSchedtagResource{
ISchedtagCandidateResource: candidate,
}
if shouldExec && !input.IsSpecifyResource() {
if err := tagPredicate.Check(
SchedtagResourceW{
candidater: candidate,
input: input,
},
); err != nil {
return nil, err
}
res.PreferTags = tagPredicate.GetPreferTags()
res.AvoidTags = tagPredicate.GetAvoidTags()
}
return res, nil
}
func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) {
errs := make([]error, 0)
ret := make([]*PredicatedSchedtagResource, 0)
for _, res := range ress {
ps, err := p.check(input, res, u, c)
if err != nil {
// append err, resource not suit input customer
errs = append(errs, err)
continue
}
ret = append(ret, ps)
}
if len(ret) == 0 {
return nil, errors.NewAggregate(errs)
}
return ret, nil
}
func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap {
ret, ok := p.CandidateInputResources.Load(candidateId)
if !ok {
ret = make(map[int][]*PredicatedSchedtagResource)
p.CandidateInputResources.Store(candidateId, ret)
}
return ret.(map[int][]*PredicatedSchedtagResource)
}
func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) {
input := sp.GetInputs(u)
if len(input) == 0 {
return false, nil
}
p.Hypervisor = u.GetHypervisor()
// always do select step
u.AppendSelectPlugin(sp)
return true, nil
}
func (p *BaseSchedtagPredicate) Execute(
sp ISchedtagPredicateInstance,
u *core.Unit,
c core.Candidater,
) (bool, []core.PredicateFailureReason, error) {
inputs := sp.GetInputs(u)
resources := sp.GetResources(c)
h := NewPredicateHelper(sp, u, c)
inputRes := p.GetInputResourcesMap(c.IndexKey())
filterErrs := make([]core.PredicateFailureReason, 0)
for idx, input := range inputs {
fitResources := make([]ISchedtagCandidateResource, 0)
errs := make([]core.PredicateFailureReason, 0)
matchedRes := make([]ISchedtagCandidateResource, 0)
for _, r := range resources {
if sp.IsResourceMatchInput(input, r) {
matchedRes = append(matchedRes, r)
}
}
if len(matchedRes) == 0 {
errs = append(errs, &FailReason{
Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()),
Type: fmt.Sprintf("%s_match", input.ResourceKeyword()),
})
}
for _, res := range matchedRes {
if err := sp.IsResourceFitInput(u, c, res, input); err == nil {
fitResources = append(fitResources, res)
} else {
errs = append(errs, err)
}
}
if len(fitResources) == 0 {
h.ExcludeByErrors(errs)
break
}
if len(errs) > 0 {
filterErrs = append(filterErrs, errs...)
}
matchedResources, err := p.checkResources(input, fitResources, u, c)
if err != nil {
if len(filterErrs) > 0 {
h.ExcludeByErrors(filterErrs)
}
errMsg := fmt.Sprintf("schedtag: %v", err.Error())
h.Exclude(errMsg)
}
inputRes[idx] = matchedResources
}
return h.GetResult()
}
func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) {
resTags := []models.SSchedtag{}
for _, res := range sp.GetResources(c)
|
{
resTags = append(resTags, res.GetSchedtags()...)
}
|
conditional_block
|
|
predicates.go
|
}
}
return ret
}
func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig {
return m.getAllTags(true)
}
func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig {
return m.getAllTags(false)
}
type CandidateInputResourcesMap struct {
*sync.Map // map[string]SchedtagInputResourcesMap
}
type ISchedtagCandidateResource interface {
GetName() string
GetId() string
Keyword() string
GetSchedtags() []models.SSchedtag
GetSchedtagJointManager() models.ISchedtagJointManager
GetDynamicConditionInput() *jsonutils.JSONDict
}
type ISchedtagPredicateInstance interface {
core.FitPredicate
OnPriorityEnd(u *core.Unit, c core.Candidater)
OnSelectEnd(u *core.Unit, c core.Candidater, count int64)
GetInputs(u *core.Unit) []ISchedtagCustomer
GetResources(c core.Candidater) []ISchedtagCandidateResource
IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool
IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason
DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource
AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource)
GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64
}
type BaseSchedtagPredicate struct {
BasePredicate
plugin.BasePlugin
CandidateInputResources *CandidateInputResourcesMap
Hypervisor string
}
func NewBaseSchedtagPredicate() *BaseSchedtagPredicate {
return &BaseSchedtagPredicate{
CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap),
}
}
func (p *PredicatedSchedtagResource) isNoTag() bool {
return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0
}
func (p *PredicatedSchedtagResource) hasPreferTags() bool {
return len(p.PreferTags) != 0
}
func (p *PredicatedSchedtagResource) hasAvoidTags() bool {
return len(p.AvoidTags) != 0
}
type ISchedtagCustomer interface {
JSON(interface{}) *jsonutils.JSONDict
Keyword() string
IsSpecifyResource() bool
GetSchedtags() []*computeapi.SchedtagConfig
ResourceKeyword() string
}
type SchedtagResourceW struct {
candidater ISchedtagCandidateResource
input ISchedtagCustomer
}
func (w SchedtagResourceW) IndexKey() string {
return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId())
}
func (w SchedtagResourceW) ResourceType() string {
return getSchedtagResourceType(w.candidater)
}
func getSchedtagResourceType(candidater ISchedtagCandidateResource) string {
return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural()
}
func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag {
return w.candidater.GetSchedtags()
}
func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict {
ret := jsonutils.NewDict()
resSchedDesc := w.candidater.GetDynamicConditionInput()
inputSchedDesc := w.input.JSON(w.input)
ret.Add(resSchedDesc, w.candidater.Keyword())
ret.Add(inputSchedDesc, w.input.Keyword())
return ret
}
func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver {
return models.GetDriver(p.Hypervisor)
}
func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) {
allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate))
if err != nil {
return nil, err
}
tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags)
shouldExec := u.ShouldExecuteSchedtagFilter(c.Getter().Id())
res := &PredicatedSchedtagResource{
ISchedtagCandidateResource: candidate,
}
if shouldExec && !input.IsSpecifyResource() {
if err := tagPredicate.Check(
SchedtagResourceW{
candidater: candidate,
input: input,
},
); err != nil {
return nil, err
}
res.PreferTags = tagPredicate.GetPreferTags()
res.AvoidTags = tagPredicate.GetAvoidTags()
}
return res, nil
}
func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) {
errs := make([]error, 0)
ret := make([]*PredicatedSchedtagResource, 0)
for _, res := range ress {
ps, err := p.check(input, res, u, c)
if err != nil {
// append err, resource not suit input customer
errs = append(errs, err)
continue
}
ret = append(ret, ps)
}
if len(ret) == 0 {
return nil, errors.NewAggregate(errs)
}
return ret, nil
}
func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap {
ret, ok := p.CandidateInputResources.Load(candidateId)
if !ok {
ret = make(map[int][]*PredicatedSchedtagResource)
p.CandidateInputResources.Store(candidateId, ret)
}
return ret.(map[int][]*PredicatedSchedtagResource)
}
func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) {
input := sp.GetInputs(u)
if len(input) == 0 {
return false, nil
}
p.Hypervisor = u.GetHypervisor()
// always do select step
u.AppendSelectPlugin(sp)
return true, nil
}
func (p *BaseSchedtagPredicate) Execute(
sp ISchedtagPredicateInstance,
u *core.Unit,
c core.Candidater,
) (bool, []core.PredicateFailureReason, error) {
inputs := sp.GetInputs(u)
resources := sp.GetResources(c)
h := NewPredicateHelper(sp, u, c)
inputRes := p.GetInputResourcesMap(c.IndexKey())
filterErrs := make([]core.PredicateFailureReason, 0)
for idx, input := range inputs {
fitResources := make([]ISchedtagCandidateResource, 0)
errs := make([]core.PredicateFailureReason, 0)
matchedRes := make([]ISchedtagCandidateResource, 0)
for _, r := range resources {
if sp.IsResourceMatchInput(input, r) {
matchedRes = append(matchedRes, r)
}
}
if len(matchedRes) == 0 {
errs = append(errs, &FailReason{
Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()),
Type: fmt.Sprintf("%s_match", input.ResourceKeyword()),
})
}
for _, res := range matchedRes {
if err := sp.IsResourceFitInput(u, c, res, input); err == nil {
fitResources = append(fitResources, res)
} else {
errs = append(errs, err)
}
}
if len(fitResources) == 0 {
h.ExcludeByErrors(errs)
break
}
if len(errs) > 0 {
filterErrs = append(filterErrs, errs...)
}
matchedResources, err := p.checkResources(input, fitResources, u, c)
if err != nil {
if len(filterErrs) > 0 {
h.ExcludeByErrors(filterErrs)
}
errMsg := fmt.Sprintf("schedtag: %v", err.Error())
h.Exclude(errMsg)
}
inputRes[idx] = matchedResources
}
return h.GetResult()
}
func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) {
resTags := []models.SSchedtag{}
for _, res := range sp.GetResources(c) {
resTags = append(resTags, res.GetSchedtags()...)
}
inputRes := p.GetInputResourcesMap(c.IndexKey())
avoidTags := inputRes.GetAvoidTags()
|
preferTags := inputRes.GetPreferTags()
avoidCountMap := GetSchedtagCount(avoidTags, resTags, api.AggregateStrategyAvoid)
preferCountMap := GetSchedtagCount(preferTags, resTags, api.AggregateStrategyPrefer)
|
random_line_split
|
|
adminEventTitle.js
|
.png'
import { subscribe } from '../../../redux/middlweare/crud'
import AllEvents from '../../events/allEvents/allEvents'
import FooterEventsGallery from '../../footer/footerEventsGallery';
import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator';
import uploadIcon from '../../../assets/upload.png';
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import Subscribe from '../../subscribe/subscribe'
function mapStateToProps(state) {
// red #86F3FF
document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor);
document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment);
// state.settings.settings.eventsButtonColor
// document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor);
|
site: state.site,
pagesettings: state.pageSettings.page,
headersettings: state.editHeader.header,
subscribesettings: state.editSubscription.subscribe,
// (לחלק לכמה רדיוסרים)
// text-align נתונים מהשרת................................
}
}
const mapDispatchToProps = (dispatch) => ({
changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); },
changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) },
changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeImage: (url) => { dispatch(actionsStore.setImage(url)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeLogo: (url) => dispatch(actionsStore.setLogo(url))
// addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)),
})
export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) {
const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props;
const img =
{
'#ad60ff': purple1,
'#4f40d0': purple2,
'#ff53f7': pink,
'#ff62b2': pink2,
'#fa5252': red,
'#ff803f': orange,
'#faee3a': yellow,
'#424149': black,
'#9f9cb5': gray,
'#63f597': turquoise,
'#54b9ff': lightBlue,
'#51e7fb': lightBlue2
}
// const display = true;//ימלא נתונים בפרופס מהרידאקס אם מעונין שיציג כותרת
// const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' });//ימלא נתונים מהפרופס מהרידאקס את ההגדרות..
const [showing, setShowing] = useState(false);
const [uploadImg, setUploadImg] = useState(false);
var myImg = new Image();
function setHeightAndWidth() {
var size;
myImg.src = headersettings.eventsPageImageOrVideo;
console.log("@@" + myImg.width / myImg.height + "@@")
size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12;
size += "vw";
var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16;
inputHeight += "vh";
console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height)
console.log("@@" + size + "@@")
if (size == "NaNvw") { size = "30vw" }
document.documentElement.style.setProperty('--image-width', size);
document.documentElement.style.setProperty('--input-height', inputHeight);
}
const changeImage = (e) => {
props.setLoaderUploadShow(true, 'image');
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeImage(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
const changeLogoImage = (e) => {
props.setLoaderUploadShow(true, "logo");
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeLogo(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
useEffect(() => {
if (headersettings) {
setHeightAndWidth()
setFontsize()
}
}, [headersettings])
function checkImg() {
let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, '');
if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) {
return true;
} else {
return false;
}
}
function changeToHeaderComponent() {
changeCurrentComponent('Edit Header')
}
function changeToPageSettingsComponent() {
changeCurrentComponent('Page Settings')
}
function setUpload() {
setUploadImg(!uploadImg)
}
function setFontsize() {
debugger
var height, len = headersettings.eventsPageTitle.length;
height = Math.ceil(len / 15) * 7;
if (height < 25) {
height += "vh";
console.log("-- ", height, " --");
document.documentElement.style.setProperty('--title-height', height);
}
let textLength = headersettings.eventsPageTitle.length
let textSize = 5
const baseSize = 8
if (Math.ceil(len / 15) >= 2) {
textSize = textSize - 1;
if (Math.ceil(len / 15) >= 3) {
textSize = textSize - 1
if (Math.ceil(len / 15) >= 4) {
textSize = textSize - 1
}
}
}
document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`);
}
return (
<>
<div className="container-fluid adminEventTitle" >
<div className="row adminTitleDiv" id='showHeader'>
<img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img>
<label htmlFor='filelogo' className="adminLogoLabel">
<img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img>
<div className="adminLogoIconDiv" onClick={changeToHeaderComponent}>
<FontAwesomeIcon
id='angle-right'
className='iconCloudUpload uploadLogo'
icon={['fas', 'cloud-upload-alt']}
></FontAwesomeIcon>
</div>
</label>
<input type="file" name="file" accept="image/*" id="filelogo"
className="adminInputfileLogo" onChange={changeLogoImage} />
<div className="col-3 adminTitleAndDescription">
<textarea
className="adminEventTitletitleH1"
// onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()}
onChange={(e) => changeTitleText(e.target.value)}
onClick={changeToHeaderComponent}
value={headersettings.eventsPageTitle}
// rows="2"
// size="14"
maxLength="90"
// style={{ textAlign: 'left' }}
placeholder={headersettings.eventsPageTitle}
onFocus={(e
|
return {
|
random_line_split
|
adminEventTitle.js
|
'
import { subscribe } from '../../../redux/middlweare/crud'
import AllEvents from '../../events/allEvents/allEvents'
import FooterEventsGallery from '../../footer/footerEventsGallery';
import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator';
import uploadIcon from '../../../assets/upload.png';
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import Subscribe from '../../subscribe/subscribe'
function mapStateToProps(state) {
// red #86F3FF
document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor);
document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment);
// state.settings.settings.eventsButtonColor
// document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor);
return {
site: state.site,
pagesettings: state.pageSettings.page,
headersettings: state.editHeader.header,
subscribesettings: state.editSubscription.subscribe,
// (לחלק לכמה רדיוסרים)
// text-align נתונים מהשרת................................
}
}
const mapDispatchToProps = (dispatch) => ({
changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); },
changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) },
changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeImage: (url) => { dispatch(actionsStore.setImage(url)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeLogo: (url) => dispatch(actionsStore.setLogo(url))
// addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)),
})
export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) {
const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props;
const img =
{
'#ad60ff': purple1,
'#4f40d0': purple2,
'#ff53f7': pink,
'#ff62b2': pink2,
'#fa5252': red,
'#ff803f': orange,
'#faee3a': yellow,
'#424149': black,
'#9f9cb5': gray,
'#63f597': turquoise,
'#54b9ff': lightBlue,
'#51e7fb': lightBlue2
}
// const display = true;//ימלא נתונים בפרופס מהרידאקס אם מעונין שיציג כותרת
// const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' });//ימלא נתונים מהפרופס מהרידאקס את ההגדרות..
const [showing, setShowing] = useState(false);
const [uploadImg, setUploadImg] = useState(false);
var myImg = new Image();
function setHeightAndWidth() {
var size;
myImg.src = headersettings.eventsPageImageOrVideo;
console.log("@@" + myImg.width / myImg.height + "@@")
size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12;
size += "vw";
var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16;
inputHeight += "vh";
console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height)
console.log("@@" + size + "@@")
if (size == "NaNvw") { size = "30vw" }
document.documentElement.style.setProperty('--image-width', size);
document.documentElement.style.setProperty('--input-height', inputHeight);
}
const changeImage = (e) => {
props.setLoaderUploadShow(true, 'image');
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeImage(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
const changeLogoImage = (e) => {
props.setLoaderUploadShow(true, "logo");
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeLogo(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
useEffect(() => {
if (headersettings) {
setHeightAndWidth()
setFontsize()
}
}, [headersettings])
function checkImg() {
let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, '');
if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) {
return true;
} else {
return false;
}
}
function changeToHeaderComponent() {
changeCurrentComponent('Edit Header')
}
function changeToPageSettingsComponent() {
changeCurrentComponent('Page Settings')
}
function setUpload() {
setUploadImg(!up
|
gger
var height, len = headersettings.eventsPageTitle.length;
height = Math.ceil(len / 15) * 7;
if (height < 25) {
height += "vh";
console.log("-- ", height, " --");
document.documentElement.style.setProperty('--title-height', height);
}
let textLength = headersettings.eventsPageTitle.length
let textSize = 5
const baseSize = 8
if (Math.ceil(len / 15) >= 2) {
textSize = textSize - 1;
if (Math.ceil(len / 15) >= 3) {
textSize = textSize - 1
if (Math.ceil(len / 15) >= 4) {
textSize = textSize - 1
}
}
}
document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`);
}
return (
<>
<div className="container-fluid adminEventTitle" >
<div className="row adminTitleDiv" id='showHeader'>
<img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img>
<label htmlFor='filelogo' className="adminLogoLabel">
<img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img>
<div className="adminLogoIconDiv" onClick={changeToHeaderComponent}>
<FontAwesomeIcon
id='angle-right'
className='iconCloudUpload uploadLogo'
icon={['fas', 'cloud-upload-alt']}
></FontAwesomeIcon>
</div>
</label>
<input type="file" name="file" accept="image/*" id="filelogo"
className="adminInputfileLogo" onChange={changeLogoImage} />
<div className="col-3 adminTitleAndDescription">
<textarea
className="adminEventTitletitleH1"
// onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()}
onChange={(e) => changeTitleText(e.target.value)}
onClick={changeToHeaderComponent}
value={headersettings.eventsPageTitle}
// rows="2"
// size="14"
maxLength="90"
// style={{ textAlign: 'left' }}
placeholder={headersettings.eventsPageTitle}
onFocus
|
loadImg)
}
function setFontsize() {
debu
|
identifier_body
|
adminEventTitle.js
|
.png'
import { subscribe } from '../../../redux/middlweare/crud'
import AllEvents from '../../events/allEvents/allEvents'
import FooterEventsGallery from '../../footer/footerEventsGallery';
import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator';
import uploadIcon from '../../../assets/upload.png';
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import Subscribe from '../../subscribe/subscribe'
function mapStateToProps(state) {
// red #86F3FF
document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor);
document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment);
// state.settings.settings.eventsButtonColor
// document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor);
return {
site: state.site,
pagesettings: state.pageSettings.page,
headersettings: state.editHeader.header,
subscribesettings: state.editSubscription.subscribe,
// (לחלק לכמה רדיוסרים)
// text-align נתונים מהשרת................................
}
}
const mapDispatchToProps = (dispatch) => ({
changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); },
changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) },
changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeImage: (url) => { dispatch(actionsStore.setImage(url)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeLogo: (url) => dispatch(actionsStore.setLogo(url))
// addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)),
})
export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) {
const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props;
const img =
{
'#ad60ff': purple1,
'#4f40d0': purple2,
'#ff53f7': pink,
'#ff62b2': pink2,
'#fa5252': red,
'#ff803f': orange,
'#faee3a': yellow,
'#424149': black,
'#9f9cb5': gray,
'#63f597': turquoise,
'#54b9ff': lightBlue,
'#51e7fb': lightBlue2
}
// const display = true;//ימלא נתונים בפרופס מהרידאקס אם מעונין שיציג כותרת
// const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' });//ימלא נתונים מהפרופס מהרידאקס את ההגדרות..
const [showing, setShowing] = useState(false);
const [uploadImg, setUploadImg] = useState(false);
var myImg = new Image();
function setHeightAndWidth() {
var size;
myImg.src = headersettings.eventsPageImageOrVideo;
console.log("@@" + myImg.width / myImg.height + "@@")
size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12;
size += "vw";
var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16;
inputHeight += "vh";
console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height)
console.log("@@" + size + "@@")
if (size == "NaNvw") { size = "30vw" }
document.documentElement.style.setProperty('--image-width', size);
document.documentElement.style.setProperty('--input-height', inputHeight);
}
const changeImage = (e) => {
props.setLoaderUploadShow(true, 'image');
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeImage(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
const changeLogoImage = (e) => {
props.setLoaderUploadShow(true, "logo");
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeLogo(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
useEffect(() => {
if (headersettings) {
setHeightAndWidth()
setFontsize()
}
}, [headersettings])
function checkImg() {
let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, '');
if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) {
return true;
} else {
return false;
}
}
function changeToHeaderComponent() {
changeCurrentComponent('Edit Header')
}
function changeToPageSettingsComponent() {
changeCurrentComponent('Page Settings')
}
function setUpload() {
setUploadImg(!uploadImg)
}
function setFontsize() {
debugger
var height, len = headersettings.eventsPageTitle.length;
height = Math.ceil(len / 15) * 7;
if (height < 25) {
height += "vh";
console.log("-- ", height, " --");
document.documentElement.style.setProperty('--title-height', height);
}
let textLength = headersettings.eventsPageTitle.length
let textSize = 5
const baseSize = 8
if (Math.ceil(len / 15) >= 2) {
textSize = textSize - 1;
if (Math.ceil(len / 15) >= 3) {
textSize = textSize - 1
if (Math.ceil(len / 15) >= 4) {
|
}
return (
<>
<div className="container-fluid adminEventTitle" >
<div className="row adminTitleDiv" id='showHeader'>
<img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img>
<label htmlFor='filelogo' className="adminLogoLabel">
<img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img>
<div className="adminLogoIconDiv" onClick={changeToHeaderComponent}>
<FontAwesomeIcon
id='angle-right'
className='iconCloudUpload uploadLogo'
icon={['fas', 'cloud-upload-alt']}
></FontAwesomeIcon>
</div>
</label>
<input type="file" name="file" accept="image/*" id="filelogo"
className="adminInputfileLogo" onChange={changeLogoImage} />
<div className="col-3 adminTitleAndDescription">
<textarea
className="adminEventTitletitleH1"
// onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()}
onChange={(e) => changeTitleText(e.target.value)}
onClick={changeToHeaderComponent}
value={headersettings.eventsPageTitle}
// rows="2"
// size="14"
maxLength="90"
// style={{ textAlign: 'left' }}
placeholder={headersettings.eventsPageTitle}
onFocus={(
|
textSize = textSize - 1
}
}
}
document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`);
|
conditional_block
|
adminEventTitle.js
|
.png'
import { subscribe } from '../../../redux/middlweare/crud'
import AllEvents from '../../events/allEvents/allEvents'
import FooterEventsGallery from '../../footer/footerEventsGallery';
import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator';
import uploadIcon from '../../../assets/upload.png';
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import Subscribe from '../../subscribe/subscribe'
function mapStateToProps(state) {
// red #86F3FF
document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor);
document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment);
// state.settings.settings.eventsButtonColor
// document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor);
return {
site: state.site,
pagesettings: state.pageSettings.page,
headersettings: state.editHeader.header,
subscribesettings: state.editSubscription.subscribe,
// (לחלק לכמה רדיוסרים)
// text-align נתונים מהשרת................................
}
}
const mapDispatchToProps = (dispatch) => ({
changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); },
changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) },
changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeImage: (url) => { dispatch(actionsStore.setImage(url)) },
setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })),
changeLogo: (url) => dispatch(actionsStore.setLogo(url))
// addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)),
})
export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) {
const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props;
const img =
{
'#ad60ff': purple1,
'#4f40d0': purple2,
'#ff53f7': pink,
'#ff62b2': pink2,
'#fa5252': red,
'#ff803f': orange,
'#faee3a': yellow,
'#424149': black,
'#9f9cb5': gray,
'#63f597': turquoise,
'#54b9ff': lightBlue,
'#51e7fb': lightBlue2
}
// const display = true;//ימלא נתונים בפרופס מהרידאקס אם מעונין שיציג כותרת
// const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' });//ימלא נתונים מהפרופס מהרידאקס את ההגדרות..
const [showing, setShowing] = useState(false);
const [uploadImg, setUploadImg] = useState(false);
var myImg = new Image();
function setHeightAndWidth() {
var size;
myImg.src = headersettings.eventsPageImageOrVideo;
|
+ myImg.width / myImg.height + "@@")
size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12;
size += "vw";
var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16;
inputHeight += "vh";
console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height)
console.log("@@" + size + "@@")
if (size == "NaNvw") { size = "30vw" }
document.documentElement.style.setProperty('--image-width', size);
document.documentElement.style.setProperty('--input-height', inputHeight);
}
const changeImage = (e) => {
props.setLoaderUploadShow(true, 'image');
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeImage(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
const changeLogoImage = (e) => {
props.setLoaderUploadShow(true, "logo");
const TokenToString = document.cookie && document.cookie.includes('devJwt')
? document.cookie
.split(';')
.filter(s => s.includes('devJwt'))[0]
.split('=')
.pop()
: null
const userName = window.location.pathname.split('/')[1]
const file = e.target.files[0];
var myFile = new FormData();
myFile.append("file", file);
$.ajax({
type: "POST",
url: `${keys.API_FILE}/${userName}/upload`,
headers: { Authorization: TokenToString },
data: myFile,
processData: false,
contentType: false,
success: (data) => {
// alert("upload success");
props.changeLogo(data.data.url);
},
error: function (err) {
alert('please try again later');
},
});
}
useEffect(() => {
if (headersettings) {
setHeightAndWidth()
setFontsize()
}
}, [headersettings])
function checkImg() {
let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, '');
if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) {
return true;
} else {
return false;
}
}
function changeToHeaderComponent() {
changeCurrentComponent('Edit Header')
}
function changeToPageSettingsComponent() {
changeCurrentComponent('Page Settings')
}
function setUpload() {
setUploadImg(!uploadImg)
}
function setFontsize() {
debugger
var height, len = headersettings.eventsPageTitle.length;
height = Math.ceil(len / 15) * 7;
if (height < 25) {
height += "vh";
console.log("-- ", height, " --");
document.documentElement.style.setProperty('--title-height', height);
}
let textLength = headersettings.eventsPageTitle.length
let textSize = 5
const baseSize = 8
if (Math.ceil(len / 15) >= 2) {
textSize = textSize - 1;
if (Math.ceil(len / 15) >= 3) {
textSize = textSize - 1
if (Math.ceil(len / 15) >= 4) {
textSize = textSize - 1
}
}
}
document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`);
}
return (
<>
<div className="container-fluid adminEventTitle" >
<div className="row adminTitleDiv" id='showHeader'>
<img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img>
<label htmlFor='filelogo' className="adminLogoLabel">
<img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img>
<div className="adminLogoIconDiv" onClick={changeToHeaderComponent}>
<FontAwesomeIcon
id='angle-right'
className='iconCloudUpload uploadLogo'
icon={['fas', 'cloud-upload-alt']}
></FontAwesomeIcon>
</div>
</label>
<input type="file" name="file" accept="image/*" id="filelogo"
className="adminInputfileLogo" onChange={changeLogoImage} />
<div className="col-3 adminTitleAndDescription">
<textarea
className="adminEventTitletitleH1"
// onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()}
onChange={(e) => changeTitleText(e.target.value)}
onClick={changeToHeaderComponent}
value={headersettings.eventsPageTitle}
// rows="2"
// size="14"
maxLength="90"
// style={{ textAlign: 'left' }}
placeholder={headersettings.eventsPageTitle}
onFocus={(
|
console.log("@@"
|
identifier_name
|
index.go
|
c.x, c.y)
}
type Cmd int
const (
CMD_WAIT Cmd = 0
CMD_MOVE Cmd = 1
CMD_DIG Cmd = 2
CMD_RADAR Cmd = 3
CMD_TRAP Cmd = 4
)
type Item int
const (
ITEM_NONE Item = -1
ITEM_RADAR Item = 2
ITEM_TRAP Item = 3
ITEM_ORE Item = 4
)
type Object int
const (
OBJ_ME Object = 0
OBJ_OPPONENT Object = 1
OBJ_RADAR Object = 2
OBJ_TRAP Object = 3
)
type Robot struct {
id int
pos Coord
cmd Cmd
targetPos Coord
item Item
digIntent Item
}
func (r Robot) String() string {
return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item)
}
func (r *Robot) Wait() {
r.cmd = CMD_WAIT
}
func (r *Robot) MoveTo(pos Coord) {
r.cmd = CMD_MOVE
r.targetPos.x = pos.x
r.targetPos.y = pos.y
}
func (r *Robot) Move(dx, dy int) {
r.cmd = CMD_MOVE
r.targetPos.x = clamp(r.pos.x + dx, 0, world.width)
r.targetPos.y = clamp(r.pos.y + dy, 0, world.height)
}
func (r *Robot) ReturnToHQ() {
r.cmd = CMD_MOVE
r.targetPos.x = 0
r.targetPos.y = r.pos.y
}
func (r Robot) IsAtHQ() bool {
return r.pos.x == 0
}
func (r *Robot) Dig(pos Coord, intent Item) {
r.cmd = CMD_DIG
r.targetPos.x = pos.x
r.targetPos.y = pos.y
r.digIntent = intent
}
func (r *Robot) RequestRadar() {
r.cmd = CMD_RADAR
}
func (r *Robot) RequestTrap() {
r.cmd = CMD_TRAP
}
func (r Robot) GetCommand() string {
if r.cmd == CMD_WAIT {
return "WAIT"
}
if r.cmd == CMD_MOVE {
return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_DIG {
return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_RADAR {
return "REQUEST RADAR"
}
if r.cmd == CMD_TRAP {
return "REQUEST TRAP"
}
fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id)
return "WAIT"
}
func (r Robot) IsCmdValid(ores *[]int) (valid bool) {
if r.cmd == CMD_DIG {
if r.digIntent == ITEM_RADAR {
return r.item == ITEM_RADAR
}
if r.digIntent == ITEM_TRAP {
return r.item == ITEM_TRAP
}
if r.digIntent == ITEM_ORE {
// Can only have 1 ore (for now?)
if r.item == ITEM_ORE {
return false
}
valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0
(*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay
return
}
}
if r.cmd == CMD_MOVE {
return r.pos != r.targetPos
}
if r.cmd == CMD_RADAR {
return r.item != ITEM_RADAR
}
if r.cmd == CMD_TRAP {
return r.item != ITEM_TRAP
}
return false
}
func (r Robot) IsDead() bool {
return r.pos.x == -1
}
/**********************************************************************************
* Utility functions
*********************************************************************************/
/**
* The Manhattan distance between 2 coordinates
**/
func dist(p1, p2 Coord) int {
return abs(p1.x-p2.x) + abs(p1.y-p2.y)
}
/**
* The Manhattan distance between 2 coordinates for digging (1 less)
**/
func digDist(p1, p2 Coord) int {
return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0)
}
/**
* The distance in turns between 2 coordinates
**/
func turnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST))
}
/**
* The distance in turns between 2 coordinates for digging
**/
func
|
(p1, p2 Coord) int {
return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))
}
/**********************************************************************************
* Serious business here
*********************************************************************************/
func calculateCellRadarValues(unknowns []int) []int {
radarValues := make([]int, world.Size())
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
cell := Coord{i, j}
for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ {
for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ {
if dist(cell, Coord{m, n}) > RADAR_DIST {
continue
}
radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)]
}
}
}
}
return radarValues
}
func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) {
radarValues := calculateCellRadarValues(unknowns)
closest := world.width // furthest point
largestValue := 0 // lowest value
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
value := radarValues[world.ArrayIndex(i, j)]
if value > largestValue {
largestValue = value
best = Coord{i, j}
closest = i
} else if value == largestValue {
newCoord := Coord{i, j}
// Pick the closest to HQ
if i < closest {
best = newCoord
closest = i
}
}
}
}
return best
}
/**********************************************************************************
* Parsing Logic
*********************************************************************************/
func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) {
scanner.Scan()
fmt.Sscan(scanner.Text(), &myScore, &opponentScore)
return myScore, opponentScore
}
func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){
numOres = 0
numUnknowns = 0
for j := 0; j < world.height; j++ {
scanner.Scan()
inputs := strings.Split(scanner.Text(), " ")
for i := 0; i < world.width; i++ {
// ore: amount of ore or "?" if unknown
// hole: 1 if cell has a hole
ore, err := strconv.Atoi(inputs[2*i])
if err != nil {
(*ores)[world.ArrayIndex(i,j)] = 0
(*unknowns)[world.ArrayIndex(i,j)] = 1
numUnknowns++
} else {
(*ores)[world.ArrayIndex(i,j)] = ore
(*unknowns)[world.ArrayIndex(i,j)] = 0
numOres += ore
}
hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32)
_ = hole
}
}
return numOres, numUnknowns
}
func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){
// entityCount: number of entities visible to you
// radarCooldown: turns left until a new radar can be requested
// trapCooldown: turns left until a new trap can be requested
var entityCount int
scanner.Scan()
fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown)
myRobot_i := 0
for i := 0; i < entityCount; i++ {
// id: unique id of the entity
// type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap
// y: position of the entity
// item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE)
var id, objType, x, y, item int
scanner.Scan()
fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item)
if Object(objType) == OBJ_ME {
robot := &(*robots)[myRobot_i]
robot.id = id
robot.pos
|
digTurnDist
|
identifier_name
|
index.go
|
func max(a, b int) int {
if a > b {
return a
}
return b
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func clamp(x, low, high int) int {
return min(max(x, low), high)
}
/**********************************************************************************
* Data structures
*********************************************************************************/
type World struct {
width, height int
}
var world World
func (w World) ArrayIndex(x, y int) int {
return y * w.width + x
}
func (w World) ArrayIndexC(coord Coord) int {
return coord.y * w.width + coord.x
}
func (w World) Center() Coord {
return Coord{w.width / 2, w.height / 2}
}
func (w World) Size() int {
return w.width * w.height
}
/**
* A pair of ints for coordinates
**/
type Coord struct {
x, y int
}
func (c Coord) String() string {
return fmt.Sprintf("(%d, %d)", c.x, c.y)
}
type Cmd int
const (
CMD_WAIT Cmd = 0
CMD_MOVE Cmd = 1
CMD_DIG Cmd = 2
CMD_RADAR Cmd = 3
CMD_TRAP Cmd = 4
)
type Item int
const (
ITEM_NONE Item = -1
ITEM_RADAR Item = 2
ITEM_TRAP Item = 3
ITEM_ORE Item = 4
)
type Object int
const (
OBJ_ME Object = 0
OBJ_OPPONENT Object = 1
OBJ_RADAR Object = 2
OBJ_TRAP Object = 3
)
type Robot struct {
id int
pos Coord
cmd Cmd
targetPos Coord
item Item
digIntent Item
}
func (r Robot) String() string {
return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item)
}
func (r *Robot) Wait() {
r.cmd = CMD_WAIT
}
func (r *Robot) MoveTo(pos Coord) {
r.cmd = CMD_MOVE
r.targetPos.x = pos.x
r.targetPos.y = pos.y
}
func (r *Robot) Move(dx, dy int) {
r.cmd = CMD_MOVE
r.targetPos.x = clamp(r.pos.x + dx, 0, world.width)
r.targetPos.y = clamp(r.pos.y + dy, 0, world.height)
}
func (r *Robot) ReturnToHQ() {
r.cmd = CMD_MOVE
r.targetPos.x = 0
r.targetPos.y = r.pos.y
}
func (r Robot) IsAtHQ() bool {
return r.pos.x == 0
}
func (r *Robot) Dig(pos Coord, intent Item) {
r.cmd = CMD_DIG
r.targetPos.x = pos.x
r.targetPos.y = pos.y
r.digIntent = intent
}
func (r *Robot) RequestRadar() {
r.cmd = CMD_RADAR
}
func (r *Robot) RequestTrap() {
r.cmd = CMD_TRAP
}
func (r Robot) GetCommand() string {
if r.cmd == CMD_WAIT {
return "WAIT"
}
if r.cmd == CMD_MOVE {
return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_DIG {
return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_RADAR {
return "REQUEST RADAR"
}
if r.cmd == CMD_TRAP {
return "REQUEST TRAP"
}
fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id)
return "WAIT"
}
func (r Robot) IsCmdValid(ores *[]int) (valid bool) {
if r.cmd == CMD_DIG {
if r.digIntent == ITEM_RADAR {
return r.item == ITEM_RADAR
}
if r.digIntent == ITEM_TRAP {
return r.item == ITEM_TRAP
}
if r.digIntent == ITEM_ORE {
// Can only have 1 ore (for now?)
if r.item == ITEM_ORE {
return false
}
valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0
(*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay
return
}
}
if r.cmd == CMD_MOVE {
return r.pos != r.targetPos
}
if r.cmd == CMD_RADAR {
return r.item != ITEM_RADAR
}
if r.cmd == CMD_TRAP {
return r.item != ITEM_TRAP
}
return false
}
func (r Robot) IsDead() bool {
return r.pos.x == -1
}
/**********************************************************************************
* Utility functions
*********************************************************************************/
/**
* The Manhattan distance between 2 coordinates
**/
func dist(p1, p2 Coord) int {
return abs(p1.x-p2.x) + abs(p1.y-p2.y)
}
/**
* The Manhattan distance between 2 coordinates for digging (1 less)
**/
func digDist(p1, p2 Coord) int {
return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0)
}
/**
* The distance in turns between 2 coordinates
**/
func turnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST))
}
/**
* The distance in turns between 2 coordinates for digging
**/
func digTurnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))
}
/**********************************************************************************
* Serious business here
*********************************************************************************/
func calculateCellRadarValues(unknowns []int) []int {
radarValues := make([]int, world.Size())
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
cell := Coord{i, j}
for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ {
for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ {
if dist(cell, Coord{m, n}) > RADAR_DIST {
continue
}
radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)]
}
}
}
}
return radarValues
}
func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) {
radarValues := calculateCellRadarValues(unknowns)
closest := world.width // furthest point
largestValue := 0 // lowest value
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
value := radarValues[world.ArrayIndex(i, j)]
if value > largestValue {
largestValue = value
best = Coord{i, j}
closest = i
} else if value == largestValue {
newCoord := Coord{i, j}
// Pick the closest to HQ
if i < closest {
best = newCoord
closest = i
}
}
}
}
return best
}
/**********************************************************************************
* Parsing Logic
*********************************************************************************/
func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) {
scanner.Scan()
fmt.Sscan(scanner.Text(), &myScore, &opponentScore)
return myScore, opponentScore
}
func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){
numOres = 0
numUnknowns = 0
for j := 0; j < world.height; j++ {
scanner.Scan()
inputs := strings.Split(scanner.Text(), " ")
for i := 0; i < world.width; i++ {
// ore: amount of ore or "?" if unknown
// hole: 1 if cell has a hole
ore, err := strconv.Atoi(inputs[2*i])
if err != nil {
(*ores)[world.ArrayIndex(i,j)] = 0
(*unknowns)[world.ArrayIndex(i,j)] = 1
numUnknowns++
} else {
(*ores)[world.ArrayIndex(i,j)] = ore
(*unknowns)[world.ArrayIndex(i,j)] = 0
numOres += ore
}
hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32)
_ = hole
}
}
return numOres, numUnknowns
}
func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){
|
{
if n < 0 {
return -n
}
return n
}
|
identifier_body
|
|
index.go
|
)", c.x, c.y)
}
type Cmd int
const (
CMD_WAIT Cmd = 0
CMD_MOVE Cmd = 1
CMD_DIG Cmd = 2
CMD_RADAR Cmd = 3
CMD_TRAP Cmd = 4
)
type Item int
const (
ITEM_NONE Item = -1
ITEM_RADAR Item = 2
ITEM_TRAP Item = 3
ITEM_ORE Item = 4
)
type Object int
const (
OBJ_ME Object = 0
OBJ_OPPONENT Object = 1
OBJ_RADAR Object = 2
OBJ_TRAP Object = 3
)
type Robot struct {
id int
pos Coord
cmd Cmd
targetPos Coord
item Item
digIntent Item
}
func (r Robot) String() string {
return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item)
}
func (r *Robot) Wait() {
r.cmd = CMD_WAIT
}
func (r *Robot) MoveTo(pos Coord) {
r.cmd = CMD_MOVE
r.targetPos.x = pos.x
r.targetPos.y = pos.y
}
func (r *Robot) Move(dx, dy int) {
r.cmd = CMD_MOVE
r.targetPos.x = clamp(r.pos.x + dx, 0, world.width)
r.targetPos.y = clamp(r.pos.y + dy, 0, world.height)
}
func (r *Robot) ReturnToHQ() {
r.cmd = CMD_MOVE
r.targetPos.x = 0
r.targetPos.y = r.pos.y
}
func (r Robot) IsAtHQ() bool {
return r.pos.x == 0
}
func (r *Robot) Dig(pos Coord, intent Item) {
r.cmd = CMD_DIG
r.targetPos.x = pos.x
r.targetPos.y = pos.y
r.digIntent = intent
}
func (r *Robot) RequestRadar() {
r.cmd = CMD_RADAR
}
func (r *Robot) RequestTrap() {
r.cmd = CMD_TRAP
}
func (r Robot) GetCommand() string {
if r.cmd == CMD_WAIT {
return "WAIT"
}
if r.cmd == CMD_MOVE {
return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_DIG {
return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_RADAR {
return "REQUEST RADAR"
}
if r.cmd == CMD_TRAP {
return "REQUEST TRAP"
}
fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id)
return "WAIT"
}
func (r Robot) IsCmdValid(ores *[]int) (valid bool) {
if r.cmd == CMD_DIG {
if r.digIntent == ITEM_RADAR {
return r.item == ITEM_RADAR
}
if r.digIntent == ITEM_TRAP {
return r.item == ITEM_TRAP
}
if r.digIntent == ITEM_ORE {
// Can only have 1 ore (for now?)
if r.item == ITEM_ORE {
return false
}
valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0
(*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay
return
}
}
if r.cmd == CMD_MOVE {
return r.pos != r.targetPos
}
if r.cmd == CMD_RADAR {
return r.item != ITEM_RADAR
}
if r.cmd == CMD_TRAP {
return r.item != ITEM_TRAP
}
return false
}
func (r Robot) IsDead() bool {
return r.pos.x == -1
}
/**********************************************************************************
* Utility functions
*********************************************************************************/
/**
* The Manhattan distance between 2 coordinates
**/
func dist(p1, p2 Coord) int {
return abs(p1.x-p2.x) + abs(p1.y-p2.y)
}
/**
* The Manhattan distance between 2 coordinates for digging (1 less)
**/
func digDist(p1, p2 Coord) int {
return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0)
}
/**
* The distance in turns between 2 coordinates
**/
func turnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST))
}
/**
* The distance in turns between 2 coordinates for digging
**/
func digTurnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))
}
/**********************************************************************************
* Serious business here
*********************************************************************************/
func calculateCellRadarValues(unknowns []int) []int {
radarValues := make([]int, world.Size())
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
cell := Coord{i, j}
for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ {
for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ {
if dist(cell, Coord{m, n}) > RADAR_DIST {
continue
}
radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)]
}
}
}
}
return radarValues
}
func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) {
radarValues := calculateCellRadarValues(unknowns)
closest := world.width // furthest point
largestValue := 0 // lowest value
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
value := radarValues[world.ArrayIndex(i, j)]
if value > largestValue {
largestValue = value
best = Coord{i, j}
closest = i
} else if value == largestValue {
newCoord := Coord{i, j}
// Pick the closest to HQ
if i < closest {
best = newCoord
closest = i
}
}
}
}
return best
}
/**********************************************************************************
* Parsing Logic
*********************************************************************************/
func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) {
scanner.Scan()
fmt.Sscan(scanner.Text(), &myScore, &opponentScore)
return myScore, opponentScore
}
func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){
numOres = 0
numUnknowns = 0
for j := 0; j < world.height; j++ {
scanner.Scan()
inputs := strings.Split(scanner.Text(), " ")
for i := 0; i < world.width; i++ {
// ore: amount of ore or "?" if unknown
// hole: 1 if cell has a hole
ore, err := strconv.Atoi(inputs[2*i])
if err != nil {
(*ores)[world.ArrayIndex(i,j)] = 0
(*unknowns)[world.ArrayIndex(i,j)] = 1
numUnknowns++
} else {
(*ores)[world.ArrayIndex(i,j)] = ore
(*unknowns)[world.ArrayIndex(i,j)] = 0
numOres += ore
}
hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32)
_ = hole
}
}
return numOres, numUnknowns
}
func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){
// entityCount: number of entities visible to you
// radarCooldown: turns left until a new radar can be requested
// trapCooldown: turns left until a new trap can be requested
var entityCount int
scanner.Scan()
fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown)
myRobot_i := 0
for i := 0; i < entityCount; i++ {
// id: unique id of the entity
// type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap
// y: position of the entity
// item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE)
var id, objType, x, y, item int
scanner.Scan()
fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item)
if Object(objType) == OBJ_ME {
|
robot := &(*robots)[myRobot_i]
robot.id = id
robot.pos
|
random_line_split
|
|
index.go
|
c.x, c.y)
}
type Cmd int
const (
CMD_WAIT Cmd = 0
CMD_MOVE Cmd = 1
CMD_DIG Cmd = 2
CMD_RADAR Cmd = 3
CMD_TRAP Cmd = 4
)
type Item int
const (
ITEM_NONE Item = -1
ITEM_RADAR Item = 2
ITEM_TRAP Item = 3
ITEM_ORE Item = 4
)
type Object int
const (
OBJ_ME Object = 0
OBJ_OPPONENT Object = 1
OBJ_RADAR Object = 2
OBJ_TRAP Object = 3
)
type Robot struct {
id int
pos Coord
cmd Cmd
targetPos Coord
item Item
digIntent Item
}
func (r Robot) String() string {
return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item)
}
func (r *Robot) Wait() {
r.cmd = CMD_WAIT
}
func (r *Robot) MoveTo(pos Coord) {
r.cmd = CMD_MOVE
r.targetPos.x = pos.x
r.targetPos.y = pos.y
}
func (r *Robot) Move(dx, dy int) {
r.cmd = CMD_MOVE
r.targetPos.x = clamp(r.pos.x + dx, 0, world.width)
r.targetPos.y = clamp(r.pos.y + dy, 0, world.height)
}
func (r *Robot) ReturnToHQ() {
r.cmd = CMD_MOVE
r.targetPos.x = 0
r.targetPos.y = r.pos.y
}
func (r Robot) IsAtHQ() bool {
return r.pos.x == 0
}
func (r *Robot) Dig(pos Coord, intent Item) {
r.cmd = CMD_DIG
r.targetPos.x = pos.x
r.targetPos.y = pos.y
r.digIntent = intent
}
func (r *Robot) RequestRadar() {
r.cmd = CMD_RADAR
}
func (r *Robot) RequestTrap() {
r.cmd = CMD_TRAP
}
func (r Robot) GetCommand() string {
if r.cmd == CMD_WAIT {
return "WAIT"
}
if r.cmd == CMD_MOVE
|
if r.cmd == CMD_DIG {
return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y)
}
if r.cmd == CMD_RADAR {
return "REQUEST RADAR"
}
if r.cmd == CMD_TRAP {
return "REQUEST TRAP"
}
fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id)
return "WAIT"
}
func (r Robot) IsCmdValid(ores *[]int) (valid bool) {
if r.cmd == CMD_DIG {
if r.digIntent == ITEM_RADAR {
return r.item == ITEM_RADAR
}
if r.digIntent == ITEM_TRAP {
return r.item == ITEM_TRAP
}
if r.digIntent == ITEM_ORE {
// Can only have 1 ore (for now?)
if r.item == ITEM_ORE {
return false
}
valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0
(*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay
return
}
}
if r.cmd == CMD_MOVE {
return r.pos != r.targetPos
}
if r.cmd == CMD_RADAR {
return r.item != ITEM_RADAR
}
if r.cmd == CMD_TRAP {
return r.item != ITEM_TRAP
}
return false
}
func (r Robot) IsDead() bool {
return r.pos.x == -1
}
/**********************************************************************************
* Utility functions
*********************************************************************************/
/**
* The Manhattan distance between 2 coordinates
**/
func dist(p1, p2 Coord) int {
return abs(p1.x-p2.x) + abs(p1.y-p2.y)
}
/**
* The Manhattan distance between 2 coordinates for digging (1 less)
**/
func digDist(p1, p2 Coord) int {
return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0)
}
/**
* The distance in turns between 2 coordinates
**/
func turnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST))
}
/**
* The distance in turns between 2 coordinates for digging
**/
func digTurnDist(p1, p2 Coord) int {
return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))
}
/**********************************************************************************
* Serious business here
*********************************************************************************/
func calculateCellRadarValues(unknowns []int) []int {
radarValues := make([]int, world.Size())
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
cell := Coord{i, j}
for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ {
for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ {
if dist(cell, Coord{m, n}) > RADAR_DIST {
continue
}
radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)]
}
}
}
}
return radarValues
}
func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) {
radarValues := calculateCellRadarValues(unknowns)
closest := world.width // furthest point
largestValue := 0 // lowest value
for j := 0; j < world.height; j++ {
for i := 1; i < world.width; i++ {
value := radarValues[world.ArrayIndex(i, j)]
if value > largestValue {
largestValue = value
best = Coord{i, j}
closest = i
} else if value == largestValue {
newCoord := Coord{i, j}
// Pick the closest to HQ
if i < closest {
best = newCoord
closest = i
}
}
}
}
return best
}
/**********************************************************************************
* Parsing Logic
*********************************************************************************/
func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) {
scanner.Scan()
fmt.Sscan(scanner.Text(), &myScore, &opponentScore)
return myScore, opponentScore
}
func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){
numOres = 0
numUnknowns = 0
for j := 0; j < world.height; j++ {
scanner.Scan()
inputs := strings.Split(scanner.Text(), " ")
for i := 0; i < world.width; i++ {
// ore: amount of ore or "?" if unknown
// hole: 1 if cell has a hole
ore, err := strconv.Atoi(inputs[2*i])
if err != nil {
(*ores)[world.ArrayIndex(i,j)] = 0
(*unknowns)[world.ArrayIndex(i,j)] = 1
numUnknowns++
} else {
(*ores)[world.ArrayIndex(i,j)] = ore
(*unknowns)[world.ArrayIndex(i,j)] = 0
numOres += ore
}
hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32)
_ = hole
}
}
return numOres, numUnknowns
}
func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){
// entityCount: number of entities visible to you
// radarCooldown: turns left until a new radar can be requested
// trapCooldown: turns left until a new trap can be requested
var entityCount int
scanner.Scan()
fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown)
myRobot_i := 0
for i := 0; i < entityCount; i++ {
// id: unique id of the entity
// type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap
// y: position of the entity
// item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE)
var id, objType, x, y, item int
scanner.Scan()
fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item)
if Object(objType) == OBJ_ME {
robot := &(*robots)[myRobot_i]
robot.id = id
robot
|
{
return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y)
}
|
conditional_block
|
main.rs
|
() {
let s: Vec<usize> = std::fs::read_to_string("src/e11.txt")
.unwrap()
.split_whitespace()
.map(|n| n.parse::<usize>().unwrap())
.collect();
//println!("{:?}", s);
// could just run with s, but let's build our 2d array.
let mut v = [[0; 20]; 20];
(0..400).for_each(|i| v[i / 20][i % 20] = s[i]);
//println!("{:?}", v);
let mut big = 0;
use itertools::Itertools;
(0..20).cartesian_product(0..20).for_each(|(i, j)| {
if i < 17 {
// h_
let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j];
if temp > big {
// println!(
// "h_ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if j < 17 {
// v|
let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3];
if temp > big {
// println!(
// "v| new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
if i < 17 && j < 17 {
// d\
let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3];
if temp > big {
// println!(
// "d\\ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j],
// );
big = temp
}
}
if i < 17 && j > 2 {
// d/
let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3];
if temp > big {
// println!(
// "d/ new biggest: {} starting at: ({},{}), with init value {}:",
// big, i, j, v[i][j]
// );
big = temp
}
}
});
println!("biggest: {}", big);
}
// v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs
// 1. include_str!("grid.txt") I could be using this macro instead.
// 2. .filter_map(|n| n.parse().ok()), well isn't that sweet.
// 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though.
// What is the value of the first triangle number to have over five hundred divisors?
#[timings]
fn e12() {
// entire problem is "count divisors". Naive soln sucks. Derive a soln.
// Proposition. given X = p_1^a * p_2^b * ...,
// N_factors(X) = (a+1)(b+1)....
// now we only need to find the algebraic multiplicity of each prime divisor.
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> {
let mut h = std::collections::HashMap::new();
let mut n = input;
while n % 2 == 0 {
let counter = h.entry(2).or_insert(0);
*counter += 1;
n /= 2;
}
let mut i = 3;
while n > 1 {
while n % i == 0 {
let counter = h.entry(i).or_insert(0);
*counter += 1;
n /= i;
}
i += 2;
}
h
};
let mut i = 1;
let mut sum = 0;
loop {
sum += i;
i += 1;
let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d));
//dbg!(sum, divisors);
if divisors > 500 {
println!("value: {}, the {}th triangle number", sum, i);
break;
}
}
}
#[timings]
fn e13() {
let s: Vec<String> = std::fs::read_to_string("src/e13.txt")
.unwrap()
.split_whitespace()
.map(|s| s.parse::<String>().unwrap())
.collect();
let s13: Vec<usize> = s
.iter()
.map(|l| l[..13].parse::<usize>().unwrap())
.collect();
let n = s13.iter().sum::<usize>().to_string();
println!("e13: {}", &n[..10]);
}
#[allow(dead_code)]
fn collatz(n: usize) -> usize {
match n % 2 {
0 => n / 2,
1 => 3 * n + 1,
_ => unreachable!(),
}
}
#[timings]
fn e14() {
use std::collections::HashMap;
let mut h = HashMap::new();
h.insert(1, 0);
let mut it_counter = 0;
let mut biggest = (0, 0);
for it in 2..1_000_000 {
if h.contains_key(&it) {
continue;
}
// Build a cache of values til we find a value we have seen
let mut next = collatz(it);
it_counter += 1;
let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1
while h.get(&next).is_none() {
it_counter += 1;
cache.push((next, it_counter));
next = collatz(next);
}
// the next value is now in the hashmap
let count_last = *h.get(&next).unwrap();
let count_for_it = count_last + it_counter;
//println!("it:{},count: {}", it, count_for_it);
for (n, c) in cache {
let count = count_for_it + 1 - c;
//println!("n:{},c: {}, count: {}", n, c, count);
h.insert(n, count);
}
it_counter = 0;
if count_for_it > biggest.0 {
biggest = (count_for_it, it);
}
}
println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1);
}
#[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs
fn e14_zach_denton() {
let mut collatz: Vec<usize> = vec![0; 1_000_000];
collatz[1] = 1;
let max = (2..collatz.len())
.max_by_key(|&i| {
let f = |n: usize| match n % 2 {
0 => n / 2,
_ => n * 3 + 1,
};
// og:
let (mut j, mut len) = (i, 0);
loop {
// exit if:
if j < collatz.len() && collatz[j] != 0 {
break;
}
len += 1;
j = f(j);
}
len += collatz[j];
collatz[i] = len;
len
})
.unwrap();
println!("{}", max);
}
// How many such (only move left or down) routes are there through a 20×20 grid?
#[timings]
fn e15() {
// basic combinatorics. of 40 positions, choose 20. Equally, the 20th Catalan.
let a: u128 = (21..=40).product();
let b: u128 = (2..=20).product();
println!("{}", a / b);
}
#[timings]
fn e16() {
// mostly, futzing with bigint.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let b = a.pow(1000);
//println!("{:
|
e11
|
identifier_name
|
|
main.rs
|
% 2 == 0 {
let counter = h.entry(2).or_insert(0);
*counter += 1;
n /= 2;
}
let mut i = 3;
while n > 1 {
while n % i == 0 {
let counter = h.entry(i).or_insert(0);
*counter += 1;
n /= i;
}
i += 2;
}
h
};
let mut i = 1;
let mut sum = 0;
loop {
sum += i;
i += 1;
let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d));
//dbg!(sum, divisors);
if divisors > 500 {
println!("value: {}, the {}th triangle number", sum, i);
break;
}
}
}
#[timings]
fn e13() {
let s: Vec<String> = std::fs::read_to_string("src/e13.txt")
.unwrap()
.split_whitespace()
.map(|s| s.parse::<String>().unwrap())
.collect();
let s13: Vec<usize> = s
.iter()
.map(|l| l[..13].parse::<usize>().unwrap())
.collect();
let n = s13.iter().sum::<usize>().to_string();
println!("e13: {}", &n[..10]);
}
#[allow(dead_code)]
fn collatz(n: usize) -> usize {
match n % 2 {
0 => n / 2,
1 => 3 * n + 1,
_ => unreachable!(),
}
}
#[timings]
fn e14() {
use std::collections::HashMap;
let mut h = HashMap::new();
h.insert(1, 0);
let mut it_counter = 0;
let mut biggest = (0, 0);
for it in 2..1_000_000 {
if h.contains_key(&it) {
continue;
}
// Build a cache of values til we find a value we have seen
let mut next = collatz(it);
it_counter += 1;
let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1
while h.get(&next).is_none() {
it_counter += 1;
cache.push((next, it_counter));
next = collatz(next);
}
// the next value is now in the hashmap
let count_last = *h.get(&next).unwrap();
let count_for_it = count_last + it_counter;
//println!("it:{},count: {}", it, count_for_it);
for (n, c) in cache {
let count = count_for_it + 1 - c;
//println!("n:{},c: {}, count: {}", n, c, count);
h.insert(n, count);
}
it_counter = 0;
if count_for_it > biggest.0 {
biggest = (count_for_it, it);
}
}
println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1);
}
#[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs
fn e14_zach_denton() {
let mut collatz: Vec<usize> = vec![0; 1_000_000];
collatz[1] = 1;
let max = (2..collatz.len())
.max_by_key(|&i| {
let f = |n: usize| match n % 2 {
0 => n / 2,
_ => n * 3 + 1,
};
// og:
let (mut j, mut len) = (i, 0);
loop {
// exit if:
if j < collatz.len() && collatz[j] != 0 {
break;
}
len += 1;
j = f(j);
}
len += collatz[j];
collatz[i] = len;
len
})
.unwrap();
println!("{}", max);
}
// How many such (only move left or down) routes are there through a 20×20 grid?
#[timings]
fn e15() {
// basic combinatorics. of 40 positions, choose 20. Equally, the 20th Catalan.
let a: u128 = (21..=40).product();
let b: u128 = (2..=20).product();
println!("{}", a / b);
}
#[timings]
fn e16() {
// mostly, futzing with bigint.
use num_bigint::BigUint;
// note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint.
let a = BigUint::new(vec![2]);
let b = a.pow(1000);
//println!("{:?}", b);
// TFAE:
//let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap());
let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum();
println!("{:?}", res);
//let digits: num::BigInt = 2.pow(1000);
}
// If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings]
fn e17() {
let map = vec![
(0, 0),
(1, 3),
(2, 3),
(3, 5),
(4, 4),
(5, 4),
(6, 3),
(7, 5),
(8, 5),
(9, 4),
(10, 3),
(11, 6),
(12, 6),
(13, 8),
(14, 8),
(15, 7),
(16, 7),
(17, 9),
(18, 8),
(19, 8),
(20, 6),
(30, 6),
(40, 5),
(50, 5),
(60, 5),
(70, 7),
(80, 6),
(90, 6),
];
let h = std::collections::HashMap::from_iter(map.into_iter());
let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h));
println!("{}", res);
}
fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize {
let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10);
let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() };
let bb = if b == 1 {
*h.get(&(b * 10 + a)).unwrap()
} else {
*h.get(&(b * 10)).unwrap()
};
let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently
if c > 0 && aa == 0 && bb == 0 {
cc -= 3 // 100 doesn't have an "and"
};
let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 };
//println!("{}:{},{},{},{}", d, ee, cc, bb, aa);
aa + bb + cc + ee
}
// first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one.
// A couple possible approaches occur:
// naive: at each step, pick the greatest next value
// brute: calculate the value of all 2^14 paths, not hard
// pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means)
// This problem begs to be solved recursively somehow.
#[timings]
fn e18() {
|
let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt")
.unwrap()
.lines()
.map(|l| {
l.split_whitespace()
.into_iter()
.map(|n| n.parse::<usize>().unwrap())
.collect::<Vec<usize>>()
})
.collect();
let res = e18_less_naive_r(&triangle[1..], 75, 0);
println!("{}", res);
}
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.