| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
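Each row below is one fill-in-the-middle (FIM) example: `prefix` and `suffix` are the code surrounding a masked span, `middle` is the masked span itself, and `fim_type` labels the kind of span that was masked (the rows below use `identifier_body`, `conditional_block`, `random_line_split`, and `identifier_name`). As a minimal sketch of how such a row relates back to the original file, assuming plain string concatenation and hypothetical field names taken from the column names above:

```go
package main

import "fmt"

// fimExample mirrors the columns shown in the table above.
// The field and type names here are assumptions for illustration;
// the dataset's actual schema may differ.
type fimExample struct {
	FileName string // e.g. "planner.go"
	Prefix   string // code before the masked span
	Suffix   string // code after the masked span
	Middle   string // the masked span a model is asked to fill in
	FimType  string // e.g. "identifier_body", "conditional_block"
}

// reassemble splices the middle back between prefix and suffix,
// recovering the contiguous source text the example was cut from.
func reassemble(ex fimExample) string {
	return ex.Prefix + ex.Middle + ex.Suffix
}

func main() {
	ex := fimExample{
		FileName: "planner.go",
		Prefix:   "func (p *planner) Txn() *kv.Txn ",
		Middle:   "{\n\treturn p.txn\n}",
		Suffix:   "\n",
		FimType:  "identifier_body",
	}
	fmt.Print(reassemble(ex))
}
```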
planner.go
|
planner) ExtendedEvalContext() *extendedEvalContext {
return &p.extendedEvalCtx
}
func (p *planner) ExtendedEvalContextCopy() *extendedEvalContext {
return p.extendedEvalCtx.copy()
}
// CurrentDatabase is part of the resolver.SchemaResolver interface.
func (p *planner) CurrentDatabase() string {
return p.SessionData().Database
}
// CurrentSearchPath is part of the resolver.SchemaResolver interface.
func (p *planner) CurrentSearchPath() sessiondata.SearchPath {
return p.SessionData().SearchPath
}
// EvalContext() provides convenient access to the planner's EvalContext().
func (p *planner) EvalContext() *tree.EvalContext {
return &p.extendedEvalCtx.EvalContext
}
func (p *planner) Descriptors() *descs.Collection {
return p.extendedEvalCtx.Descs
}
// ExecCfg implements the PlanHookState interface.
func (p *planner) ExecCfg() *ExecutorConfig {
return p.extendedEvalCtx.ExecCfg
}
// GetOrInitSequenceCache returns the sequence cache for the session.
// If the sequence cache has not been used yet, it initializes the cache
// inside the session data.
func (p *planner) GetOrInitSequenceCache() sessiondata.SequenceCache {
if p.SessionData().SequenceCache == nil {
p.sessionDataMutator.initSequenceCache()
}
return p.SessionData().SequenceCache
}
func (p *planner) LeaseMgr() *lease.Manager {
return p.Descriptors().LeaseManager()
}
func (p *planner) Txn() *kv.Txn {
return p.txn
}
func (p *planner) User() security.SQLUsername {
return p.SessionData().User()
}
func (p *planner) TemporarySchemaName() string {
return temporarySchemaName(p.ExtendedEvalContext().SessionID)
}
// DistSQLPlanner returns the DistSQLPlanner
func (p *planner) DistSQLPlanner() *DistSQLPlanner {
return p.extendedEvalCtx.DistSQLPlanner
}
// MigrationJobDeps returns the migration.JobDeps.
func (p *planner) MigrationJobDeps() migration.JobDeps {
return p.execCfg.MigrationJobDeps
}
// GetTypeFromValidSQLSyntax implements the tree.EvalPlanner interface.
// We define this here to break the dependency from eval.go to the parser.
func (p *planner) GetTypeFromValidSQLSyntax(sql string) (*types.T, error) {
ref, err := parser.GetTypeFromValidSQLSyntax(sql)
if err != nil {
return nil, err
}
return tree.ResolveType(context.TODO(), ref, p.semaCtx.GetTypeResolver())
}
// ParseQualifiedTableName implements the tree.EvalDatabase interface.
// This exists to get around a circular dependency between sql/sem/tree and
// sql/parser. sql/parser depends on tree to make objects, so tree cannot import
// ParseQualifiedTableName even though some builtins need that function.
// TODO(jordan): remove this once builtins can be moved outside of sql/sem/tree.
func (p *planner) ParseQualifiedTableName(sql string) (*tree.TableName, error) {
return parser.ParseQualifiedTableName(sql)
}
// ResolveTableName implements the tree.EvalDatabase interface.
func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tree.ID, error) {
flags := tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveAnyTableKind)
desc, err := resolver.ResolveExistingTableObject(ctx, p, tn, flags)
if err != nil {
return 0, err
}
return tree.ID(desc.GetID()), nil
}
// LookupTableByID looks up a table, by the given descriptor ID. Based on the
// CommonLookupFlags, it could use or skip the Collection cache. See
// Collection.getTableVersionByID for how it's used.
// TODO (SQLSchema): This should call into the set of SchemaAccessors instead
// of having its own logic for lookups.
func (p *planner) LookupTableByID(
ctx context.Context, tableID descpb.ID,
) (catalog.TableDescriptor, error) {
if entry, err := p.getVirtualTabler().getVirtualTableEntryByID(tableID); err == nil {
return entry.desc, nil
}
flags := tree.ObjectLookupFlags{CommonLookupFlags: tree.CommonLookupFlags{AvoidCached: p.avoidCachedDescriptors}}
table, err := p.Descriptors().GetImmutableTableByID(ctx, p.txn, tableID, flags)
if err != nil {
return nil, err
}
return table, nil
}
// TypeAsString enforces (not hints) that the given expression typechecks as a
// string and returns a function that can be called to get the string value
// during (planNode).Start.
// To also allow NULLs to be returned, use TypeAsStringOrNull() instead.
func (p *planner) TypeAsString(
ctx context.Context, e tree.Expr, op string,
) (func() (string, error), error) {
typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
if err != nil {
return nil, err
}
evalFn := p.makeStringEvalFn(typedE)
return func() (string, error) {
isNull, str, err := evalFn()
if err != nil {
return "", err
}
if isNull {
return "", errors.Errorf("expected string, got NULL")
}
return str, nil
}, nil
}
// TypeAsStringOrNull is like TypeAsString but allows NULLs.
func (p *planner) TypeAsStringOrNull(
ctx context.Context, e tree.Expr, op string,
) (func() (bool, string, error), error) {
typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
if err != nil {
return nil, err
}
return p.makeStringEvalFn(typedE), nil
}
func (p *planner) makeStringEvalFn(typedE tree.TypedExpr) func() (bool, string, error) {
return func() (bool, string, error) {
d, err := typedE.Eval(p.EvalContext())
if err != nil {
return false, "", err
}
if d == tree.DNull {
return true, "", nil
}
str, ok := d.(*tree.DString)
if !ok {
return false, "", errors.Errorf("failed to cast %T to string", d)
}
return false, string(*str), nil
}
}
// KVStringOptValidate indicates the requested validation of a TypeAsStringOpts
// option.
type KVStringOptValidate string
// KVStringOptValidate values
const (
KVStringOptAny KVStringOptValidate = `any`
KVStringOptRequireNoValue KVStringOptValidate = `no-value`
KVStringOptRequireValue KVStringOptValidate = `value`
)
// evalStringOptions evaluates the KVOption values as strings and returns them
// in a map. Options with no value have an empty string.
func evalStringOptions(
evalCtx *tree.EvalContext, opts []exec.KVOption, optValidate map[string]KVStringOptValidate,
) (map[string]string, error) {
res := make(map[string]string, len(opts))
for _, opt := range opts {
k := opt.Key
validate, ok := optValidate[k]
if !ok {
return nil, errors.Errorf("invalid option %q", k)
}
val, err := opt.Value.Eval(evalCtx)
if err != nil {
return nil, err
}
if val == tree.DNull {
if validate == KVStringOptRequireValue {
return nil, errors.Errorf("option %q requires a value", k)
}
res[k] = ""
} else {
if validate == KVStringOptRequireNoValue {
return nil, errors.Errorf("option %q does not take a value", k)
}
str, ok := val.(*tree.DString)
if !ok {
return nil, errors.Errorf("expected string value, got %T", val)
}
res[k] = string(*str)
}
}
return res, nil
}
// TypeAsStringOpts enforces (not hints) that the given expressions
// typecheck as strings, and returns a function that can be called to
// get the string value during (planNode).Start.
func (p *planner) TypeAsStringOpts(
ctx context.Context, opts tree.KVOptions, optValidate map[string]KVStringOptValidate,
) (func() (map[string]string, error), error)
|
{
typed := make(map[string]tree.TypedExpr, len(opts))
for _, opt := range opts {
k := string(opt.Key)
validate, ok := optValidate[k]
if !ok {
return nil, errors.Errorf("invalid option %q", k)
}
if opt.Value == nil {
if validate == KVStringOptRequireValue {
return nil, errors.Errorf("option %q requires a value", k)
}
typed[k] = nil
continue
}
if validate == KVStringOptRequireNoValue {
return nil, errors.Errorf("option %q does not take a value", k)
}
r, err := tree.TypeCheckAndRequire(ctx, opt.Value, &p.semaCtx, types.String, k)
|
identifier_body
|
|
planner.go
|
Manager()
}
func (p *planner) Txn() *kv.Txn {
return p.txn
}
func (p *planner) User() security.SQLUsername {
return p.SessionData().User()
}
func (p *planner) TemporarySchemaName() string {
return temporarySchemaName(p.ExtendedEvalContext().SessionID)
}
// DistSQLPlanner returns the DistSQLPlanner
func (p *planner) DistSQLPlanner() *DistSQLPlanner {
return p.extendedEvalCtx.DistSQLPlanner
}
// MigrationJobDeps returns the migration.JobDeps.
func (p *planner) MigrationJobDeps() migration.JobDeps {
return p.execCfg.MigrationJobDeps
}
// GetTypeFromValidSQLSyntax implements the tree.EvalPlanner interface.
// We define this here to break the dependency from eval.go to the parser.
func (p *planner) GetTypeFromValidSQLSyntax(sql string) (*types.T, error) {
ref, err := parser.GetTypeFromValidSQLSyntax(sql)
if err != nil {
return nil, err
}
return tree.ResolveType(context.TODO(), ref, p.semaCtx.GetTypeResolver())
}
// ParseQualifiedTableName implements the tree.EvalDatabase interface.
// This exists to get around a circular dependency between sql/sem/tree and
// sql/parser. sql/parser depends on tree to make objects, so tree cannot import
// ParseQualifiedTableName even though some builtins need that function.
// TODO(jordan): remove this once builtins can be moved outside of sql/sem/tree.
func (p *planner) ParseQualifiedTableName(sql string) (*tree.TableName, error) {
return parser.ParseQualifiedTableName(sql)
}
// ResolveTableName implements the tree.EvalDatabase interface.
func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tree.ID, error) {
flags := tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveAnyTableKind)
desc, err := resolver.ResolveExistingTableObject(ctx, p, tn, flags)
if err != nil {
return 0, err
}
return tree.ID(desc.GetID()), nil
}
// LookupTableByID looks up a table, by the given descriptor ID. Based on the
// CommonLookupFlags, it could use or skip the Collection cache. See
// Collection.getTableVersionByID for how it's used.
// TODO (SQLSchema): This should call into the set of SchemaAccessors instead
// of having its own logic for lookups.
func (p *planner) LookupTableByID(
ctx context.Context, tableID descpb.ID,
) (catalog.TableDescriptor, error) {
if entry, err := p.getVirtualTabler().getVirtualTableEntryByID(tableID); err == nil {
return entry.desc, nil
}
flags := tree.ObjectLookupFlags{CommonLookupFlags: tree.CommonLookupFlags{AvoidCached: p.avoidCachedDescriptors}}
table, err := p.Descriptors().GetImmutableTableByID(ctx, p.txn, tableID, flags)
if err != nil {
return nil, err
}
return table, nil
}
// TypeAsString enforces (not hints) that the given expression typechecks as a
// string and returns a function that can be called to get the string value
// during (planNode).Start.
// To also allow NULLs to be returned, use TypeAsStringOrNull() instead.
func (p *planner) TypeAsString(
ctx context.Context, e tree.Expr, op string,
) (func() (string, error), error) {
typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
if err != nil {
return nil, err
}
evalFn := p.makeStringEvalFn(typedE)
return func() (string, error) {
isNull, str, err := evalFn()
if err != nil {
return "", err
}
if isNull {
return "", errors.Errorf("expected string, got NULL")
}
return str, nil
}, nil
}
// TypeAsStringOrNull is like TypeAsString but allows NULLs.
func (p *planner) TypeAsStringOrNull(
ctx context.Context, e tree.Expr, op string,
) (func() (bool, string, error), error) {
typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
if err != nil {
return nil, err
}
return p.makeStringEvalFn(typedE), nil
}
func (p *planner) makeStringEvalFn(typedE tree.TypedExpr) func() (bool, string, error) {
return func() (bool, string, error) {
d, err := typedE.Eval(p.EvalContext())
if err != nil {
return false, "", err
}
if d == tree.DNull {
return true, "", nil
}
str, ok := d.(*tree.DString)
if !ok {
return false, "", errors.Errorf("failed to cast %T to string", d)
}
return false, string(*str), nil
}
}
// KVStringOptValidate indicates the requested validation of a TypeAsStringOpts
// option.
type KVStringOptValidate string
// KVStringOptValidate values
const (
KVStringOptAny KVStringOptValidate = `any`
KVStringOptRequireNoValue KVStringOptValidate = `no-value`
KVStringOptRequireValue KVStringOptValidate = `value`
)
// evalStringOptions evaluates the KVOption values as strings and returns them
// in a map. Options with no value have an empty string.
func evalStringOptions(
evalCtx *tree.EvalContext, opts []exec.KVOption, optValidate map[string]KVStringOptValidate,
) (map[string]string, error) {
res := make(map[string]string, len(opts))
for _, opt := range opts {
k := opt.Key
validate, ok := optValidate[k]
if !ok {
return nil, errors.Errorf("invalid option %q", k)
}
val, err := opt.Value.Eval(evalCtx)
if err != nil {
return nil, err
}
if val == tree.DNull {
if validate == KVStringOptRequireValue {
return nil, errors.Errorf("option %q requires a value", k)
}
res[k] = ""
} else {
if validate == KVStringOptRequireNoValue {
return nil, errors.Errorf("option %q does not take a value", k)
}
str, ok := val.(*tree.DString)
if !ok {
return nil, errors.Errorf("expected string value, got %T", val)
}
res[k] = string(*str)
}
}
return res, nil
}
// TypeAsStringOpts enforces (not hints) that the given expressions
// typecheck as strings, and returns a function that can be called to
// get the string value during (planNode).Start.
func (p *planner) TypeAsStringOpts(
ctx context.Context, opts tree.KVOptions, optValidate map[string]KVStringOptValidate,
) (func() (map[string]string, error), error) {
typed := make(map[string]tree.TypedExpr, len(opts))
for _, opt := range opts {
k := string(opt.Key)
validate, ok := optValidate[k]
if !ok {
return nil, errors.Errorf("invalid option %q", k)
}
if opt.Value == nil {
if validate == KVStringOptRequireValue {
return nil, errors.Errorf("option %q requires a value", k)
}
typed[k] = nil
continue
}
if validate == KVStringOptRequireNoValue {
return nil, errors.Errorf("option %q does not take a value", k)
}
r, err := tree.TypeCheckAndRequire(ctx, opt.Value, &p.semaCtx, types.String, k)
if err != nil {
return nil, err
}
typed[k] = r
}
fn := func() (map[string]string, error) {
res := make(map[string]string, len(typed))
for name, e := range typed {
if e == nil {
res[name] = ""
continue
}
d, err := e.Eval(p.EvalContext())
if err != nil {
return nil, err
}
str, ok := d.(*tree.DString)
if !ok {
return res, errors.Errorf("failed to cast %T to string", d)
}
res[name] = string(*str)
}
return res, nil
}
return fn, nil
}
// TypeAsStringArray enforces (not hints) that the given expressions all typecheck as
// strings and returns a function that can be called to get the string values
// during (planNode).Start.
func (p *planner) TypeAsStringArray(
ctx context.Context, exprs tree.Exprs, op string,
) (func() ([]string, error), error) {
typedExprs := make([]tree.TypedExpr, len(exprs))
for i := range exprs
|
{
typedE, err := tree.TypeCheckAndRequire(ctx, exprs[i], &p.semaCtx, types.String, op)
if err != nil {
return nil, err
}
typedExprs[i] = typedE
}
|
conditional_block
|
|
builder.go
|
// Validation tags are based on country codes, hence it's easy to differentiate and
// also trivial to set the validator tag as country code is the core attribute of account builder.
//////
essentialAttributes struct {
ID string `validate:"uuid,required"`
OrganizationID string `validate:"uuid,required"`
Country string `validate:"eq=GB|eq=AU|eq=BE|eq=CA|eq=FR|eq=DE|eq=GR|eq=HK|eq=IT|eq=LU|eq=NL|eq=PL|eq=PT|eq=ES|eq=ES|eq=CH|eq=US"`
BankIDCode string
BankID string `GB:"len=6" BE:"len=3" FR:"len=10" DE:"len=8" GR:"len=7" IT:"len=10|len=11" LU:"len=3" NL:"len=0" PL:"len=8" PT:"len=8" ES:"len=8" CH:"len=5" US:"len=9"`
Bic string `GB:"len=8|len=11" AU:"len=8|len=11" CA:"len=8|len=11" HK:"len=8|len=11" NL:"len=8|len=11" US:"len=8|len=11"`
Iban string `AU:"len=0" CA:"len=0" HK:"len=0" US:"len=0"`
}
///////
// Chose not to add all attributes on the same builder level.
// Having a structure and separating optional attributes from essential ones lowers cognitive load for client user.
///////
optionalAttributes struct {
Builder *Builder
VersionIndex int
AccountNumber string
BaseCurrency string
CustomerID string
Title string `validate:"max=40"`
FirstName string `validate:"max=40"`
BankAccountName string `validate:"max=140"`
AltBankAccountNames []string `validate:"max=3,dive,max=140"`
AccountClassification string `validate:"eq=Personal|eq=Business"`
SecondaryIdentification string `validate:"max=140"`
JointAccount bool
AccountMatchingOptOut bool
}
// OptionalAttributes has a collection of methods to set optional account attributes
///////
// This interface enabled to hide public optional attributes struct fields which were required for validation.
///////
OptionalAttributes interface {
SetVersion(int) *Builder
SetAccountNumber(string) *Builder
SetBaseCurrency(currency.Unit) *Builder
SetCustomerID(string) *Builder
SetTitle(string) *Builder
SetFirstName(string) *Builder
SetBankAccountName(string) *Builder
SetAltBankAccountNames(...string) *Builder
SetAccountClassification(string) *Builder
SetJointAccount(bool) *Builder
SetAccountMatchingOptOut(bool) *Builder
SetSecondaryIdentification(string) *Builder
}
)
// NewBuilder creates account builder from provided Country.
// Country sets validation rules for created accounts.
func NewBuilder(country Country) *Builder {
return &Builder{
essential: &essentialAttributes{
Country: country.Code(),
BankIDCode: country.BankIDCode(),
},
optional: &optionalAttributes{
AccountClassification: "Personal",
},
validate: validator.New(),
}
}
// CastBuilderFrom creates an account builder from existing account object.
// This enables to modify, validate and create new account object.
///////
// This function would be essential for PATCH operation.
///////
func CastBuilderFrom(account *Account) *Builder {
return &Builder{
essential: &essentialAttributes{
ID: account.ID(),
OrganizationID: account.OrganizationID(),
Country: account.Country(),
BankIDCode: account.BankIDCode(),
BankID: account.BankID(),
Bic: account.Bic(),
Iban: account.Iban(),
},
optional: &optionalAttributes{
VersionIndex: account.Version(),
AccountNumber: account.AccountNumber(),
BaseCurrency: account.BaseCurrency(),
CustomerID: account.CustomerID(),
Title: account.Title(),
FirstName: account.FirstName(),
BankAccountName: account.BankAccountName(),
AltBankAccountNames: account.AltBankAccountNames(),
AccountClassification: account.AccountClassification(),
SecondaryIdentification: account.SecondaryIdentification(),
JointAccount: account.IsJointAccount(),
AccountMatchingOptOut: account.IsAccountMatchingOptOut(),
},
validate: validator.New(),
}
}
// SetID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetID(id string) *Builder {
b.essential.ID = id
return b
}
// SetOrganizationID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetOrganizationID(id string) *Builder {
b.essential.OrganizationID = id
return b
}
// SetBankID - Local country bank identifier. Format depends on the country. Required for most countries.
func (b *Builder) SetBankID(bankID string) *Builder {
b.essential.BankID = bankID
return b
}
// SetBic - SWIFT BIC in either 8 or 11 character format e.g. 'NWBKGB22'
func (b *Builder) SetBic(bic string) *Builder {
b.essential.Bic = bic
return b
}
// SetIban - IBAN of the account. Will be calculated from other fields if not supplied.
func (b *Builder) SetIban(iban string) *Builder {
b.essential.Iban = iban
return b
}
// Validate checks set fields based on country code.
// Returns account object if no errors are generated during validation.
func (b *Builder) Validate() (*Account, error) {
b.validate.SetTagName(b.essential.Country)
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
b.validate.SetTagName("validate")
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
if err := validateStruct(b.validate, b.optional); err != nil {
return nil, err
}
return &Account{
id: b.essential.ID,
organizationID: b.essential.OrganizationID,
versionIndex: b.optional.VersionIndex,
country: b.essential.Country,
bankIDCode: b.essential.BankIDCode,
bankID: b.essential.BankID,
bic: b.essential.Bic,
iban: b.essential.Iban,
baseCurrency: b.optional.BaseCurrency,
accountNumber: b.optional.AccountNumber,
customerID: b.optional.CustomerID,
title: b.optional.Title,
firstName: b.optional.FirstName,
bankAccountName: b.optional.BankAccountName,
altBankAccountNames: b.optional.AltBankAccountNames,
accountClassification: b.optional.AccountClassification,
jointAccount: b.optional.JointAccount,
accountMatchingOptOut: b.optional.AccountMatchingOptOut,
secondaryIdentification: b.optional.SecondaryIdentification,
}, nil
}
// SetOptionalAttribute returns a list of methods for setting optional account attributes.
func (b *Builder) SetOptionalAttribute() OptionalAttributes {
return &optionalAttributes{
Builder: b,
}
}
// SetAccountNumber - A unique account number will automatically be generated if not provided.
func (opt *optionalAttributes) SetAccountNumber(accountNumber string) *Builder {
opt.Builder.optional.AccountNumber = accountNumber
return opt.Builder
}
// SetVersion - version number of account object. Needs to be incremented when Patching an existing account.
func (opt *optionalAttributes) SetVersion(version int) *Builder
|
// SetBaseCurrency - ISO 4217 code used to identify the base currency of the account, e.g. 'GBP', 'EUR'
// Provide currency unit object from golang text library.
func (opt *optionalAttributes) SetBaseCurrency(unit currency.Unit) *Builder {
opt.Builder.optional.BaseCurrency = unit.String()
return opt.Builder
}
// SetCustomerID - A free-format reference that can be used to link this account to an external system
func (opt *optionalAttributes) SetCustomerID(customerID string) *Builder {
opt.Builder.optional.CustomerID = customerID
return opt.Builder
}
// SetTitle - The account holder's title, e.g. Ms, Dr, Mr.
// Valid up to string[40]
func (opt *optionalAttributes) SetTitle(title string) *Builder {
opt.Builder.optional.Title = title
return opt.Builder
}
// SetFirstName - The account holder's first name.
// Valid up to string[40]
func (opt *optionalAttributes) SetFirstName(firstName string) *Builder {
opt.Builder.optional.FirstName = firstName
return opt.Builder
}
// SetBankAccountName - Primary
|
{
opt.Builder.optional.VersionIndex = version
return opt.Builder
}
|
identifier_body
|
builder.go
|
len=11"`
Iban string `AU:"len=0" CA:"len=0" HK:"len=0" US:"len=0"`
}
///////
// Chose not to add all attributes on the same builder level.
// Having a structure and separating optional attributes from essential ones lowers cognitive load for client user.
///////
optionalAttributes struct {
Builder *Builder
VersionIndex int
AccountNumber string
BaseCurrency string
CustomerID string
Title string `validate:"max=40"`
FirstName string `validate:"max=40"`
BankAccountName string `validate:"max=140"`
AltBankAccountNames []string `validate:"max=3,dive,max=140"`
AccountClassification string `validate:"eq=Personal|eq=Business"`
SecondaryIdentification string `validate:"max=140"`
JointAccount bool
AccountMatchingOptOut bool
}
// OptionalAttributes has a collection of methods to set optional account attributes
///////
// This interface enabled to hide public optional attributes struct fields which were required for validation.
///////
OptionalAttributes interface {
SetVersion(int) *Builder
SetAccountNumber(string) *Builder
SetBaseCurrency(currency.Unit) *Builder
SetCustomerID(string) *Builder
SetTitle(string) *Builder
SetFirstName(string) *Builder
SetBankAccountName(string) *Builder
SetAltBankAccountNames(...string) *Builder
SetAccountClassification(string) *Builder
SetJointAccount(bool) *Builder
SetAccountMatchingOptOut(bool) *Builder
SetSecondaryIdentification(string) *Builder
}
)
// NewBuilder creates account builder from provided Country.
// Country sets validation rules for created accounts.
func NewBuilder(country Country) *Builder {
return &Builder{
essential: &essentialAttributes{
Country: country.Code(),
BankIDCode: country.BankIDCode(),
},
optional: &optionalAttributes{
AccountClassification: "Personal",
},
validate: validator.New(),
}
}
// CastBuilderFrom creates an account builder from existing account object.
// This enables to modify, validate and create new account object.
///////
// This function would be essential for PATCH operation.
///////
func CastBuilderFrom(account *Account) *Builder {
return &Builder{
essential: &essentialAttributes{
ID: account.ID(),
OrganizationID: account.OrganizationID(),
Country: account.Country(),
BankIDCode: account.BankIDCode(),
BankID: account.BankID(),
Bic: account.Bic(),
Iban: account.Iban(),
},
optional: &optionalAttributes{
VersionIndex: account.Version(),
AccountNumber: account.AccountNumber(),
BaseCurrency: account.BaseCurrency(),
CustomerID: account.CustomerID(),
Title: account.Title(),
FirstName: account.FirstName(),
BankAccountName: account.BankAccountName(),
AltBankAccountNames: account.AltBankAccountNames(),
AccountClassification: account.AccountClassification(),
SecondaryIdentification: account.SecondaryIdentification(),
JointAccount: account.IsJointAccount(),
AccountMatchingOptOut: account.IsAccountMatchingOptOut(),
},
validate: validator.New(),
}
}
// SetID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetID(id string) *Builder {
b.essential.ID = id
return b
}
// SetOrganizationID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetOrganizationID(id string) *Builder {
b.essential.OrganizationID = id
return b
}
// SetBankID - Local country bank identifier. Format depends on the country. Required for most countries.
func (b *Builder) SetBankID(bankID string) *Builder {
b.essential.BankID = bankID
return b
}
// SetBic - SWIFT BIC in either 8 or 11 character format e.g. 'NWBKGB22'
func (b *Builder) SetBic(bic string) *Builder {
b.essential.Bic = bic
return b
}
// SetIban - IBAN of the account. Will be calculated from other fields if not supplied.
func (b *Builder) SetIban(iban string) *Builder {
b.essential.Iban = iban
return b
}
// Validate checks set fields based on country code.
// Returns account object if no errors are generated during validation.
func (b *Builder) Validate() (*Account, error) {
b.validate.SetTagName(b.essential.Country)
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
b.validate.SetTagName("validate")
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
if err := validateStruct(b.validate, b.optional); err != nil {
return nil, err
}
return &Account{
id: b.essential.ID,
organizationID: b.essential.OrganizationID,
versionIndex: b.optional.VersionIndex,
country: b.essential.Country,
bankIDCode: b.essential.BankIDCode,
bankID: b.essential.BankID,
bic: b.essential.Bic,
iban: b.essential.Iban,
baseCurrency: b.optional.BaseCurrency,
accountNumber: b.optional.AccountNumber,
customerID: b.optional.CustomerID,
title: b.optional.Title,
firstName: b.optional.FirstName,
bankAccountName: b.optional.BankAccountName,
altBankAccountNames: b.optional.AltBankAccountNames,
accountClassification: b.optional.AccountClassification,
jointAccount: b.optional.JointAccount,
accountMatchingOptOut: b.optional.AccountMatchingOptOut,
secondaryIdentification: b.optional.SecondaryIdentification,
}, nil
}
// SetOptionalAttribute returns a list of methods for setting optional account attributes.
func (b *Builder) SetOptionalAttribute() OptionalAttributes {
return &optionalAttributes{
Builder: b,
}
}
// SetAccountNumber - A unique account number will automatically be generated if not provided.
func (opt *optionalAttributes) SetAccountNumber(accountNumber string) *Builder {
opt.Builder.optional.AccountNumber = accountNumber
return opt.Builder
}
// SetVersion - version number of account object. Needs to be incremented when Patching an existing account.
func (opt *optionalAttributes) SetVersion(version int) *Builder {
opt.Builder.optional.VersionIndex = version
return opt.Builder
}
// SetBaseCurrency - ISO 4217 code used to identify the base currency of the account, e.g. 'GBP', 'EUR'
// Provide currency unit object from golang text library.
func (opt *optionalAttributes) SetBaseCurrency(unit currency.Unit) *Builder {
opt.Builder.optional.BaseCurrency = unit.String()
return opt.Builder
}
// SetCustomerID - A free-format reference that can be used to link this account to an external system
func (opt *optionalAttributes) SetCustomerID(customerID string) *Builder {
opt.Builder.optional.CustomerID = customerID
return opt.Builder
}
// SetTitle - The account holder's title, e.g. Ms, Dr, Mr.
// Valid up to string[40]
func (opt *optionalAttributes) SetTitle(title string) *Builder {
opt.Builder.optional.Title = title
return opt.Builder
}
// SetFirstName - The account holder's first name.
// Valid up to string[40]
func (opt *optionalAttributes) SetFirstName(firstName string) *Builder {
opt.Builder.optional.FirstName = firstName
return opt.Builder
}
// SetBankAccountName - Primary account name, used for Confirmation of Payee matching.
// Required if Confirmation of Payee is enabled for the organisation.
// Valid up to string[140]
func (opt *optionalAttributes) SetBankAccountName(accountName string) *Builder {
opt.Builder.optional.BankAccountName = accountName
return opt.Builder
}
// SetAltBankAccountNames - Up to 3 alternative account names, used for Confirmation of Payee matching.
// Each element valid up to string[140]
func (opt *optionalAttributes) SetAltBankAccountNames(names ...string) *Builder {
opt.Builder.optional.AltBankAccountNames = names
return opt.Builder
}
// SetAccountClassification - Classification of account. Can be either Personal or Business.
// Defaults to Personal.
func (opt *optionalAttributes) SetAccountClassification(accountClassification string) *Builder {
opt.Builder.optional.AccountClassification = accountClassification
return opt.Builder
}
// SetJointAccount - set to True if this is a joint account.
// Defaults to false.
func (opt *optionalAttributes) SetJointAccount(isJointAccount bool) *Builder {
opt.Builder.optional.JointAccount = isJointAccount
return opt.Builder
}
// SetAccountMatchingOptOut - set to True if the account has opted out of account matching, e.g. Confirmation of Payee.
// Defaults to false.
func (opt *optionalAttributes) SetAccountMatchingOptOut(isAccountMatching bool) *Builder {
opt.Builder.optional.AccountMatchingOptOut = isAccountMatching
return opt.Builder
|
}
|
random_line_split
|
|
builder.go
|
// Validation tags are based on country codes, hence it's easy to differentiate and
// also trivial to set the validator tag as country code is the core attribute of account builder.
//////
essentialAttributes struct {
ID string `validate:"uuid,required"`
OrganizationID string `validate:"uuid,required"`
Country string `validate:"eq=GB|eq=AU|eq=BE|eq=CA|eq=FR|eq=DE|eq=GR|eq=HK|eq=IT|eq=LU|eq=NL|eq=PL|eq=PT|eq=ES|eq=ES|eq=CH|eq=US"`
BankIDCode string
BankID string `GB:"len=6" BE:"len=3" FR:"len=10" DE:"len=8" GR:"len=7" IT:"len=10|len=11" LU:"len=3" NL:"len=0" PL:"len=8" PT:"len=8" ES:"len=8" CH:"len=5" US:"len=9"`
Bic string `GB:"len=8|len=11" AU:"len=8|len=11" CA:"len=8|len=11" HK:"len=8|len=11" NL:"len=8|len=11" US:"len=8|len=11"`
Iban string `AU:"len=0" CA:"len=0" HK:"len=0" US:"len=0"`
}
///////
// Chose not to add all attributes on the same builder level.
// Having a structure and separating optional attributes from essential ones lowers cognitive load for client user.
///////
optionalAttributes struct {
Builder *Builder
VersionIndex int
AccountNumber string
BaseCurrency string
CustomerID string
Title string `validate:"max=40"`
FirstName string `validate:"max=40"`
BankAccountName string `validate:"max=140"`
AltBankAccountNames []string `validate:"max=3,dive,max=140"`
AccountClassification string `validate:"eq=Personal|eq=Business"`
SecondaryIdentification string `validate:"max=140"`
JointAccount bool
AccountMatchingOptOut bool
}
// OptionalAttributes has a collection of methods to set optional account attributes
///////
// This interface enabled to hide public optional attributes struct fields which were required for validation.
///////
OptionalAttributes interface {
SetVersion(int) *Builder
SetAccountNumber(string) *Builder
SetBaseCurrency(currency.Unit) *Builder
SetCustomerID(string) *Builder
SetTitle(string) *Builder
SetFirstName(string) *Builder
SetBankAccountName(string) *Builder
SetAltBankAccountNames(...string) *Builder
SetAccountClassification(string) *Builder
SetJointAccount(bool) *Builder
SetAccountMatchingOptOut(bool) *Builder
SetSecondaryIdentification(string) *Builder
}
)
// NewBuilder creates account builder from provided Country.
// Country sets validation rules for created accounts.
func NewBuilder(country Country) *Builder {
return &Builder{
essential: &essentialAttributes{
Country: country.Code(),
BankIDCode: country.BankIDCode(),
},
optional: &optionalAttributes{
AccountClassification: "Personal",
},
validate: validator.New(),
}
}
// CastBuilderFrom creates an account builder from existing account object.
// This enables to modify, validate and create new account object.
///////
// This function would be essential for PATCH operation.
///////
func CastBuilderFrom(account *Account) *Builder {
return &Builder{
essential: &essentialAttributes{
ID: account.ID(),
OrganizationID: account.OrganizationID(),
Country: account.Country(),
BankIDCode: account.BankIDCode(),
BankID: account.BankID(),
Bic: account.Bic(),
Iban: account.Iban(),
},
optional: &optionalAttributes{
VersionIndex: account.Version(),
AccountNumber: account.AccountNumber(),
BaseCurrency: account.BaseCurrency(),
CustomerID: account.CustomerID(),
Title: account.Title(),
FirstName: account.FirstName(),
BankAccountName: account.BankAccountName(),
AltBankAccountNames: account.AltBankAccountNames(),
AccountClassification: account.AccountClassification(),
SecondaryIdentification: account.SecondaryIdentification(),
JointAccount: account.IsJointAccount(),
AccountMatchingOptOut: account.IsAccountMatchingOptOut(),
},
validate: validator.New(),
}
}
// SetID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetID(id string) *Builder {
b.essential.ID = id
return b
}
// SetOrganizationID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetOrganizationID(id string) *Builder {
b.essential.OrganizationID = id
return b
}
// SetBankID - Local country bank identifier. Format depends on the country. Required for most countries.
func (b *Builder) SetBankID(bankID string) *Builder {
b.essential.BankID = bankID
return b
}
// SetBic - SWIFT BIC in either 8 or 11 character format e.g. 'NWBKGB22'
func (b *Builder) SetBic(bic string) *Builder {
b.essential.Bic = bic
return b
}
// SetIban - IBAN of the account. Will be calculated from other fields if not supplied.
func (b *Builder) SetIban(iban string) *Builder {
b.essential.Iban = iban
return b
}
// Validate checks set fields based on country code.
// Returns account object if no errors are generated during validation.
func (b *Builder) Validate() (*Account, error) {
b.validate.SetTagName(b.essential.Country)
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
b.validate.SetTagName("validate")
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
if err := validateStruct(b.validate, b.optional); err != nil
|
return &Account{
id: b.essential.ID,
organizationID: b.essential.OrganizationID,
versionIndex: b.optional.VersionIndex,
country: b.essential.Country,
bankIDCode: b.essential.BankIDCode,
bankID: b.essential.BankID,
bic: b.essential.Bic,
iban: b.essential.Iban,
baseCurrency: b.optional.BaseCurrency,
accountNumber: b.optional.AccountNumber,
customerID: b.optional.CustomerID,
title: b.optional.Title,
firstName: b.optional.FirstName,
bankAccountName: b.optional.BankAccountName,
altBankAccountNames: b.optional.AltBankAccountNames,
accountClassification: b.optional.AccountClassification,
jointAccount: b.optional.JointAccount,
accountMatchingOptOut: b.optional.AccountMatchingOptOut,
secondaryIdentification: b.optional.SecondaryIdentification,
}, nil
}
// SetOptionalAttribute returns a list of methods for setting optional account attributes.
func (b *Builder) SetOptionalAttribute() OptionalAttributes {
return &optionalAttributes{
Builder: b,
}
}
// SetAccountNumber - A unique account number will automatically be generated if not provided.
func (opt *optionalAttributes) SetAccountNumber(accountNumber string) *Builder {
opt.Builder.optional.AccountNumber = accountNumber
return opt.Builder
}
// SetVersion - version number of account object. Needs to be incremented when Patching an existing account.
func (opt *optionalAttributes) SetVersion(version int) *Builder {
opt.Builder.optional.VersionIndex = version
return opt.Builder
}
// SetBaseCurrency - ISO 4217 code used to identify the base currency of the account, e.g. 'GBP', 'EUR'
// Provide currency unit object from golang text library.
func (opt *optionalAttributes) SetBaseCurrency(unit currency.Unit) *Builder {
opt.Builder.optional.BaseCurrency = unit.String()
return opt.Builder
}
// SetCustomerID - A free-format reference that can be used to link this account to an external system
func (opt *optionalAttributes) SetCustomerID(customerID string) *Builder {
opt.Builder.optional.CustomerID = customerID
return opt.Builder
}
// SetTitle - The account holder's title, e.g. Ms, Dr, Mr.
// Valid up to string[40]
func (opt *optionalAttributes) SetTitle(title string) *Builder {
opt.Builder.optional.Title = title
return opt.Builder
}
// SetFirstName - The account holder's first name.
// Valid up to string[40]
func (opt *optionalAttributes) SetFirstName(firstName string) *Builder {
opt.Builder.optional.FirstName = firstName
return opt.Builder
}
// SetBankAccountName - Primary
|
{
return nil, err
}
|
conditional_block
|
builder.go
|
// Validation tags are based on country codes, hence it's easy to differentiate and
// also trivial to set the validator tag as country code is the core attribute of account builder.
//////
essentialAttributes struct {
ID string `validate:"uuid,required"`
OrganizationID string `validate:"uuid,required"`
Country string `validate:"eq=GB|eq=AU|eq=BE|eq=CA|eq=FR|eq=DE|eq=GR|eq=HK|eq=IT|eq=LU|eq=NL|eq=PL|eq=PT|eq=ES|eq=ES|eq=CH|eq=US"`
BankIDCode string
BankID string `GB:"len=6" BE:"len=3" FR:"len=10" DE:"len=8" GR:"len=7" IT:"len=10|len=11" LU:"len=3" NL:"len=0" PL:"len=8" PT:"len=8" ES:"len=8" CH:"len=5" US:"len=9"`
Bic string `GB:"len=8|len=11" AU:"len=8|len=11" CA:"len=8|len=11" HK:"len=8|len=11" NL:"len=8|len=11" US:"len=8|len=11"`
Iban string `AU:"len=0" CA:"len=0" HK:"len=0" US:"len=0"`
}
///////
// Chose not to add all attributes on the same builder level.
// Having a structure and separating optional attributes from essential ones lowers cognitive load for client user.
///////
optionalAttributes struct {
Builder *Builder
VersionIndex int
AccountNumber string
BaseCurrency string
CustomerID string
Title string `validate:"max=40"`
FirstName string `validate:"max=40"`
BankAccountName string `validate:"max=140"`
AltBankAccountNames []string `validate:"max=3,dive,max=140"`
AccountClassification string `validate:"eq=Personal|eq=Business"`
SecondaryIdentification string `validate:"max=140"`
JointAccount bool
AccountMatchingOptOut bool
}
// OptionalAttributes has a collection of methods to set optional account attributes
///////
// This interface enabled to hide public optional attributes struct fields which were required for validation.
///////
OptionalAttributes interface {
SetVersion(int) *Builder
SetAccountNumber(string) *Builder
SetBaseCurrency(currency.Unit) *Builder
SetCustomerID(string) *Builder
SetTitle(string) *Builder
SetFirstName(string) *Builder
SetBankAccountName(string) *Builder
SetAltBankAccountNames(...string) *Builder
SetAccountClassification(string) *Builder
SetJointAccount(bool) *Builder
SetAccountMatchingOptOut(bool) *Builder
SetSecondaryIdentification(string) *Builder
}
)
// NewBuilder creates account builder from provided Country.
// Country sets validation rules for created accounts.
func NewBuilder(country Country) *Builder {
return &Builder{
essential: &essentialAttributes{
Country: country.Code(),
BankIDCode: country.BankIDCode(),
},
optional: &optionalAttributes{
AccountClassification: "Personal",
},
validate: validator.New(),
}
}
// CastBuilderFrom creates an account builder from existing account object.
// This enables to modify, validate and create new account object.
///////
// This function would be essential for PATCH operation.
///////
func CastBuilderFrom(account *Account) *Builder {
return &Builder{
essential: &essentialAttributes{
ID: account.ID(),
OrganizationID: account.OrganizationID(),
Country: account.Country(),
BankIDCode: account.BankIDCode(),
BankID: account.BankID(),
Bic: account.Bic(),
Iban: account.Iban(),
},
optional: &optionalAttributes{
VersionIndex: account.Version(),
AccountNumber: account.AccountNumber(),
BaseCurrency: account.BaseCurrency(),
CustomerID: account.CustomerID(),
Title: account.Title(),
FirstName: account.FirstName(),
BankAccountName: account.BankAccountName(),
AltBankAccountNames: account.AltBankAccountNames(),
AccountClassification: account.AccountClassification(),
SecondaryIdentification: account.SecondaryIdentification(),
JointAccount: account.IsJointAccount(),
AccountMatchingOptOut: account.IsAccountMatchingOptOut(),
},
validate: validator.New(),
}
}
// SetID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetID(id string) *Builder {
b.essential.ID = id
return b
}
// SetOrganizationID of an account. Unique identifier (UUID) string - required for all accounts.
func (b *Builder) SetOrganizationID(id string) *Builder {
b.essential.OrganizationID = id
return b
}
// SetBankID - Local country bank identifier. Format depends on the country. Required for most countries.
func (b *Builder) SetBankID(bankID string) *Builder {
b.essential.BankID = bankID
return b
}
// SetBic - SWIFT BIC in either 8 or 11 character format e.g. 'NWBKGB22'
func (b *Builder) SetBic(bic string) *Builder {
b.essential.Bic = bic
return b
}
// SetIban - IBAN of the account. Will be calculated from other fields if not supplied.
func (b *Builder) SetIban(iban string) *Builder {
b.essential.Iban = iban
return b
}
// Validate checks set fields based on country code.
// Returns account object if no errors are generated during validation.
func (b *Builder) Validate() (*Account, error) {
b.validate.SetTagName(b.essential.Country)
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
b.validate.SetTagName("validate")
if err := validateStruct(b.validate, b.essential); err != nil {
return nil, err
}
if err := validateStruct(b.validate, b.optional); err != nil {
return nil, err
}
return &Account{
id: b.essential.ID,
organizationID: b.essential.OrganizationID,
versionIndex: b.optional.VersionIndex,
country: b.essential.Country,
bankIDCode: b.essential.BankIDCode,
bankID: b.essential.BankID,
bic: b.essential.Bic,
iban: b.essential.Iban,
baseCurrency: b.optional.BaseCurrency,
accountNumber: b.optional.AccountNumber,
customerID: b.optional.CustomerID,
title: b.optional.Title,
firstName: b.optional.FirstName,
bankAccountName: b.optional.BankAccountName,
altBankAccountNames: b.optional.AltBankAccountNames,
accountClassification: b.optional.AccountClassification,
jointAccount: b.optional.JointAccount,
accountMatchingOptOut: b.optional.AccountMatchingOptOut,
secondaryIdentification: b.optional.SecondaryIdentification,
}, nil
}
// SetOptionalAttribute returns a list of methods for setting optional account attributes.
func (b *Builder) SetOptionalAttribute() OptionalAttributes {
return &optionalAttributes{
Builder: b,
}
}
// SetAccountNumber - A unique account number will automatically be generated if not provided.
func (opt *optionalAttributes)
|
(accountNumber string) *Builder {
opt.Builder.optional.AccountNumber = accountNumber
return opt.Builder
}
// SetVersion - version number of account object. Needs to be incremented when Patching an existing account.
func (opt *optionalAttributes) SetVersion(version int) *Builder {
opt.Builder.optional.VersionIndex = version
return opt.Builder
}
// SetBaseCurrency - ISO 4217 code used to identify the base currency of the account, e.g. 'GBP', 'EUR'
// Provide currency unit object from golang text library.
func (opt *optionalAttributes) SetBaseCurrency(unit currency.Unit) *Builder {
opt.Builder.optional.BaseCurrency = unit.String()
return opt.Builder
}
// SetCustomerID - A free-format reference that can be used to link this account to an external system
func (opt *optionalAttributes) SetCustomerID(customerID string) *Builder {
opt.Builder.optional.CustomerID = customerID
return opt.Builder
}
// SetTitle - The account holder's title, e.g. Ms, Dr, Mr.
// Valid up to string[40]
func (opt *optionalAttributes) SetTitle(title string) *Builder {
opt.Builder.optional.Title = title
return opt.Builder
}
// SetFirstName - The account holder's first name.
// Valid up to string[40]
func (opt *optionalAttributes) SetFirstName(firstName string) *Builder {
opt.Builder.optional.FirstName = firstName
return opt.Builder
}
// SetBankAccountName - Primary
|
SetAccountNumber
|
identifier_name
|
mod.rs
|
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let size = data.get_size() as gl::types::GLsizeiptr;
let raw = data.get_address() as *const gl::types::GLvoid;
let usage = match usage {
super::UsageStatic => gl::STATIC_DRAW,
super::UsageDynamic => gl::DYNAMIC_DRAW,
super::UsageStream => gl::STREAM_DRAW,
};
unsafe {
gl::BufferData(gl::ARRAY_BUFFER, size, raw, usage);
}
}
fn process(&mut self, request: super::CastRequest) {
match request {
super::Clear(data) => {
let mut flags = match data.color {
//gl::ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE);
Some(super::target::Color([r,g,b,a])) => {
gl::ClearColor(r, g, b, a);
gl::COLOR_BUFFER_BIT
},
None => 0 as gl::types::GLenum
};
data.depth.map(|value| {
gl::DepthMask(gl::TRUE);
gl::ClearDepth(value as gl::types::GLclampd);
flags |= gl::DEPTH_BUFFER_BIT;
});
data.stencil.map(|value| {
gl::StencilMask(-1);
gl::ClearStencil(value as gl::types::GLint);
flags |= gl::STENCIL_BUFFER_BIT;
});
gl::Clear(flags);
},
super::BindProgram(program) => {
gl::UseProgram(program);
},
super::BindArrayBuffer(array_buffer) => {
if self.caps.array_buffer_supported {
gl::BindVertexArray(array_buffer);
} else {
error!("Ignored unsupported GL Request: {}", request)
}
},
super::BindAttribute(slot, buffer, count, el_type, stride, offset) => {
let gl_type = match el_type {
a::Int(_, a::U8, a::Unsigned) => gl::UNSIGNED_BYTE,
a::Int(_, a::U8, a::Signed) => gl::BYTE,
a::Int(_, a::U16, a::Unsigned) => gl::UNSIGNED_SHORT,
a::Int(_, a::U16, a::Signed) => gl::SHORT,
a::Int(_, a::U32, a::Unsigned) => gl::UNSIGNED_INT,
a::Int(_, a::U32, a::Signed) => gl::INT,
a::Float(_, a::F16) => gl::HALF_FLOAT,
a::Float(_, a::F32) => gl::FLOAT,
a::Float(_, a::F64) => gl::DOUBLE,
_ => {
error!("Unsupported element type: {}", el_type);
return
}
};
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let offset = offset as *const gl::types::GLvoid;
match el_type {
a::Int(a::IntRaw, _, _) => unsafe {
gl::VertexAttribIPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
a::Int(a::IntNormalized, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::TRUE,
stride as gl::types::GLint, offset);
},
a::Int(a::IntAsFloat, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatDefault, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatPrecision, _) => unsafe {
gl::VertexAttribLPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
_ => ()
}
gl::EnableVertexAttribArray(slot as gl::types::GLuint);
},
super::BindIndex(buffer) => {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, buffer);
},
super::BindFrameBuffer(frame_buffer) => {
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, frame_buffer);
},
super::BindTarget(target, plane) => {
let attachment = match target {
super::target::TargetColor(index) =>
gl::COLOR_ATTACHMENT0 + (index as gl::types::GLenum),
super::target::TargetDepth => gl::DEPTH_ATTACHMENT,
super::target::TargetStencil => gl::STENCIL_ATTACHMENT,
super::target::TargetDepthStencil => gl::DEPTH_STENCIL_ATTACHMENT,
};
match plane {
super::target::PlaneEmpty => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, 0),
super::target::PlaneSurface(name) => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, name),
super::target::PlaneTexture(tex, level) => gl::FramebufferTexture
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint),
super::target::PlaneTextureLayer(tex, level, layer) => gl::FramebufferTextureLayer
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint, layer as gl::types::GLint),
}
},
super::BindUniformBlock(program, index, loc, buffer) => {
gl::UniformBlockBinding(program, index as gl::types::GLuint, loc as gl::types::GLuint);
gl::BindBufferBase(gl::UNIFORM_BUFFER, loc as gl::types::GLuint, buffer);
},
super::BindUniform(loc, uniform) => {
shade::bind_uniform(loc as gl::types::GLint, uniform);
},
super::BindTexture(loc, tex, sam) => {
tex::bind_texture(loc as gl::types::GLuint, tex, sam, self);
},
super::SetPrimitiveState(prim) => {
rast::bind_primitive(prim);
},
super::SetDepthStencilState(depth, stencil, cull) => {
rast::bind_stencil(stencil, cull);
rast::bind_depth(depth);
},
super::SetBlendState(blend) => {
rast::bind_blend(blend);
},
super::UpdateBuffer(buffer, data) => {
self.update_buffer(buffer, data, super::UsageDynamic);
},
super::UpdateTexture(tex, image_info, data) => {
tex::update_texture(tex, image_info, data);
},
super::Draw(start, count) => {
gl::DrawArrays(gl::TRIANGLES,
start as gl::types::GLsizei,
count as gl::types::GLsizei);
self.check();
},
super::DrawIndexed(start, count) => {
let offset = start * (std::mem::size_of::<u16>() as u16);
unsafe {
gl::DrawElements(gl::TRIANGLES,
count as gl::types::GLsizei,
gl::UNSIGNED_SHORT,
offset as *const gl::types::GLvoid);
}
self.check();
},
}
}
}
#[cfg(test)]
mod tests {
use super::Version;
#[test]
fn test_version_parse()
|
{
assert_eq!(Version::parse("1"), Err("1"));
assert_eq!(Version::parse("1."), Err("1."));
assert_eq!(Version::parse("1 h3l1o. W0rld"), Err("1 h3l1o. W0rld"));
assert_eq!(Version::parse("1. h3l1o. W0rld"), Err("1. h3l1o. W0rld"));
assert_eq!(Version::parse("1.2.3"), Ok(Version(1, 2, Some(3), "")));
assert_eq!(Version::parse("1.2"), Ok(Version(1, 2, None, "")));
assert_eq!(Version::parse("1.2 h3l1o. W0rld"), Ok(Version(1, 2, None, "h3l1o. W0rld")));
assert_eq!(Version::parse("1.2.h3l1o. W0rld"), Ok(Version(1, 2, None, "W0rld")));
assert_eq!(Version::parse("1.2. h3l1o. W0rld"), Ok(Version(1, 2, None, "h3l1o. W0rld")));
assert_eq!(Version::parse("1.2.3.h3l1o. W0rld"), Ok(Version(1, 2, Some(3), "W0rld")));
assert_eq!(Version::parse("1.2.3 h3l1o. W0rld"), Ok(Version(1, 2, Some(3), "h3l1o. W0rld")));
}
|
identifier_body
|
|
mod.rs
|
deriving(Eq, PartialEq, Show)]
pub struct PlatformName {
/// The company responsible for the OpenGL implementation
pub vendor: &'static str,
/// The name of the renderer
pub renderer: &'static str,
}
impl PlatformName {
fn get() -> PlatformName {
PlatformName {
vendor: get_string(gl::VENDOR),
renderer: get_string(gl::RENDERER),
}
}
}
/// OpenGL implementation information
#[deriving(Show)]
pub struct Info {
/// The platform identifier
pub platform_name: PlatformName,
/// The OpenGL API vesion number
pub version: Version,
/// The GLSL vesion number
pub shading_language: Version,
/// The extensions supported by the implementation
pub extensions: HashSet<&'static str>,
}
impl Info {
fn get() -> Info {
let info = {
let platform_name = PlatformName::get();
let version = Version::parse(get_string(gl::VERSION)).unwrap();
let shading_language = Version::parse(get_string(gl::SHADING_LANGUAGE_VERSION)).unwrap();
let extensions = if version >= Version(3, 2, None, "") {
let num_exts = get_uint(gl::NUM_EXTENSIONS) as gl::types::GLuint;
range(0, num_exts).map(|i| {
unsafe {
str::raw::c_str_to_static_slice(
gl::GetStringi(gl::EXTENSIONS, i) as *const i8,
)
}
}).collect()
} else {
// Fallback
get_string(gl::EXTENSIONS).split(' ').collect()
};
Info {
platform_name: platform_name,
version: version,
shading_language: shading_language,
extensions: extensions,
}
};
info!("Vendor: {}", info.platform_name.vendor);
info!("Renderer: {}", info.platform_name.renderer);
info!("Version: {}", info.version);
info!("Shading Language: {}", info.shading_language);
info!("Loaded Extensions:")
for extension in info.extensions.iter() {
info!("- {}", *extension);
}
info
}
/// Returns `true` if the implementation supports the extension
pub fn is_extension_supported(&self, s: &str) -> bool {
self.extensions.contains_equiv(&s)
}
}
#[deriving(Eq, PartialEq, Show)]
pub enum ErrorType {
InvalidEnum,
InvalidValue,
InvalidOperation,
InvalidFramebufferOperation,
OutOfMemory,
UnknownError,
}
/// An OpenGL back-end with GLSL shaders
pub struct GlBackEnd {
caps: super::Capabilities,
info: Info,
make_texture: fn(::tex::TextureInfo) -> Texture,
/// Maps (by the index) from texture name to TextureInfo, so we can look up what texture target
/// to bind this texture to later. Yuck!
// Doesn't use a SmallIntMap to avoid the overhead of Option
samplers: Vec<::tex::SamplerInfo>,
}
impl GlBackEnd {
/// Load OpenGL symbols and detect driver information
pub fn new(provider: &super::GlProvider) -> GlBackEnd {
gl::load_with(|s| provider.get_proc_address(s));
let info = Info::get();
let caps = super::Capabilities {
shader_model: shade::get_model(),
max_draw_buffers: get_uint(gl::MAX_DRAW_BUFFERS),
max_texture_size: get_uint(gl::MAX_TEXTURE_SIZE),
max_vertex_attributes: get_uint(gl::MAX_VERTEX_ATTRIBS),
uniform_block_supported: info.version >= Version(3, 1, None, "")
|| info.is_extension_supported("GL_ARB_uniform_buffer_object"),
array_buffer_supported: info.version >= Version(3, 0, None, "")
|| info.is_extension_supported("GL_ARB_vertex_array_object"),
immutable_storage_supported: info.version >= Version(4, 2, None, "")
|| info.is_extension_supported("GL_ARB_texture_storage"),
sampler_objects_supported: info.version >= Version(3, 3, None, "")
|| info.is_extension_supported("GL_ARB_sampler_objects"),
};
GlBackEnd {
caps: caps,
info: info,
make_texture: if caps.immutable_storage_supported {
tex::make_with_storage
} else {
tex::make_without_storage
},
samplers: Vec::new(),
}
}
#[allow(dead_code)]
fn get_error(&mut self) -> Result<(), ErrorType> {
match gl::GetError() {
gl::NO_ERROR => Ok(()),
gl::INVALID_ENUM => Err(InvalidEnum),
gl::INVALID_VALUE => Err(InvalidValue),
gl::INVALID_OPERATION => Err(InvalidOperation),
gl::INVALID_FRAMEBUFFER_OPERATION => Err(InvalidFramebufferOperation),
gl::OUT_OF_MEMORY => Err(OutOfMemory),
_ => Err(UnknownError),
}
}
/// Fails during a debug build if the implementation's error flag was set.
#[allow(dead_code)]
fn check(&mut self) {
debug_assert_eq!(self.get_error(), Ok(()));
}
/// Get the OpenGL-specific driver information
pub fn get_info<'a>(&'a self) -> &'a Info {
&self.info
}
}
impl super::ApiBackEnd for GlBackEnd {
fn get_capabilities<'a>(&'a self) -> &'a super::Capabilities {
&self.caps
}
fn create_buffer(&mut self) -> Buffer {
let mut name = 0 as Buffer;
unsafe {
gl::GenBuffers(1, &mut name);
}
info!("\tCreated buffer {}", name);
name
}
fn create_array_buffer(&mut self) -> Result<ArrayBuffer, ()> {
if self.caps.array_buffer_supported {
let mut name = 0 as ArrayBuffer;
unsafe {
gl::GenVertexArrays(1, &mut name);
}
info!("\tCreated array buffer {}", name);
Ok(name)
} else {
error!("\tarray buffer creation unsupported, ignored")
Err(())
}
}
fn create_shader(&mut self, stage: super::shade::Stage, code: super::shade::ShaderSource) -> Result<Shader, super::shade::CreateShaderError> {
let (name, info) = shade::create_shader(stage, code, self.get_capabilities().shader_model);
info.map(|info| {
let level = if name.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tShader compile log: {}", info);
});
name
}
fn create_program(&mut self, shaders: &[Shader]) -> Result<super::shade::ProgramMeta, ()> {
let (meta, info) = shade::create_program(&self.caps, shaders);
info.map(|info| {
let level = if meta.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tProgram link log: {}", info);
});
meta
}
fn create_frame_buffer(&mut self) -> FrameBuffer {
let mut name = 0 as FrameBuffer;
unsafe {
gl::GenFramebuffers(1, &mut name);
}
info!("\tCreated frame buffer {}", name);
name
}
fn create_texture(&mut self, info: ::tex::TextureInfo) -> Texture {
(self.make_texture)(info)
}
fn create_sampler(&mut self, info: ::tex::SamplerInfo) -> Sampler {
if self.caps.sampler_objects_supported {
tex::make_sampler(info)
} else {
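            // Fallback: no GL sampler objects, so remember the SamplerInfo and hand
            // back its index in `samplers` as the handle (presumably re-applied at
            // bind time; that step is outside this fragment).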
self.samplers.push(info);
self.samplers.len() as Sampler - 1
}
}
fn update_buffer(&mut self, buffer: Buffer, data: &super::Blob, usage: super::BufferUsage) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let size = data.get_size() as gl::types::GLsizeiptr;
let raw = data.get_address() as *const gl::types::GLvoid;
let usage = match usage {
super::UsageStatic => gl::STATIC_DRAW,
super::UsageDynamic => gl::DYNAMIC_DRAW,
super::UsageStream => gl::STREAM_DRAW,
};
unsafe {
gl::BufferData(gl::ARRAY_BUFFER, size, raw, usage);
}
}
fn process(&mut self, request: super::CastRequest) {
match request {
super::Clear(data) => {
let mut flags = match data.color {
//gl::ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE);
Some(super::target::Color([r,g,b,a])) => {
gl::ClearColor(r, g, b, a);
gl::COLOR_BUFFER_BIT
},
None => 0 as gl::types::GLenum
};
data.depth.map(|value| {
gl::DepthMask(gl::TRUE);
gl::ClearDepth(value as gl::types::GLclampd);
flags |= gl::DEPTH_BUFFER_BIT;
});
data.stencil.map(|value| {
gl::StencilMask(-1);
gl::ClearStencil(value as gl::types::GLint);
flags |= gl::STENCIL_BUFFER_BIT;
});
gl::Clear(flags);
},
super::BindProgram(program) => {
gl::UseProgram(program);
},
super::BindArrayBuffer(array_buffer) => {
if self.caps.array_buffer_supported {
gl::BindVertexArray(array_buffer);
                } else {
                    error!("\tarray buffer binding unsupported, ignored");
                }
            },
        }
    }
}
impl Version {
    /// Parses an OpenGL version string, splitting any trailing vendor-specific
    /// information off after the first space.
    pub fn parse(src: &'static str) -> Result<Version, &'static str> {
let (version, vendor_info) = match src.find(' ') {
Some(i) => (src.slice_to(i), src.slice_from(i + 1)),
None => (src, ""),
};
// TODO: make this even more lenient so that we can also accept
// `<major> "." <minor> [<???>]`
let mut it = version.split('.');
let major = it.next().and_then(from_str);
let minor = it.next().and_then(from_str);
let revision = it.next().and_then(from_str);
match (major, minor, revision) {
(Some(major), Some(minor), revision) =>
Ok(Version(major, minor, revision, vendor_info)),
(_, _, _) => Err(src),
}
}
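    // Worked example (illustrative input, not from the original source):
    // parse("3.3.0 NVIDIA 310.90") splits at the first space into "3.3.0" and
    // "NVIDIA 310.90", then yields Ok(Version(3, 3, Some(0), "NVIDIA 310.90")).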
}
impl fmt::Show for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Version(major, minor, Some(revision), "") =>
write!(f, "Version({}.{}.{})", major, minor, revision),
Version(major, minor, None, "") =>
write!(f, "Version({}.{})", major, minor),
Version(major, minor, Some(revision), vendor_info) =>
write!(f, "Version({}.{}.{}, {})", major, minor, revision, vendor_info),
Version(major, minor, None, vendor_info) =>
write!(f, "Version({}.{}, {})", major, minor, vendor_info),
}
}
}
/// A unique platform identifier that does not change between releases
#[deriving(Eq, PartialEq, Show)]
pub struct PlatformName {
/// The company responsible for the OpenGL implementation
pub vendor: &'static str,
/// The name of the renderer
pub renderer: &'static str,
}
impl PlatformName {
fn get() -> PlatformName {
PlatformName {
vendor: get_string(gl::VENDOR),
renderer: get_string(gl::RENDERER),
}
}
}
/// OpenGL implementation information
#[deriving(Show)]
pub struct Info {
/// The platform identifier
pub platform_name: PlatformName,
    /// The OpenGL API version number
    pub version: Version,
    /// The GLSL version number
pub shading_language: Version,
/// The extensions supported by the implementation
pub extensions: HashSet<&'static str>,
}
impl Info {
fn get() -> Info {
let info = {
let platform_name = PlatformName::get();
let version = Version::parse(get_string(gl::VERSION)).unwrap();
let shading_language = Version::parse(get_string(gl::SHADING_LANGUAGE_VERSION)).unwrap();
let extensions = if version >= Version(3, 2, None, "") {
let num_exts = get_uint(gl::NUM_EXTENSIONS) as gl::types::GLuint;
range(0, num_exts).map(|i| {
unsafe {
str::raw::c_str_to_static_slice(
gl::GetStringi(gl::EXTENSIONS, i) as *const i8,
)
}
}).collect()
} else {
// Fallback
get_string(gl::EXTENSIONS).split(' ').collect()
};
Info {
platform_name: platform_name,
version: version,
shading_language: shading_language,
extensions: extensions,
}
};
info!("Vendor: {}", info.platform_name.vendor);
info!("Renderer: {}", info.platform_name.renderer);
info!("Version: {}", info.version);
info!("Shading Language: {}", info.shading_language);
info!("Loaded Extensions:")
for extension in info.extensions.iter() {
info!("- {}", *extension);
}
info
}
/// Returns `true` if the implementation supports the extension
pub fn is_extension_supported(&self, s: &str) -> bool {
self.extensions.contains_equiv(&s)
}
}
xform_anno.py
# Chris Sommers (chris.sommers@keysight.com)
#
from __future__ import print_function
import p4.config.p4info_pb2 as p4info_pb2
import argparse
import sys
import google.protobuf.json_format as json_format
import google.protobuf.text_format as text_format
import textwrap
# Conditionally print a verbose message
def log_verbose(msg):
if verbose:
print(msg, file=sys.stderr)
# Set document.brief
def set_doc_brief(doc, value):
doc.brief = value;
# Set document.description
def set_doc_description(doc, value):
doc.description = value;
# Extract the string value embedded in an annotation
# Assumes just one string, surrounded by escaped quotes e.g. "\"string\""
def get_anno_value(anno):
return anno.split('(\"')[1].split('\")')[0]
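# For example (illustrative value, not from the original source):
#   get_anno_value('@brief("Forwards IPv4 packets")') == 'Forwards IPv4 packets'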
# Detect @brief() and @description() annotations and transform into document.brief, document.description
def xform_doc_annotation(container_name, doc, anno_list, anno):
if '@brief' in anno:
log_verbose( "*** %sTransform doc anno in %s: %s => doc.brief" % (drystr, container_name, anno))
if dry == False:
set_doc_brief(doc, get_anno_value(anno))
anno_list.remove(anno)
if '@description' in anno:
log_verbose( "*** %sTransform doc anno in %s: %s => doc.description" % (drystr, container_name, anno))
if dry == False:
set_doc_description(doc, get_anno_value(anno))
anno_list.remove(anno)
# Transform annotations into preamble.document.brief, .description
def xform_preamble_doc_annotations(message):
    # Iterate in reverse so deleting elements doesn't skip subsequent ones
for anno in reversed(message.preamble.annotations):
xform_doc_annotation(message.preamble.name, message.preamble.doc, message.preamble.annotations, anno)
# Transform match_field annotations (doc)
def xform_table_match_field_annotations(table):
for matchfield in table.match_fields:
        # Iterate in reverse so deleting elements doesn't skip subsequent ones
for anno in reversed(matchfield.annotations):
xform_doc_annotation("match_field %s.%s" %(table.preamble.name, matchfield.name),
matchfield.doc, matchfield.annotations, anno)
# Transform action annotations (doc)
def xform_action_param_annotations(action):
for param in action.params:
        # Iterate in reverse so deleting elements doesn't skip subsequent ones
for anno in reversed(param.annotations):
xform_doc_annotation("action %s(%s)" % (action.preamble.name,param.name),
param.doc, param.annotations, anno)
# Convenience function to define argument parser
def get_arg_parser():
parser = argparse.ArgumentParser(description='P4info transform utility',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
Either or both of infile, outfile can be omitted; a hyphen signifies stdin and stdout, respectively.
Using -i none or -o none overrides input/output file descriptors.
Examples:
=========
xform_anno.py [opts] <infile> <outfile> read infile, write to outfile
xform_anno.py [opts] <infile> - read infile, write to stdout
xform_anno.py [opts] <infile> read infile, write to stdout
xform_anno.py [opts] - <outfile> read stdin, write to outfile
xform_anno.py [opts] - - read stdin, write to stdout
xform_anno.py [opts] read stdin, write to stdout
Populate PkgInfo fields, some from cmd-line and one from a file; you can populate all pkg_xxx fields either way:
            ./xform_anno.py --pkg_name "MyPackage" --pkg_doc_brief "A cool package" --pkg_doc_descr "`cat descrip.txt`" <infile> <outfile>
'''))
# Parsing options
parser.add_argument('-d', help='Dry-run only; report transforms (via -v) but do not make changes',
action="store_true", dest='dry', default=False)
parser.add_argument('-v', help='Verbose reporting of transform steps',
action="store_true", dest='verbose', default=False)
parser.add_argument('infile', nargs='?', help='Input file name (use - or omit for stdin; -i none means no input)',
type=argparse.FileType('rb'), default=sys.stdin)
    parser.add_argument('outfile', nargs='?', help='Output file name (use - or omit for stdout; -o none means no output)',
type=argparse.FileType('wb'), default=sys.stdout)
parser.add_argument('-o', help='Output Format', dest='outfmt',
type=str, action='store', choices=['proto', 'json', 'text', 'none'],
default='proto')
parser.add_argument('-i', help='Input Format', dest='infmt',
type=str, action='store', choices=['proto', 'json', 'none'],
default='proto')
# PkgInfo elements
parser.add_argument('--pkg_name', help='Package name', type=str, action='store')
parser.add_argument('--pkg_doc_brief', help='Package document brief', type=str, action='store')
parser.add_argument('--pkg_doc_descr', help='Package document description', type=str, action='store')
parser.add_argument('--pkg_version', help='Package version', type=str, action='store')
parser.add_argument('--pkg_arch', help='Package target architecture', type=str, action='store')
parser.add_argument('--pkg_organization', help='Package organization', type=str, action='store')
parser.add_argument('--pkg_contact', help='Package contact', type=str, action='store')
parser.add_argument('--pkg_url', help='Package url', type=str, action='store')
parser.add_argument('--pkg_anno', help='Package annotation, can use multiple times', type=str, action='append')
return parser
# Extract cmd-line args and insert into PkgInfo Message
def add_arg_elements(args, p4info):
if args == None:
return
if p4info == None:
return
if args.pkg_name != None:
if dry == False:
p4info.pkg_info.name = args.pkg_name
log_verbose('+++ %sAdded pkg_name "%s"' % (drystr, args.pkg_name))
if args.pkg_version != None:
if dry == False:
p4info.pkg_info.version = args.pkg_version
log_verbose('+++ %sAdded pkg_version "%s"' % (drystr, args.pkg_version))
if args.pkg_doc_brief != None:
if dry == False:
p4info.pkg_info.doc.brief = args.pkg_doc_brief
log_verbose('+++ %sAdded pkg_doc_brief "%s"' % (drystr, args.pkg_doc_brief))
if args.pkg_doc_descr != None:
if dry == False:
p4info.pkg_info.doc.description = args.pkg_doc_descr
log_verbose('+++ %sAdded pkg_doc_descr "%s"' % (drystr, args.pkg_doc_descr))
if args.pkg_arch != None:
if dry == False:
p4info.pkg_info.arch = args.pkg_arch
log_verbose('+++ %sAdded pkg_arch "%s"' % (drystr, args.pkg_arch))
if args.pkg_organization != None:
if dry == False:
p4info.pkg_info.organization = args.pkg_organization
log_verbose('+++ %sAdded pkg_organization "%s"' % (drystr, args.pkg_organization))
if args.pkg_contact != None:
if dry == False:
p4info.pkg_info.contact = args.pkg_contact
log_verbose('+++ %sAdded pkg_contact "%s"' % (drystr, args.pkg_contact))
if args.pkg_url != None:
if dry == False:
p4info.pkg_info.url = args.pkg_url
log_verbose('+++ %sAdded pkg_url "%s"' % (drystr, args.pkg_url))
if args.pkg_anno != None:
tmp = [];
for anno in args.pkg_anno:
if dry == False:
p4info.pkg_info.annotations.append(anno)
else :
tmp.append(anno);
if dry == False:
log_verbose('+++ Added pkg_anno "%s"' % (p4info.pkg_info.annotations))
else:
log_verbose('+++ %sAdded pkg_anno "%s"' % (drystr, tmp))
return
########################################################
# Main - read file, transform it, write it
########################################################
#
# Get args
#
parser = get_arg_parser()
args = parser.parse_args()
verbose = args.verbose
dry = args.dry
if dry == True:
drystr='(dry): '
else:
drystr=''
infmt = args.infmt
outfmt = args.outfmt
if infmt != 'none':
infile = args.infile
if outfmt != 'none':
outfile = args.outfile
#
# Read input into protobuf
#
p4info = p4info_pb2.P4Info()
if (infmt == 'json'):
p4info = json_format.Parse(infile.read(), p4info_pb2.P4Info(), ignore_unknown_fields=False)
infile.close()
elif infmt == 'proto':
p4info.ParseFromString(infile.read())
infile.close()
add_arg_elements(args, p4info)
#
# Transform protobuf object(s)
for table in p4info.tables:
log_verbose("=== process table %s" % table.preamble.name)
xform_preamble_doc_annotations(table)
xform_table_match_field_annotations(table)
for action in p4info.actions:
log_verbose("=== process action %s" % action.preamble.name)
xform_preamble_doc_annotations(action)
xform_action_param_annotations(action)
for action_profile in p4info.action_profiles:
log_verbose("=== process action_profile %s" % action_profile.preamble.name)
xform_preamble_doc_annotations(action_profile)
for counter in p4info.counters:
log_verbose("=== process indirect counter %s" % counter.preamble.name)
xform_preamble_doc_annotations(counter)
for counter in p4info.direct_counters:
log_verbose("=== process direct_counter %s" % counter.preamble.name)
xform_preamble_doc_annotations(counter)
for meter in p4info.meters:
log_verbose("=== process indirect meter %s" % meter.preamble.name)
xform_preamble_doc_annotations(meter)
for meter in p4info.direct_meters:
log_verbose("=== process direct_meter %s" % meter.preamble.name)
xform_preamble_doc_annotations(meter)
for extern in p4info.externs:
for extern_instance in extern.instances:
log_verbose("=== process extern_instance %s" % extern_instance.preamble.name)
xform_preamble_doc_annotations(extern_instance)
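#
# Illustrative effect (example values, not from the original source): a table whose
# preamble carries the annotation '@brief("Routes IPv4 packets")' leaves the loops
# above with that annotation removed and preamble.doc.brief set to
# "Routes IPv4 packets"; @description() annotations populate doc.description the same way.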
#
# Write to output
#
if outfmt == 'json':
outfile.write(json_format.MessageToJson(p4info))
outfile.close()
elif outfmt == 'proto':
outfile.write(p4info.SerializeToString())
outfile.close()
elif outfmt == 'text':
outfile.write(text_format.MessageToString(p4info))
outfile.close()
admin.py
# Admin cog class, used for admin special functions. Uses the adminList env value to see which users have permission to run these commands.
class AdminCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.adminList = ADMIN_LIST
# Functions ###########################################################################################################################################################
# runProcess: Used to run a terminal command
async def runProcess(self, command):
try:
print("[AdminCog.runProcess] Git pull...")
process = await asyncio.create_subprocess_shell(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = await process.communicate()
except NotImplementedError:
print("[AdminCog.runProcess] NotImplementedError")
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = await self.bot.loop.run_in_executor(None, process.communicate)
return [output.decode() for output in result]
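        # Returns the decoded (stdout, stderr) pair as a list; reloadAll below
        # unpacks it as `stdout, stderr = await self.runProcess('git pull')`.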
# reloadLoadExtension: Tries to reload cogs or load new ones
def reloadLoadExtension(self, module):
module = "cogs." + module
try:
self.bot.reload_extension(module)
print("[AdminCog.reloadLoadExtension] Reloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.reloadLoadExtension] Loading " + str(module))
self.bot.load_extension(module)
# unloadExtension: Tries to deactivate cogs
def unloadExtension(self, module):
module = "cogs." + module
try:
self.bot.unload_extension(module)
print("[AdminCog.unloadExtension] Unloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.unloadExtension] Cog " + str(module) + "not loaded")
# reloadAll: Tries to reload all cogs, pulling new ones from git.
async def reloadAll(self, ctx, shouldPull):
# Tries to run git pull in a process, to update the bot before reloading
print("[AdminCog.reloadAll] Running process")
# If it has a 't' as argument, it pulls, else it ignores the git pull step
if shouldPull == "t":
async with ctx.typing():
stdout, stderr = await self.runProcess('git pull')
if stderr != "":
print("[AdminCog.reloadAll] stderr: " + str(stderr))
# Progress and other stuff redirected to stderr in git pull
# Messages like "fast forward" and files along with the text "already up-to-date" are in stdout
                # As we wish to rebuild even if git is up-to-date, we just print the result and keep going, without returning
if stdout.startswith('Already up-to-date.'):
print("[AdminCog.reloadAll] Already up-to-date.")
# return
else:
print("[AdminCog.reloadAll] Pulled changes")
else:
print("[AdminCog.reloadAll] Ignoring git pull")
        # One list to activate cogs, one list to deactivate
print("[AdminCog.reloadAll] Getting cog list")
modulesActivate = []
modulesDeactivate = []
        # Appends all files in cogFile.txt (we reload ALL cogs, even the ones that were not updated, and we unload deactivated cogs)
cogList = cogManage.readCogFile()
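        # Illustrative cogFile.txt contents (hypothetical example; format inferred
        # from the parsing below: '#' lines are comments, '! ' marks a cog to unload):
        #   # core cogs
        #   admin
        #   music
        #   ! games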
for cog in cogList:
# Ignore comments
if cog[0] == "#":
continue
# Add cogs to be deactivated
if cog[0] == "!":
# Ignore repetitions
if cog not in modulesDeactivate:
modulesDeactivate.append(cog[2:])
continue
            # If it doesn't start with '!', it is a cog to reload; ignore repetitions
if cog not in modulesActivate:
modulesActivate.append(cog)
print("[AdminCog.reloadAll] Starting reloads\n")
# Tries to load/reload all cogs
for module in modulesActivate:
try:
self.reloadLoadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Starting unloads\n")
# Tries to unload all cogs
for module in modulesDeactivate:
try:
self.unloadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Finished rebuilding\n")
# findPermission: Tries to read all the adminIDs from the env list, to see if the user has needed permissions
async def findPermission(self, ctx):
authorID = str(ctx.author.id)
# Check the admin list
for adminID in self.adminList:
if adminID.strip("\"") == authorID:
                print("[AdminCog.findPermission] Accepted")
                return True
print("[AdminCog.findPermission] Forbidden: authorID (" + str(authorID) + ") not found in adminList\n")
for admin in ADMIN_LIST:
print(admin)
return False
# Commands Methods ####################################################################################################################################################
# reloadCogs: Attempts to reload all cogs of the bot, if the user has permission for that
    # Skips git pull by default; it can be activated by passing any argument with the command
@commands.command(pass_context=True, name="reload", aliases=["rebuild", "restart"])
async def reloadCogs(self, ctx, shouldPull="f"):
print("[AdminCog.reloadCogs] Attempting to reload all cogs")
userPermission = await self.findPermission(ctx)
if userPermission:
await self.reloadAll(ctx, shouldPull)
# unloadCog: Attempts to deactivate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="unload", aliases=["unlaod", "deactivate"])
async def unloadCog(self, ctx, cogName):
print("[AdminCog.unloadCog] Attempting to unload " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.unloadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignore unloaded cogs
if cog[0] == "!":
continue
# Ignores the line terminator on cogFile
if cog == cogName:
print("[AdminCog.unloadCog] Found cog to unload on line " + str(index+1))
cog = str("! " + cog)
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.unloadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.unloadCog] Cog " + str(cogName) + " not found.\n")
return
# loadCog: Attempts to activate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="load", aliases=["laod", "activate"])
async def loadCog(self, ctx, cogName):
print("[AdminCog.loadCog] Attempting to load " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.loadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignores active cogs
if cog[0] != "!":
continue
# Ignores the line terminator on cogFile, looks for deactivated cogs
if cog == ("! " + cogName):
print("[AdminCog.loadCog] Found cog to load on line " + str(index+1))
cog = str(cog.split("! ")[1])
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.loadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.loadCog] Cog " + str(cogName) + " not found.\n")
return
# sendCogfile: Reads the cog file and sends all its contents
@commands.command(pass_context=True, name="read", aliases=["raed", "readcogs"])
async def sendCogfile(self, ctx):
print("[AdminCog.sendCogfile] Attempting to read cogFile")
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.sendCogfile] Reading cogFile")
cogList = cogManage.readCogFile()
await ctx.send(cogList)
return
# adminHelpCommand: Help function to get all cog usages
@commands.command(pass_context=True)
async def adminHelpCommand(self, ctx):
print("[AdminCog.adminHelpCommand] Generating embed")
        # If it is, we use the generic help function to generate an embed
# First we generate all needed fields
cogName = "admin"
cogDescription = "Admin tools
|
print("[AdminCog.findPermission] Accepted")
return True
|
conditional_block
|
admin.py
|
, used with admin special functions. Used the env for adminList to see people with permission to run these commands
class AdminCog(commands.Cog):
|
module = "cogs." + module
try:
self.bot.reload_extension(module)
print("[AdminCog.reloadLoadExtension] Reloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.reloadLoadExtension] Loading " + str(module))
self.bot.load_extension(module)
# unloadExtension: Tries to deactivate cogs
def unloadExtension(self, module):
module = "cogs." + module
try:
self.bot.unload_extension(module)
print("[AdminCog.unloadExtension] Unloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.unloadExtension] Cog " + str(module) + "not loaded")
# reloadAll: Tries to reload all cogs, pulling new ones from git.
async def reloadAll(self, ctx, shouldPull):
# Tries to run git pull in a process, to update the bot before reloading
print("[AdminCog.reloadAll] Running process")
# If it has a 't' as argument, it pulls, else it ignores the git pull step
if shouldPull == "t":
async with ctx.typing():
stdout, stderr = await self.runProcess('git pull')
if stderr != "":
print("[AdminCog.reloadAll] stderr: " + str(stderr))
# Progress and other stuff redirected to stderr in git pull
# Messages like "fast forward" and files along with the text "already up-to-date" are in stdout
# As we wish to rebuild even if git is up-to-date, we just print the result and keep the procces, without returning
if stdout.startswith('Already up-to-date.'):
print("[AdminCog.reloadAll] Already up-to-date.")
# return
else:
print("[AdminCog.reloadAll] Pulled changes")
else:
print("[AdminCog.reloadAll] Ignoring git pull")
# One list to activate cogs, one list to deactive
print("[AdminCog.reloadAll] Getting cog list")
modulesActivate = []
modulesDeactivate = []
# Appends all files in cogFile.txt (We reload ALL cogs, even the ones that was not updated, and we unload deactive cogs)
cogList = cogManage.readCogFile()
for cog in cogList:
# Ignore comments
if cog[0] == "#":
continue
# Add cogs to be deactivated
if cog[0] == "!":
# Ignore repetitions
if cog not in modulesDeactivate:
modulesDeactivate.append(cog[2:])
continue
# If it dont has a '!' on start, it is a cog to reload, and ignore repetitions
if cog not in modulesActivate:
modulesActivate.append(cog)
print("[AdminCog.reloadAll] Starting reloads\n")
# Tries to load/reload all cogs
for module in modulesActivate:
try:
self.reloadLoadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Starting unloads\n")
# Tries to unload all cogs
for module in modulesDeactivate:
try:
self.unloadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Finished rebuilding\n")
# findPermission: Tries to read all the adminIDs from the env list, to see if the user has needed permissions
async def findPermission(self, ctx):
authorID = str(ctx.author.id)
# Check the admin list
for adminID in self.adminList:
if adminID.strip("\"") == authorID:
print("[AdminCog.findPermission] Accepted")
return True
print("[AdminCog.findPermission] Forbidden: authorID (" + str(authorID) + ") not found in adminList\n")
for admin in ADMIN_LIST:
print(admin)
return False
# Commands Methods ####################################################################################################################################################
# reloadCogs: Attempts to reload all cogs of the bot, if the user has permission for that
# Skipps git pull by default, can be activated by passin any argument with the command
@commands.command(pass_context=True, name="reload", aliases=["rebuild", "restart"])
async def reloadCogs(self, ctx, shouldPull="f"):
print("[AdminCog.reloadCogs] Attempting to reload all cogs")
userPermission = await self.findPermission(ctx)
if userPermission:
await self.reloadAll(ctx, shouldPull)
# unloadCog: Attempts to deactivate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="unload", aliases=["unlaod", "deactivate"])
async def unloadCog(self, ctx, cogName):
print("[AdminCog.unloadCog] Attempting to unload " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.unloadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignore unloaded cogs
if cog[0] == "!":
continue
# Ignores the line terminator on cogFile
if cog == cogName:
print("[AdminCog.unloadCog] Found cog to unload on line " + str(index+1))
cog = str("! " + cog)
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.unloadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.unloadCog] Cog " + str(cogName) + " not found.\n")
return
# loadCog: Attempts to activate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="load", aliases=["laod", "activate"])
async def loadCog(self, ctx, cogName):
print("[AdminCog.loadCog] Attempting to load " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.loadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignores active cogs
if cog[0] != "!":
continue
# Ignores the line terminator on cogFile, looks for deactivated cogs
if cog == ("! " + cogName):
print("[AdminCog.loadCog] Found cog to load on line " + str(index+1))
cog = str(cog.split("! ")[1])
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.loadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.loadCog] Cog " + str(cogName) + " not found.\n")
return
# sendCogfile: Reads the cog file and sends all its contents
@commands.command(pass_context=True, name="read", aliases=["raed", "readcogs"])
async def sendCogfile(self, ctx):
print("[AdminCog.sendCogfile] Attempting to read cogFile")
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.sendCogfile] Reading cogFile")
cogList = cogManage.readCogFile()
await ctx.send(cogList)
return
# adminHelpCommand: Help function to get all cog usages
@commands.command(pass_context=True)
async def adminHelpCommand(self, ctx):
print("[AdminCog.adminHelpCommand] Generating embed")
# If it is, we use the generic help function to generate a embed
# First we generate all needed fields
cogName = "admin"
cogDescription = "Admin tools to
|
def __init__(self, bot):
self.bot = bot
self.adminList = ADMIN_LIST
# Functions ###########################################################################################################################################################
# runProcess: Used to run a terminal command
async def runProcess(self, command):
try:
print("[AdminCog.runProcess] Git pull...")
process = await asyncio.create_subprocess_shell(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = await process.communicate()
except NotImplementedError:
print("[AdminCog.runProcess] NotImplementedError")
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = await self.bot.loop.run_in_executor(None, process.communicate)
return [output.decode() for output in result]
# reloadLoadExtension: Tries to reload cogs or load new ones
def reloadLoadExtension(self, module):
|
identifier_body
|
admin.py
|
Class, used with admin special functions. Used the env for adminList to see people with permission to run these commands
class AdminCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.adminList = ADMIN_LIST
# Functions ###########################################################################################################################################################
# runProcess: Used to run a terminal command
async def runProcess(self, command):
try:
print("[AdminCog.runProcess] Git pull...")
process = await asyncio.create_subprocess_shell(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = await process.communicate()
except NotImplementedError:
print("[AdminCog.runProcess] NotImplementedError")
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = await self.bot.loop.run_in_executor(None, process.communicate)
return [output.decode() for output in result]
# reloadLoadExtension: Tries to reload cogs or load new ones
def reloadLoadExtension(self, module):
module = "cogs." + module
try:
self.bot.reload_extension(module)
print("[AdminCog.reloadLoadExtension] Reloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.reloadLoadExtension] Loading " + str(module))
self.bot.load_extension(module)
# unloadExtension: Tries to deactivate cogs
def unloadExtension(self, module):
module = "cogs." + module
try:
self.bot.unload_extension(module)
print("[AdminCog.unloadExtension] Unloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.unloadExtension] Cog " + str(module) + "not loaded")
# reloadAll: Tries to reload all cogs, pulling new ones from git.
async def reloadAll(self, ctx, shouldPull):
# Tries to run git pull in a process, to update the bot before reloading
print("[AdminCog.reloadAll] Running process")
# If the argument is 't', it pulls; otherwise it skips the git pull step
if shouldPull == "t":
async with ctx.typing():
stdout, stderr = await self.runProcess('git pull')
if stderr != "":
print("[AdminCog.reloadAll] stderr: " + str(stderr))
# Progress and other status output is redirected to stderr by git pull
# Messages like "fast forward" and the changed files, along with the text "already up-to-date", are in stdout
# As we wish to rebuild even if git is up-to-date, we just print the result and keep the process going, without returning
if stdout.startswith('Already up-to-date.'):
print("[AdminCog.reloadAll] Already up-to-date.")
# return
else:
print("[AdminCog.reloadAll] Pulled changes")
else:
print("[AdminCog.reloadAll] Ignoring git pull")
# One list to activate cogs, one list to deactivate
print("[AdminCog.reloadAll] Getting cog list")
modulesActivate = []
modulesDeactivate = []
# Appends all files in cogFile.txt (we reload ALL cogs, even the ones that were not updated, and we unload deactivated cogs)
cogList = cogManage.readCogFile()
for cog in cogList:
# Ignore comments
if cog[0] == "#":
continue
# Add cogs to be deactivated
if cog[0] == "!":
# Ignore repetitions
if cog not in modulesDeactivate:
modulesDeactivate.append(cog[2:])
continue
# If it does not start with '!', it is a cog to reload; ignore repetitions
if cog not in modulesActivate:
modulesActivate.append(cog)
print("[AdminCog.reloadAll] Starting reloads\n")
# Tries to load/reload all cogs
for module in modulesActivate:
try:
self.reloadLoadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Starting unloads\n")
# Tries to unload all cogs
for module in modulesDeactivate:
try:
self.unloadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Finished rebuilding\n")
# findPermission: Tries to read all the adminIDs from the env list, to see if the user has the needed permissions
async def findPermission(self, ctx):
authorID = str(ctx.author.id)
# Check the admin list
for adminID in self.adminList:
if adminID.strip("\"") == authorID:
print("[AdminCog.findPermission] Accepted")
return True
print("[AdminCog.findPermission] Forbidden: authorID (" + str(authorID) + ") not found in adminList\n")
for admin in ADMIN_LIST:
print(admin)
return False
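As an aside, a minimal sketch of how ADMIN_LIST could be populated, assuming it comes from an environment variable holding a comma-separated list of quoted Discord user IDs; the variable name ADMIN_IDS and the parsing below are assumptions for illustration, not taken from this file:

import os

# hypothetical env var: ADMIN_IDS="123456789012345678","234567890123456789"
ADMIN_LIST = [entry.strip() for entry in os.getenv("ADMIN_IDS", "").split(",") if entry.strip()]

findPermission then compares each entry, with surrounding quotes stripped, against str(ctx.author.id).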
# Commands Methods ####################################################################################################################################################
# reloadCogs: Attempts to reload all cogs of the bot, if the user has permission for that
# Skips git pull by default; git pull can be activated by passing "t" as an argument to the command
@commands.command(pass_context=True, name="reload", aliases=["rebuild", "restart"])
async def reloadCogs(self, ctx, shouldPull="f"):
print("[AdminCog.reloadCogs] Attempting to reload all cogs")
userPermission = await self.findPermission(ctx)
if userPermission:
await self.reloadAll(ctx, shouldPull)
# unloadCog: Attempts to deactivate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="unload", aliases=["unlaod", "deactivate"])
async def unloadCog(self, ctx, cogName):
print("[AdminCog.unloadCog] Attempting to unload " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.unloadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignore unloaded cogs
if cog[0] == "!":
continue
# Ignores the line terminator on cogFile
if cog == cogName:
print("[AdminCog.unloadCog] Found cog to unload on line " + str(index+1))
cog = str("! " + cog)
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.unloadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.unloadCog] Cog " + str(cogName) + " not found.\n")
return
# loadCog: Attempts to activate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="load", aliases=["laod", "activate"])
async def loadCog(self, ctx, cogName):
print("[AdminCog.loadCog] Attempting to load " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.loadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignores active cogs
if cog[0] != "!":
continue
# Ignores the line terminator on cogFile, looks for deactivated cogs
if cog == ("! " + cogName):
print("[AdminCog.loadCog] Found cog to load on line " + str(index+1))
cog = str(cog.split("! ")[1])
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.loadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.loadCog] Cog " + str(cogName) + " not found.\n")
return
# sendCogfile: Reads the cog file and sends all its contents
@commands.command(pass_context=True, name="read", aliases=["raed", "readcogs"])
async def sendCogfile(self, ctx):
print("[AdminCog.sendCogfile] Attempting to read cogFile")
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.sendCogfile] Reading cogFile")
cogList = cogManage.readCogFile()
await ctx.send(cogList)
return
# adminHelpCommand: Help function to get all cog usages
@commands.command(pass_context=True)
async def
|
(self, ctx):
print("[AdminCog.adminHelpCommand] Generating embed")
# If it is, we use the generic help function to generate an embed
# First we generate all needed fields
cogName = "admin"
cogDescription = "Admin
|
adminHelpCommand
|
identifier_name
|
admin.py
|
try:
self.bot.reload_extension(module)
print("[AdminCog.reloadLoadExtension] Reloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.reloadLoadExtension] Loading " + str(module))
self.bot.load_extension(module)
# unloadExtension: Tries to deactivate cogs
def unloadExtension(self, module):
module = "cogs." + module
try:
self.bot.unload_extension(module)
print("[AdminCog.unloadExtension] Unloading " + str(module))
except commands.ExtensionNotLoaded:
print("[AdminCog.unloadExtension] Cog " + str(module) + "not loaded")
# reloadAll: Tries to reload all cogs, pulling new ones from git.
async def reloadAll(self, ctx, shouldPull):
# Tries to run git pull in a process, to update the bot before reloading
print("[AdminCog.reloadAll] Running process")
# If the argument is 't', it pulls; otherwise it skips the git pull step
if shouldPull == "t":
async with ctx.typing():
stdout, stderr = await self.runProcess('git pull')
if stderr != "":
print("[AdminCog.reloadAll] stderr: " + str(stderr))
# Progress and other status output is redirected to stderr by git pull
# Messages like "fast forward" and the changed files, along with the text "already up-to-date", are in stdout
# As we wish to rebuild even if git is up-to-date, we just print the result and keep the process going, without returning
if stdout.startswith('Already up-to-date.'):
print("[AdminCog.reloadAll] Already up-to-date.")
# return
else:
print("[AdminCog.reloadAll] Pulled changes")
else:
print("[AdminCog.reloadAll] Ignoring git pull")
# One list to activate cogs, one list to deactivate
print("[AdminCog.reloadAll] Getting cog list")
modulesActivate = []
modulesDeactivate = []
# Appends all files in cogFile.txt (we reload ALL cogs, even the ones that were not updated, and we unload deactivated cogs)
cogList = cogManage.readCogFile()
for cog in cogList:
# Ignore comments
if cog[0] == "#":
continue
# Add cogs to be deactivated
if cog[0] == "!":
# Ignore repetitions
if cog not in modulesDeactivate:
modulesDeactivate.append(cog[2:])
continue
# If it does not start with '!', it is a cog to reload; ignore repetitions
if cog not in modulesActivate:
modulesActivate.append(cog)
print("[AdminCog.reloadAll] Starting reloads\n")
# Tries to load/reload all cogs
for module in modulesActivate:
try:
self.reloadLoadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Starting unloads\n")
# Tries to unload all cogs
for module in modulesDeactivate:
try:
self.unloadExtension(module)
except commands.ExtensionError as e:
print("[AdminCog.reloadAll] commands.ExtensionError " + str(e))
print("[AdminCog.reloadAll] Finished rebuilding\n")
# findPermission: Tries to read all the adminIDs from the env list, to see if the user has the needed permissions
async def findPermission(self, ctx):
authorID = str(ctx.author.id)
# Check the admin list
for adminID in self.adminList:
if adminID.strip("\"") == authorID:
print("[AdminCog.findPermission] Accepted")
return True
print("[AdminCog.findPermission] Forbidden: authorID (" + str(authorID) + ") not found in adminList\n")
for admin in ADMIN_LIST:
print(admin)
return False
# Commands Methods ####################################################################################################################################################
# reloadCogs: Attempts to reload all cogs of the bot, if the user has permission for that
# Skips git pull by default; git pull can be activated by passing "t" as an argument to the command
@commands.command(pass_context=True, name="reload", aliases=["rebuild", "restart"])
async def reloadCogs(self, ctx, shouldPull="f"):
print("[AdminCog.reloadCogs] Attempting to reload all cogs")
userPermission = await self.findPermission(ctx)
if userPermission:
await self.reloadAll(ctx, shouldPull)
# unloadCog: Attempts to deactivate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="unload", aliases=["unlaod", "deactivate"])
async def unloadCog(self, ctx, cogName):
print("[AdminCog.unloadCog] Attempting to unload " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.unloadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignore unloaded cogs
if cog[0] == "!":
continue
# Ignores the line terminator on cogFile
if cog == cogName:
print("[AdminCog.unloadCog] Found cog to unload on line " + str(index+1))
cog = str("! " + cog)
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.unloadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.unloadCog] Cog " + str(cogName) + " not found.\n")
return
# loadCog: Attempts to activate a single cog, and then reloads all of them without git pull
@commands.command(pass_context=True, name="load", aliases=["laod", "activate"])
async def loadCog(self, ctx, cogName):
print("[AdminCog.loadCog] Attempting to load " + cogName)
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.loadCog] Reading cogFile")
cogList = cogManage.readCogFile()
for index, cog in enumerate(cogList, start=0):
# Ignores active cogs
if cog[0] != "!":
continue
# Ignores the line terminator on cogFile, looks for deactivated cogs
if cog == ("! " + cogName):
print("[AdminCog.loadCog] Found cog to load on line " + str(index+1))
cog = str(cog.split("! ")[1])
cogList[index] = cog
# Clear the cogFile
await cogManage.clearCogFile()
# Calls the writer to make new cogFile
await cogManage.writeCogFile(cogList)
print("[AdminCog.loadCog] Reloading cogs...\n")
# Reload all cogs to apply changes, ignoring git pull
await self.reloadCogs(ctx, "f")
return
print("[AdminCog.loadCog] Cog " + str(cogName) + " not found.\n")
return
# sendCogfile: Reads the cog file and sends all its contents
@commands.command(pass_context=True, name="read", aliases=["raed", "readcogs"])
async def sendCogfile(self, ctx):
print("[AdminCog.sendCogfile] Attempting to read cogFile")
userPermission = await self.findPermission(ctx)
if userPermission:
print("[AdminCog.sendCogfile] Reading cogFile")
cogList = cogManage.readCogFile()
await ctx.send(cogList)
return
# adminHelpCommand: Help function to get all cog usages
@commands.command(pass_context=True)
async def adminHelpCommand(self, ctx):
print("[AdminCog.adminHelpCommand] Generating embed")
# If it is, we use the generic help function to generate an embed
# First we generate all needed fields
cogName = "admin"
cogDescription = "Admin tools to control bot cogs and updates"
cogEmbed = 0xeb4034
# List of commands from this cog
helpList = [
("🔃 reload", "Reloads all cogs on the bot. Can do git pull if used with \"t\"", "`.reload` | `.rebuild` | `.reload t` | `.rebuild t`"),
("⬇️ load", "Activates and load a single cog.", "`.load <cog name>` | `.activate <cog name>`" ),
("⬆️ unload", "Deactivates and unloads a single cog." , "`.unload <cog name>` | `.deactivate <cog name>`"),
("ℹ read", "Reads the entire cog file and sends the cogs." , "`.read` | `.readcogs`"),
|
]
print("[AdminCog.adminHelpCommand] Sending embed\n")
await helpFunction.generateHelpEmbed(ctx, cogName, cogDescription, helpList, cogEmbed)
return
|
random_line_split
|
|
edit_ops.rs
|
<BT, AT>(
base: &Rope,
regions: &[SelRegion],
before_text: BT,
after_text: AT,
) -> RopeDelta
where
BT: Into<Rope>,
AT: Into<Rope>,
{
let mut builder = DeltaBuilder::new(base.len());
let before_rope = before_text.into();
let after_rope = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} else {
None
};
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if !region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to hold the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char| !c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|&region| sel_region_to_interval_and_rope(base, region));
for &region in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emacs's behavior. It swaps the last
// two characters of the line if at the end of the line.
if start >= last
|
surround
|
identifier_name
|
|
edit_ops.rs
|
pe = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save
|
else {
None
};
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if !region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to hold the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char| !c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|&region| sel_region_to_interval_and_rope(base, region));
for &region in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emacs's behavior. It swaps the last
// two characters of the line if at the end of the line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end != base.len() because if the editor is entirely empty, we don't want to pull from empty space
if (end == middle || end == end_line_offset) && end != base.len() {
middle = start;
start = base.prev
|
{
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
}
|
conditional_block
|
edit_ops.rs
|
pe = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>)
|
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if !region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to hold the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char| !c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|&region| sel_region_to_interval_and_rope(base, region));
for &region in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emacs's behavior. It swaps the last
// two characters of the line if at the end of the line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end != base.len() because if the editor is entirely empty, we don't want to pull from empty space
if (end == middle || end == end_line_offset) && end != base.len() {
middle = start;
start = base
|
{
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} else {
None
};
|
identifier_body
|
edit_ops.rs
|
pe = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} else {
None
};
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if !iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if !region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to hold the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
|
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char| !c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|&region| sel_region_to_interval_and_rope(base, region));
for &region in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emacs's behavior. It swaps the last
// two characters of the line if at the end of the line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end != base.len() because if the editor is entirely empty, we don't want to pull from empty space
if (end == middle || end == end_line_offset) && end != base.len() {
middle = start;
start = base.prev
|
random_line_split
|
|
utils.py
|
download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
label_colours = [(0, 0, 0),
# 0=background
(148, 65, 137), (255, 116, 69), (86, 156, 137),
(202, 179, 158), (155, 99, 235), (161, 107, 108),
(133, 160, 103), (76, 152, 126), (84, 62, 35),
(44, 80, 130), (31, 184, 157), (101, 144, 77),
(23, 197, 62), (141, 168, 145), (142, 151, 136),
(115, 201, 77), (100, 216, 255), (57, 156, 36),
(88, 108, 129), (105, 129, 112), (42, 137, 126),
(155, 108, 249), (166, 148, 143), (81, 91, 87),
(100, 124, 51), (73, 131, 121), (157, 210, 220),
(134, 181, 60), (221, 223, 147), (123, 108, 131),
(161, 66, 179), (163, 221, 160), (31, 146, 98),
(99, 121, 30), (49, 89, 240), (116, 108, 9),
(161, 176, 169), (80, 29, 135), (177, 105, 197),
(139, 110, 246)]
'''
def download_trained_weights(model_name, model_path, verbose=1):
"""
Download trained weights from previous training on depth images or rgbd images.
"""
# depth_model_path: local path of depth trained weights
if verbose > 0:
print("Downloading pretrained model to " + model_path + " ...")
if model_name == 'depth':
with contextlib.closing(request.urlopen(DEPTH_TRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
elif model_name == 'rgbd':
with contextlib.closing(request.urlopen(REDNET_PRETRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
'''
'''
#An example of using depth2plane:
fittingSize = 2
import imageio
from utils.utils import depth2plane
depth = imageio.imread('./data/SUNRGBD/kv1/NYUdata/NYU0002/depth_bfx/NYU0002.png')
plane = depth2plane(depth, extrinsic, intrinsic, fittingSize)
planeImage = plane.getPlaneImage()
plane.visualizePlaneImage(planeImage)
'''
class depth2plane:
def __init__(self, depth, extrinsic, intrinsic, fittingSize=5):
self.depthImage = depth
self.extrinsic = extrinsic
self.intrinsic = intrinsic
self.fittingSize = fittingSize
def getPlaneImage(self):
planeImage = self.estimate_planes()
return planeImage
def matrix_from_txt(self, file):
f = open(file)
l = []
for line in f.readlines():
line = line.strip('\n')
for j in range(len(list(line.split()))):
l.append(line.split()[j])
matrix = np.array(l, dtype=np.float32)
return matrix
def getCameraInfo(self):
K = self.intrinsic
ifocal_length_x = 1.0/K[0]
ifocal_length_y = 1.0/K[4]
center_x = K[2]
center_y = K[5]
camera_pose = self.extrinsic
camera_pose = camera_pose.reshape(3, 4)
Rtilt = camera_pose[0:3, 0:3]
#A = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0], dtype=np.float32)
#A = A.reshape(3, 3)
#B = np.array([1, 0, 0, 0, 0, -1, 0, 1, 0], dtype=np.float32)
#B = B.reshape(3, 3)
#Rtilt = A*Rtilt*B
#Rtilt = np.matmul(np.matmul(A, Rtilt), B)
return ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt
def bitShiftDepthMap(self, depthImage):
|
def depthImage2ptcloud(self, depth_img):
[ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt] = self.getCameraInfo()
ptCloud = np.zeros(shape=(int(depth_img.shape[0]), int(depth_img.shape[1]), 3), dtype=np.float32)
for y in xrange(0, depth_img.shape[0]):
for x in xrange(0, depth_img.shape[1]):
depth = depth_img[y, x]
if (~np.isnan(depth)):
#print(depth)
ptCloud[y, x, 0] = ifocal_length_x * (x - center_x) * depth
ptCloud[y, x, 1] = ifocal_length_y * (y - center_y) * depth
ptCloud[y, x, 2] = depth
points3d = ptCloud.reshape(-1, 3) # [height*width, 3]
ptCloud = (np.matmul(Rtilt, points3d.T)).T
ptCloud = ptCloud.astype(np.float32)
ptCloud = ptCloud.reshape(depth_img.shape[0], depth_img.shape[1], 3) # [height, width, 3]
return ptCloud
def estimate_planes(self):
winsize = self.fittingSize
depthImage = self.depthImage
depthImage = self.smoothing(depthImage, filtersize=3)
#depth_img = self.bitShiftDepthMap(depthImage) # no need to shift in a real scenario
depth_img = depthImage
ptCloud = self.depthImage2ptcloud(depth_img)
#ptCloud = self.smoothing(ptCloud, filtersize=5) # smoothing the inpainted depth image prevents the fitting from converging
planes_img = np.zeros(shape=(int(depth_img.shape[0] / winsize), int(depth_img.shape[1] / winsize), 4), dtype=np.float32)
for y in range(0, depth_img.shape[0]-winsize, winsize):
for x in range(0, depth_img.shape[1]-winsize, winsize):
windowDepths = depth_img[y:(y + winsize + 1), x:(x + winsize + 1)]
# print(windowDepths)
numValidPoints = np.count_nonzero(~np.isnan(windowDepths))
# print(numValidPoints)
if (numValidPoints < 3):
plane3 = np.array([0, 0, 0, 0])
else:
pts3D = np.empty(shape=(numValidPoints, 3), dtype=np.float32)
offset = 0
# print(pts3D)
for ywin in range(0, winsize + 1):
for xwin in range(0, winsize + 1):
if (~np
|
depthVisData = np.asarray(depthImage, np.uint16)
depthInpaint = np.bitwise_or(np.right_shift(depthVisData, 3), np.left_shift(depthVisData, 16 - 3))
depthInpaint = depthInpaint.astype(np.single) / 1000
depthInpaint[depthInpaint > 8] = 8
return depthInpaint
|
identifier_body
|
utils.py
|
, 65, 137), (255, 116, 69), (86, 156, 137),
(202, 179, 158), (155, 99, 235), (161, 107, 108),
(133, 160, 103), (76, 152, 126), (84, 62, 35),
(44, 80, 130), (31, 184, 157), (101, 144, 77),
(23, 197, 62), (141, 168, 145), (142, 151, 136),
(115, 201, 77), (100, 216, 255), (57, 156, 36),
(88, 108, 129), (105, 129, 112), (42, 137, 126),
(155, 108, 249), (166, 148, 143), (81, 91, 87),
(100, 124, 51), (73, 131, 121), (157, 210, 220),
(134, 181, 60), (221, 223, 147), (123, 108, 131),
(161, 66, 179), (163, 221, 160), (31, 146, 98),
(99, 121, 30), (49, 89, 240), (116, 108, 9),
(161, 176, 169), (80, 29, 135), (177, 105, 197),
(139, 110, 246)]
'''
def download_trained_weights(model_name, model_path, verbose=1):
"""
Download trained weights from previous training on depth images or rgbd images.
"""
# depth_model_path: local path of depth trained weights
if verbose > 0:
print("Downloading pretrained model to " + model_path + " ...")
if model_name == 'depth':
with contextlib.closing(request.urlopen(DEPTH_TRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
elif model_name == 'rgbd':
with contextlib.closing(request.urlopen(REDNET_PRETRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
'''
'''
#An example of using depth2plane:
fittingSize = 2
import imageio
from utils.utils import depth2plane
depth = imageio.imread('./data/SUNRGBD/kv1/NYUdata/NYU0002/depth_bfx/NYU0002.png')
plane = depth2plane(depth, extrinsic, intrinsic, fittingSize)
planeImage = plane.getPlaneImage()
plane.visualizePlaneImage(planeImage)
'''
class depth2plane:
def __init__(self, depth, extrinsic, intrinsic, fittingSize=5):
self.depthImage = depth
self.extrinsic = extrinsic
self.intrinsic = intrinsic
self.fittingSize = fittingSize
def getPlaneImage(self):
planeImage = self.estimate_planes()
return planeImage
def matrix_from_txt(self, file):
f = open(file)
l = []
for line in f.readlines():
line = line.strip('\n')
for j in range(len(list(line.split()))):
l.append(line.split()[j])
matrix = np.array(l, dtype=np.float32)
return matrix
def getCameraInfo(self):
K = self.intrinsic
ifocal_length_x = 1.0/K[0]
ifocal_length_y = 1.0/K[4]
center_x = K[2]
center_y = K[5]
camera_pose = self.extrinsic
camera_pose = camera_pose.reshape(3, 4)
Rtilt = camera_pose[0:3, 0:3]
#A = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0], dtype=np.float32)
#A = A.reshape(3, 3)
#B = np.array([1, 0, 0, 0, 0, -1, 0, 1, 0], dtype=np.float32)
#B = B.reshape(3, 3)
#Rtilt = A*Rtilt*B
#Rtilt = np.matmul(np.matmul(A, Rtilt), B)
return ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt
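The indexing above (K[0], K[4], K[2], K[5]) assumes the intrinsics were flattened row-major into a length-9 array. A small sketch with example values makes the mapping explicit (the numbers are illustrative, not taken from this file):

import numpy as np

# [fx,  0, cx,
#   0, fy, cy,
#   0,  0,  1]  flattened row-major, as read by getCameraInfo
K = np.array([518.86, 0.0, 325.58,
              0.0, 519.47, 253.74,
              0.0, 0.0, 1.0], dtype=np.float32)
ifocal_length_x = 1.0 / K[0]      # 1 / fx
ifocal_length_y = 1.0 / K[4]      # 1 / fy
center_x, center_y = K[2], K[5]   # principal point (cx, cy)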
def bitShiftDepthMap(self, depthImage):
depthVisData = np.asarray(depthImage, np.uint16)
depthInpaint = np.bitwise_or(np.right_shift(depthVisData, 3), np.left_shift(depthVisData, 16 - 3))
depthInpaint = depthInpaint.astype(np.single) / 1000
depthInpaint[depthInpaint > 8] = 8
return depthInpaint
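bitShiftDepthMap effectively rotates each 16-bit raw depth value right by 3 bits (the low 3 bits wrap into the high bits), then converts to metres and clips at 8 m. A quick, self-contained check of the rotation on a single value; this only illustrates the two shift calls used above:

import numpy as np

raw = np.asarray([1001], np.uint16)   # low 3 bits are 001
rot = np.bitwise_or(np.right_shift(raw, 3), np.left_shift(raw, 16 - 3))
print(rot)                            # [8317]: 1001 >> 3 is 125, the wrapped low bit adds 8192
print(rot.astype(np.single) / 1000)   # depth in metres before the 8 m clip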
def depthImage2ptcloud(self, depth_img):
[ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt] = self.getCameraInfo()
ptCloud = np.zeros(shape=(int(depth_img.shape[0]), int(depth_img.shape[1]), 3), dtype=np.float32)
for y in xrange(0, depth_img.shape[0]):
for x in xrange(0, depth_img.shape[1]):
depth = depth_img[y, x]
if (~np.isnan(depth)):
#print(depth)
ptCloud[y, x, 0] = ifocal_length_x * (x - center_x) * depth
ptCloud[y, x, 1] = ifocal_length_y * (y - center_y) * depth
ptCloud[y, x, 2] = depth
points3d = ptCloud.reshape(-1, 3) # [height*width, 3]
ptCloud = (np.matmul(Rtilt, points3d.T)).T
ptCloud = ptCloud.astype(np.float32)
ptCloud = ptCloud.reshape(depth_img.shape[0], depth_img.shape[1], 3) # [height, width, 3]
return ptCloud
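The per-pixel loops above implement the usual pinhole back-projection X = (x - cx) * Z / fx, Y = (y - cy) * Z / fy, followed by the Rtilt rotation. As a side note, the same computation can be written without Python loops; this is only an illustrative sketch, not part of the original class, and it lets NaNs propagate instead of leaving those entries at zero:

import numpy as np

def backproject(depth_img, ifx, ify, cx, cy, Rtilt):
    h, w = depth_img.shape
    xs, ys = np.meshgrid(np.arange(w), np.arange(h))           # pixel coordinates
    pts = np.stack([ifx * (xs - cx) * depth_img,
                    ify * (ys - cy) * depth_img,
                    depth_img], axis=-1).astype(np.float32)
    return (Rtilt @ pts.reshape(-1, 3).T).T.reshape(h, w, 3)   # rotate into the tilted frame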
def estimate_planes(self):
winsize = self.fittingSize
depthImage = self.depthImage
depthImage = self.smoothing(depthImage, filtersize=3)
#depth_img = self.bitShiftDepthMap(depthImage) # no need to shift in a real scenario
depth_img = depthImage
ptCloud = self.depthImage2ptcloud(depth_img)
#ptCloud = self.smoothing(ptCloud, filtersize=5) # smoothing the inpainted depth image prevents the fitting from converging
planes_img = np.zeros(shape=(int(depth_img.shape[0] / winsize), int(depth_img.shape[1] / winsize), 4), dtype=np.float32)
for y in range(0, depth_img.shape[0]-winsize, winsize):
|
for x in range(0, depth_img.shape[1]-winsize, winsize):
windowDepths = depth_img[y:(y + winsize + 1), x:(x + winsize + 1)]
# print(windowDepths)
numValidPoints = np.count_nonzero(~np.isnan(windowDepths))
# print(numValidPoints)
if (numValidPoints < 3):
plane3 = np.array([0, 0, 0, 0])
else:
pts3D = np.empty(shape=(numValidPoints, 3), dtype=np.float32)
offset = 0
# print(pts3D)
for ywin in range(0, winsize + 1):
for xwin in range(0, winsize + 1):
if (~np.isnan(ptCloud[y + ywin, x + xwin, 2])):
pts3D[offset, :] = ptCloud[y + ywin, x + xwin, :]
offset += 1
plane3 = self.fitPlaneImplicitLeastSquares(pts3D)
planes_img[int(y/winsize), int(x/winsize), :] = plane3
|
conditional_block
|
|
utils.py
|
Download trained weights from previous training on depth images or rgbd images.
"""
# depth_model_path: local path of depth trained weights
if verbose > 0:
print("Downloading pretrained model to " + model_path + " ...")
if model_name == 'depth':
with contextlib.closing(request.urlopen(DEPTH_TRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
elif model_name == 'rgbd':
with contextlib.closing(request.urlopen(REDNET_PRETRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
'''
'''
#An example of using depth2plane:
fittingSize = 2
import imageio
from utils.utils import depth2plane
depth = imageio.imread('./data/SUNRGBD/kv1/NYUdata/NYU0002/depth_bfx/NYU0002.png')
plane = depth2plane(depth, extrinsic, intrinsic, fittingSize)
planeImage = plane.getPlaneImage()
plane.visualizePlaneImage(planeImage)
'''
class depth2plane:
def __init__(self, depth, extrinsic, intrinsic, fittingSize=5):
self.depthImage = depth
self.extrinsic = extrinsic
self.intrinsic = intrinsic
self.fittingSize = fittingSize
def getPlaneImage(self):
planeImage = self.estimate_planes()
return planeImage
def matrix_from_txt(self, file):
f = open(file)
l = []
for line in f.readlines():
line = line.strip('\n')
for j in range(len(list(line.split()))):
l.append(line.split()[j])
matrix = np.array(l, dtype=np.float32)
return matrix
def getCameraInfo(self):
K = self.intrinsic
ifocal_length_x = 1.0/K[0]
ifocal_length_y = 1.0/K[4]
center_x = K[2]
center_y = K[5]
camera_pose = self.extrinsic
camera_pose = camera_pose.reshape(3, 4)
Rtilt = camera_pose[0:3, 0:3]
#A = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0], dtype=np.float32)
#A = A.reshape(3, 3)
#B = np.array([1, 0, 0, 0, 0, -1, 0, 1, 0], dtype=np.float32)
#B = B.reshape(3, 3)
#Rtilt = A*Rtilt*B
#Rtilt = np.matmul(np.matmul(A, Rtilt), B)
return ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt
def bitShiftDepthMap(self, depthImage):
depthVisData = np.asarray(depthImage, np.uint16)
depthInpaint = np.bitwise_or(np.right_shift(depthVisData, 3), np.left_shift(depthVisData, 16 - 3))
depthInpaint = depthInpaint.astype(np.single) / 1000
depthInpaint[depthInpaint > 8] = 8
return depthInpaint
def depthImage2ptcloud(self, depth_img):
[ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt] = self.getCameraInfo()
ptCloud = np.zeros(shape=(int(depth_img.shape[0]), int(depth_img.shape[1]), 3), dtype=np.float32)
for y in xrange(0, depth_img.shape[0]):
for x in xrange(0, depth_img.shape[1]):
depth = depth_img[y, x]
if (~np.isnan(depth)):
#print(depth)
ptCloud[y, x, 0] = ifocal_length_x * (x - center_x) * depth
ptCloud[y, x, 1] = ifocal_length_y * (y - center_y) * depth
ptCloud[y, x, 2] = depth
points3d = ptCloud.reshape(-1, 3) # [height*width, 3]
ptCloud = (np.matmul(Rtilt, points3d.T)).T
        ptCloud = ptCloud.astype(np.float32)
ptCloud = ptCloud.reshape(depth_img.shape[0], depth_img.shape[1], 3) # [height, width, 3]
return ptCloud
def estimate_planes(self):
winsize = self.fittingSize
depthImage = self.depthImage
depthImage = self.smoothing(depthImage, filtersize=3)
        #depth_img = self.bitShiftDepthMap(depthImage) # no need to shift in a real scenario
depth_img = depthImage
ptCloud = self.depthImage2ptcloud(depth_img)
        #ptCloud = self.smoothing(ptCloud, filtersize=5) # smoothing the inpainted depth image causes non-convergence in the fitting
planes_img = np.zeros(shape=(int(depth_img.shape[0] / winsize), int(depth_img.shape[1] / winsize), 4), dtype=np.float32)
for y in range(0, depth_img.shape[0]-winsize, winsize):
for x in range(0, depth_img.shape[1]-winsize, winsize):
windowDepths = depth_img[y:(y + winsize + 1), x:(x + winsize + 1)]
# print(windowDepths)
numValidPoints = np.count_nonzero(~np.isnan(windowDepths))
# print(numValidPoints)
if (numValidPoints < 3):
plane3 = np.array([0, 0, 0, 0])
else:
pts3D = np.empty(shape=(numValidPoints, 3), dtype=np.float32)
offset = 0
# print(pts3D)
for ywin in range(0, winsize + 1):
for xwin in range(0, winsize + 1):
if (~np.isnan(ptCloud[y + ywin, x + xwin, 2])):
pts3D[offset, :] = ptCloud[y + ywin, x + xwin, :]
offset += 1
plane3 = self.fitPlaneImplicitLeastSquares(pts3D)
planes_img[int(y/winsize), int(x/winsize), :] = plane3
return planes_img
def fitPlaneImplicitLeastSquares(self, points):
from numpy import linalg as LA
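        # Total least squares fit: the normal [a, b, c] is the eigenvector of the demeaned
        # points' scatter matrix with the smallest eigenvalue (eigh returns eigenvalues in
        # ascending order), and d is set so the plane passes through the centroid. The sign
        # convention is normalized below.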
plane3 = np.empty(shape=(4), dtype=np.float32)
centroid = np.mean(points, 0)
demeaned_pts3D = points - centroid
_MtM = np.matmul(demeaned_pts3D.transpose(), demeaned_pts3D)
        [_, v] = LA.eigh(_MtM)
plane3[0:3] = v[:, 0]
plane3[3] = -np.dot(plane3[0:3], centroid[:])
if (abs(plane3[3]) > abs(plane3[2])): # use d as criterion
if (plane3[3] < 0):
plane3 = -plane3
elif (plane3[2] > 0): # use c as criterion
plane3 = -plane3
return plane3
def smoothing(self, img, filtersize):
        img = cv2.GaussianBlur(img, (filtersize, filtersize), 0)  # sigma of 0 lets OpenCV derive it from the kernel size
return img
def visualizePlaneImage(self, plane_img):
#imageio.imwrite('0002.tiff', plane_img)
#cv2.namedWindow("planeImage", cv2.WINDOW_NORMAL)
vis_plane_img_c = np.zeros(shape=(plane_img.shape[0], plane_img.shape[1], 3), dtype=np.uint8)
for y in range(0, plane_img.shape[0]):
for x in range(0, plane_img.shape[1]):
vis_plane_img_c[y, x, :] = plane_img[y, x, 2] * 255
cv2.imshow("c channel", vis_plane_img_c)
cv2.imshow("abs_planeImage", abs(plane_img[:, :, 0:3]))
cv2.imshow("ori_planeImage", plane_img[:, :, 0:3])
cv2.waitKey()
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=med_frq):
super(CrossEntropyLoss2d, self).__init__()
self.ce_loss = nn.CrossEntropyLoss(torch.from_numpy(np.array(weight)).float(),
reduction='none')
|
def forward(self, inputs_scales, targets_scales):
losses = []
for inputs, targets in zip(inputs_scales, targets_scales):
            mask = targets > 1e-3  # effectively "targets > 0"; label 0 can be stored as a tiny float (e.g. 2e-9),
            targets_m = targets.clone()  # and subtracting 1 from such a value below would give label -1 and an IndexError
|
random_line_split
|
|
utils.py
|
79), (163, 221, 160), (31, 146, 98),
(99, 121, 30), (49, 89, 240), (116, 108, 9),
(161, 176, 169), (80, 29, 135), (177, 105, 197),
(139, 110, 246)]
'''
def download_trained_weights(model_name, model_path, verbose=1):
"""
    Download trained weights from a previous training run on depth or RGB-D images.
"""
    # model_path: local path where the trained weights will be saved
if verbose > 0:
print("Downloading pretrained model to " + model_path + " ...")
if model_name == 'depth':
with contextlib.closing(request.urlopen(DEPTH_TRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
elif model_name == 'rgbd':
with contextlib.closing(request.urlopen(REDNET_PRETRAINED_MODEL)) as resp, open(model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
'''
'''
#An example of using depth2plane:
fittingSize = 2
import imageio
from utils.utils import depth2plane
depth = imageio.imread('./data/SUNRGBD/kv1/NYUdata/NYU0002/depth_bfx/NYU0002.png')
plane = depth2plane(depth, extrinsic, intrinsic, fittingSize)
planeImage = plane.getPlaneImage()
plane.visualizePlaneImage(planeImage)
'''
class depth2plane:
def __init__(self, depth, extrinsic, intrinsic, fittingSize=5):
self.depthImage = depth
self.extrinsic = extrinsic
self.intrinsic = intrinsic
self.fittingSize = fittingSize
def getPlaneImage(self):
planeImage = self.estimate_planes()
return planeImage
def matrix_from_txt(self, file):
f = open(file)
l = []
for line in f.readlines():
line = line.strip('\n')
for j in range(len(list(line.split()))):
l.append(line.split()[j])
matrix = np.array(l, dtype=np.float32)
return matrix
def getCameraInfo(self):
K = self.intrinsic
ifocal_length_x = 1.0/K[0]
ifocal_length_y = 1.0/K[4]
center_x = K[2]
center_y = K[5]
camera_pose = self.extrinsic
camera_pose = camera_pose.reshape(3, 4)
Rtilt = camera_pose[0:3, 0:3]
#A = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0], dtype=np.float32)
#A = A.reshape(3, 3)
#B = np.array([1, 0, 0, 0, 0, -1, 0, 1, 0], dtype=np.float32)
#B = B.reshape(3, 3)
#Rtilt = A*Rtilt*B
#Rtilt = np.matmul(np.matmul(A, Rtilt), B)
return ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt
def bitShiftDepthMap(self, depthImage):
depthVisData = np.asarray(depthImage, np.uint16)
depthInpaint = np.bitwise_or(np.right_shift(depthVisData, 3), np.left_shift(depthVisData, 16 - 3))
depthInpaint = depthInpaint.astype(np.single) / 1000
depthInpaint[depthInpaint > 8] = 8
return depthInpaint
def depthImage2ptcloud(self, depth_img):
[ifocal_length_x, ifocal_length_y, center_x, center_y, Rtilt] = self.getCameraInfo()
ptCloud = np.zeros(shape=(int(depth_img.shape[0]), int(depth_img.shape[1]), 3), dtype=np.float32)
        for y in range(0, depth_img.shape[0]):
            for x in range(0, depth_img.shape[1]):
depth = depth_img[y, x]
if (~np.isnan(depth)):
#print(depth)
ptCloud[y, x, 0] = ifocal_length_x * (x - center_x) * depth
ptCloud[y, x, 1] = ifocal_length_y * (y - center_y) * depth
ptCloud[y, x, 2] = depth
points3d = ptCloud.reshape(-1, 3) # [height*width, 3]
ptCloud = (np.matmul(Rtilt, points3d.T)).T
        ptCloud = ptCloud.astype(np.float32)
ptCloud = ptCloud.reshape(depth_img.shape[0], depth_img.shape[1], 3) # [height, width, 3]
return ptCloud
def estimate_planes(self):
winsize = self.fittingSize
depthImage = self.depthImage
depthImage = self.smoothing(depthImage, filtersize=3)
        #depth_img = self.bitShiftDepthMap(depthImage) # no need to shift in a real scenario
depth_img = depthImage
ptCloud = self.depthImage2ptcloud(depth_img)
        #ptCloud = self.smoothing(ptCloud, filtersize=5) # smoothing the inpainted depth image causes non-convergence in the fitting
planes_img = np.zeros(shape=(int(depth_img.shape[0] / winsize), int(depth_img.shape[1] / winsize), 4), dtype=np.float32)
for y in range(0, depth_img.shape[0]-winsize, winsize):
for x in range(0, depth_img.shape[1]-winsize, winsize):
windowDepths = depth_img[y:(y + winsize + 1), x:(x + winsize + 1)]
# print(windowDepths)
numValidPoints = np.count_nonzero(~np.isnan(windowDepths))
# print(numValidPoints)
if (numValidPoints < 3):
plane3 = np.array([0, 0, 0, 0])
else:
pts3D = np.empty(shape=(numValidPoints, 3), dtype=np.float32)
offset = 0
# print(pts3D)
for ywin in range(0, winsize + 1):
for xwin in range(0, winsize + 1):
if (~np.isnan(ptCloud[y + ywin, x + xwin, 2])):
pts3D[offset, :] = ptCloud[y + ywin, x + xwin, :]
offset += 1
plane3 = self.fitPlaneImplicitLeastSquares(pts3D)
planes_img[int(y/winsize), int(x/winsize), :] = plane3
return planes_img
def fitPlaneImplicitLeastSquares(self, points):
from numpy import linalg as LA
plane3 = np.empty(shape=(4), dtype=np.float32)
centroid = np.mean(points, 0)
demeaned_pts3D = points - centroid
_MtM = np.matmul(demeaned_pts3D.transpose(), demeaned_pts3D)
        [_, v] = LA.eigh(_MtM)
plane3[0:3] = v[:, 0]
plane3[3] = -np.dot(plane3[0:3], centroid[:])
if (abs(plane3[3]) > abs(plane3[2])): # use d as criterion
if (plane3[3] < 0):
plane3 = -plane3
elif (plane3[2] > 0): # use c as criterion
plane3 = -plane3
return plane3
def smoothing(self, img, filtersize):
        img = cv2.GaussianBlur(img, (filtersize, filtersize), 0)  # sigma of 0 lets OpenCV derive it from the kernel size
return img
def visualizePlaneImage(self, plane_img):
#imageio.imwrite('0002.tiff', plane_img)
#cv2.namedWindow("planeImage", cv2.WINDOW_NORMAL)
vis_plane_img_c = np.zeros(shape=(plane_img.shape[0], plane_img.shape[1], 3), dtype=np.uint8)
for y in range(0, plane_img.shape[0]):
for x in range(0, plane_img.shape[1]):
vis_plane_img_c[y, x, :] = plane_img[y, x, 2] * 255
cv2.imshow("c channel", vis_plane_img_c)
cv2.imshow("abs_planeImage", abs(plane_img[:, :, 0:3]))
cv2.imshow("ori_planeImage", plane_img[:, :, 0:3])
cv2.waitKey()
class CrossEntropyLoss2d(nn.Module):
def
|
__init__
|
identifier_name
|
|
app.component.ts
|
// console.log(p);
// });
// But we can get the URL
// If routeGuard hinders navigation, the event will contain redirect route, and not the hindered route! If no redirect route, event will not fire.
this.router.events
.filter(event => event instanceof NavigationEnd)
.subscribe(event => {
// console.log(event);
});
this.userSubscription = this.authService.user$.subscribe(user => this.user = user);
console.log(this.fooPipe.transform('pipe'));
}
ngOnDestroy() {
this.userSubscription.unsubscribe();
}
logOut() {
this.authService.logout();
}
displayMessages() {
this.router.navigate([{ outlets: { popup: ['messages'] } }]);
this.messageService.isDisplayed = true;
}
hideMessages() {
this.router.navigate([{ outlets: { popup: null } }]);
this.messageService.isDisplayed = false;
}
}
/* tslint:disable */
/*
When injecting authService as public and using it in the template like this: <li *ngIf="!authService.isLoggedIn()">
the isLoggedIn() function is called whenever something "happens", for instance when the user types something or navigates.
The function is called 4 times.
Let's instead try to use an event emitter.
*/
/*
# PROVIDING DATA WITH A ROUTE
- Route parameters like :id
- See product-routing.module, product-edit.component and product-detail.component
- Optional route parameters, as seen below
- Query parameters
- As seen in product-list.component and product-detail.component (see also comments in templates) to retain settings for filter and showing images
- Route:s data property
- Can not change, use for static data
- See product-routing.module and product-list.component for retrieving it
- Route Resolver
- See product-routing.module, product-detail.component, product-edit.component, product-edit-info.component and product-edit-tags.component
- Remember that the resolved data is one instance, shared between routable components that fetches the data
- Service that is injected in different components, holding some state
# OPTIONAL ROUTE PARAMETERS
They must come last
from template
[routerLink]="['foo', bar.id, { key: value }]"
from code
this.router.navigate(['foo', bar.id, { key: value }]);
const id = +this.activatedRoute.snapshot.params['id'];
const key = this.activatedRoute.snapshot.params['key'];
use paramMap instead: https://stackoverflow.com/questions/47809357/angular-4-5-route-parammap-vs-params , https://angular.io/api/router/ParamMap , https://angular.io/guide/router#activated-route-in-action
We do NOT configure optional route parameters in route configuration
paramMap vs queryParamMap
https://stackoverflow.com/questions/49615857/angular-parammap-vs-queryparammap
# CHILD ROUTES
-- Display routed component:s templates in other routed component:s templates
- We use <router-outlet> in app.component.html and templates are loaded there
- We can use a nested <router-outlet>, in other words a router-outlet in for example product-edit.component
- We then use child routes, component templates that appears in the nested router-outlet
- "Required for lazy loading"
- See product-routing.module for info, and product-edit-info.component and product-edit-tags.component for getting resolve data in child route components
# VALIDATING FORM CONTROLS IN CHILD ROUTE COMPONENTS
- The child route components are destroyed when the user navigates between child routes, which means form validation state is also destroyed
- When using template driven forms, it won't work to put the <form> in the parent component and the form controls in child components; Angular will ignore form controls inside a <router-outlet>
- We can use a <form> in each child component and use manual validation that is not dependent on the form
- I think that with reactive forms / model driven forms we could have a form in each sub route... but I am not sure at the moment
- The teacher is using template driven forms, so let's just go along with the course for now
# ROUTING EVENTS
- use { enableTracing: true } as second argument for RouterModule.forRoot() to see them, see app-routing.module
# SECONDARY ROUTES
- They only make sense if we want to show multiple routable components in a router-outlet; in our case we only show messages, so a single plain and simple component would have been enough
- See app.component for the <router-outlet> and links with a name, and messages-routing.module for the route
- Use a link like this to trigger a secondary route: [routerLink]="[{ outlets: { popup: ['messages'] } }]"
- Can navigate to both a primary route and a secondary route, though the teacher says it is buggy in her version of Angular
[routerLink]="['/products', product.id, 'edit', { outlets: { popup: ['messages', foo.id] } } ]"
- from code: this.router.navigate([{ outlets: { popup: ['messages'] } }]);
- for routing to both primary and secondary, same syntax as above ... although buggy
- The teacher says this is a workaround for the bug (because the unnamed outlet is named primary by default)
this.router.navigate(
[
{
outlets: {
        primary: ['a', a.id],
popup: ['foo', bar.id]
}
}
]
);
Other option is to use the navigateByUrl and build the URL manually
this.router.navigateByUrl('/products/5/edit(popup:summary/5)');
We can clear a secondary route by passing null: [routerLink]="[{ outlets: { popup: null } }]"
Same principle when navigating from code
Can also use this.router.navigateByUrl('/foo')
# ROUTE GUARDS
-- Limit access, warn before leaving
- canActivate
- canDeactivate
- canActivateChild
- No canDeactivateChild: https://github.com/angular/angular/issues/11836
- canLoad (prevent async routing)
Guard Processing
canDeactivate > canLoad > canActivateChild > canActivate > resolve
If any one of them returns false, all the others are canceled and the route request is canceled
RouteGuards are constructed as services or functions. The service must be provided at the module level.
Adding a guard to a parent guards each of its children.
"Example: You could have a situation where a user must be authenticated to navigate to the root component, but must have permission 'x' to get to child components.
In cases like this, canActivateChild saves a lot of typing from having to add canActivate guards to each of the children."
https://stackoverflow.com/questions/42632154/trying-to-understand-the-differences-between-canactivate-and-canactivatechild/42632375
canActivate is not re-executed when child routes are requested again, but canActivateChild is re-executed.
See auth-guard.service and product-routing.module
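A minimal canActivate guard, roughly how auth-guard.service might look (a sketch only; the import paths and the AuthService API are assumptions, not copied from this repo)
import { Injectable } from '@angular/core';
import { CanActivate, Router } from '@angular/router';
import { AuthService } from './user/auth.service';
@Injectable()
export class AuthGuard implements CanActivate {
  constructor(private authService: AuthService, private router: Router) { }
  canActivate(): boolean {
    if (this.authService.isLoggedIn()) {
      return true;
    }
    this.router.navigate(['/login']);
    return false;
  }
}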
# LAZY LOADING
-- A chunk of javascript (with css and templates) for a module is only downloaded when a user is requesting a route
- Preparing
- Use a feature module with routable components, the scripts and css for all components in that module will then be lazy loaded
- Lazy loaded routes should be grouped under a single parent, because lazy loading is configured on the parent route
- DO NOT import the lazy loaded feature module in ANY other module
With a canActivate guard on a lazy loaded route, the chunk for that module is requested and downloaded even if access is denied.
We can instead use a canLoad guard to prevent that
See app-routing.module, auth-guard.service
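A rough sketch of a lazy loaded, canLoad-guarded route (the string loadChildren form matches older Angular versions; the module path and guard name are assumptions)
{
  path: 'products',
  canLoad: [AuthGuard],
  loadChildren: './products/product.module#ProductModule'
}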
# Preloading (Eager Lazy Loading)
-- We can preload a lazy loaded module while the user is interacting with the app
Preloading makes sense if we KNOW that a lazy loaded module will likely be used
Preload strategies
- No preloading, this is default with a lazy loaded route
- Preload all, which preloads all after app is loaded
- Custom, we can define our own strategy
The strategy is set in root route configuration
- So preload all will apply to ALL lazy loaded routes
- For more fine grained control we use a custom strategy
- Build a preloading strategy service*
- Provide it in a module
- Set it in root route configuration
OBS
canLoad will block preloading, since checks in its logic could give a strange user experience, for example navigating to /login when user is on start page
Use canActivate instead, since logic for guarding will only be run when user tries to navigate to the guarded route
See app-routing.module
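Wiring a custom strategy into the root configuration could look roughly like this (SelectiveStrategy is the service from the footnote below; appRoutes, the route path and module path are placeholders)
RouterModule.forRoot(appRoutes, { preloadingStrategy: SelectiveStrategy })
{ path: 'products', data: { preload: true }, loadChildren: './products/product.module#ProductModule' }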
*Custom Preload service: https://github.com/DeborahK/Angular-Routing/blob/master/APM-Final/src/app/selective-strategy.service.ts
import { Injectable } from '@angular/core';
import { Route, PreloadingStrategy } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/observable/of';
@Injectable()
export class SelectiveStrategy implements PreloadingStrategy {
preload(route: Route, load: Function): Observable<any> {
// using the data property to determine if a route should be preloaded -- eagerly loaded or lazy loaded
if (route.data && route.data['preload']) {
return load();
}
|
return Observable.of(null);
|
random_line_split
|
|
app.component.ts
|
AppComponent implements OnInit, OnDestroy {
userSubscription: Subscription;
user: UserModel = null;
pageTitle = 'Acme Product Management';
constructor(
private router: Router,
private authService: AuthService,
public messageService: MessageService,
private fooPipe: FooPipe
) {
}
|
() {
// Can we get to the data property from this not routable component? No!
// console.log(this.activatedRoute.snapshot.data['pageTitle']);
// Can we subscribe to a resolve from this not routable component? No!
// this.activatedRouteSubscription = this.activatedRoute.data.subscribe((data) => {
// console.log(data);
// });
// Can we get to route parameters from this not routable component? No!
// "ActivatedRoute won't work on AppComponent" <=======================
// https://stackoverflow.com/questions/40012369/how-to-get-the-active-route-in-app-component
// https://github.com/angular/angular/issues/11023
// this.routeParamSubscription = this.activatedRoute.paramMap.subscribe((p) => {
// console.log(p);
// });
// But we can get the URL
// If routeGuard hinders navigation, the event will contain redirect route, and not the hindered route! If no redirect route, event will not fire.
this.router.events
.filter(event => event instanceof NavigationEnd)
.subscribe(event => {
// console.log(event);
});
this.userSubscription = this.authService.user$.subscribe(user => this.user = user);
console.log(this.fooPipe.transform('pipe'));
}
ngOnDestroy() {
this.userSubscription.unsubscribe();
}
logOut() {
this.authService.logout();
}
displayMessages() {
this.router.navigate([{ outlets: { popup: ['messages'] } }]);
this.messageService.isDisplayed = true;
}
hideMessages() {
this.router.navigate([{ outlets: { popup: null } }]);
this.messageService.isDisplayed = false;
}
}
/* tslint:disable */
/*
When injecting authService as public and using it in the template like this: <li *ngIf="!authService.isLoggedIn()">
the isLoggedIn() function is called whenever something "happens", for instance when the user types something or navigates.
The function is called 4 times.
Let's instead try to use an event emitter.
*/
/*
# PROVIDING DATA WITH A ROUTE
- Route parameters like :id
- See product-routing.module, product-edit.component and product-detail.component
- Optional route parameters, as seen below
- Query parameters
- As seen in product-list.component and product-detail.component (see also comments in templates) to retain settings for filter and showing images
- Route:s data property
- Can not change, use for static data
- See product-routing.module and product-list.component for retrieving it
- Route Resolver
- See product-routing.module, product-detail.component, product-edit.component, product-edit-info.component and product-edit-tags.component
- Remember that the resolved data is one instance, shared between routable components that fetches the data
- Service that is injected in different components, holding some state
# OPTIONAL ROUTE PARAMETERS
They must come last
from template
[routerLink]="['foo', bar.id, { key: value }]"
from code
this.router.navigate(['foo', bar.id, { key: value }]);
const id = +this.activatedRoute.snapshot.params['id'];
const key = this.activatedRoute.snapshot.params['key'];
use paramMap instead: https://stackoverflow.com/questions/47809357/angular-4-5-route-parammap-vs-params , https://angular.io/api/router/ParamMap , https://angular.io/guide/router#activated-route-in-action
We do NOT configure optional route parameters in route configuration
paramMap vs queryParamMap
https://stackoverflow.com/questions/49615857/angular-parammap-vs-queryparammap
# CHILD ROUTES
-- Display routed component:s templates in other routed component:s templates
- We use <router-outlet> in app.component.html and templates are loaded there
- We can use a nested <router-outlet>, in other words a router-outlet in for example product-edit.component
- We then use child routes, component templates that appears in the nested router-outlet
- "Required for lazy loading"
- See product-routing.module for info, and product-edit-info.component and product-edit-tags.component for getting resolve data in child route components
# VALIDATING FORM CONTROLS IN CHILD ROUTE COMPONENTS
- The child route components are destroyed when the user navigates between child routes, which means form validation state is also destroyed
- When using template driven forms, it won't work to put the <form> in the parent component and the form controls in child components; Angular will ignore form controls inside a <router-outlet>
- We can use a <form> in each child component and use manual validation that is not dependent on the form
- I think that with reactive forms / model driven forms we could have a form in each sub route... but I am not sure at the moment
- The teacher is using template driven forms, so let's just go along with the course for now
# ROUTING EVENTS
- use { enableTracing: true } as second argument for RouterModule.forRoot() to see them, see app-routing.module
# SECONDARY ROUTES
- They only make sense if we want to show multiple routable components in a router-outlet; in our case we only show messages, so a single plain and simple component would have been enough
- See app.component for the <router-outlet> and links with a name, and messages-routing.module for the route
- Use a link like this to trigger a secondary route: [routerLink]="[{ outlets: { popup: ['messages'] } }]"
- Can navigate to both a primary route and a secondary route, though the teacher says it is buggy in her version of Angular
[routerLink]="['/products', product.id, 'edit', { outlets: { popup: ['messages', foo.id] } } ]"
- from code: this.router.navigate([{ outlets: { popup: ['messages'] } }]);
- for routing to both primary and secondary, same syntax as above ... although buggy
- The teacher says this is a workaround for the bug (because the unnamed outlet is named primary by default)
this.router.navigate(
[
{
outlets: {
        primary: ['a', a.id],
popup: ['foo', bar.id]
}
}
]
);
Other option is to use the navigateByUrl and build the URL manually
this.router.navigateByUrl('/products/5/edit(popup:summary/5)');
We can clear a secondary route by passing null: [routerLink]="[{ outlets: { popup: null } }]"
Same principle when navigating from code
Can also use this.router.navigateByUrl('/foo')
# ROUTE GUARDS
-- Limit access, warn before leaving
- canActivate
- canDeactivate
- canActivateChild
- No canDeactivateChild: https://github.com/angular/angular/issues/11836
- canLoad (prevent async routing)
Guard Processing
canDeactivate > canLoad > canActivateChild > canActivate > resolve
If any one of them returns false, all the others are canceled and the route request is canceled
RouteGuards are constructed as services or functions. The service must be provided at the module level.
Adding a guard to a parent guards each of its children.
"Example: You could have a situation where a user must be authenticated to navigate to the root component, but must have permission 'x' to get to child components.
In cases like this, canActivateChild saves a lot of typing from having to add canActivate guards to each of the children."
https://stackoverflow.com/questions/42632154/trying-to-understand-the-differences-between-canactivate-and-canactivatechild/42632375
canActivate is not re-executed when child routes are requested again, but canActivateChild is re-executed.
See auth-guard.service and product-routing.module
# LAZY LOADING
-- A chunk of javascript (with css and templates) for a module is only downloaded when a user is requesting a route
- Preparing
- Use a feature module with routable components, the scripts and css for all components in that module will then be lazy loaded
- Lazy loaded routes should be grouped under a single parent, because lazy loading is configured on the parent route
- DO NOT import the lazy loaded feature module in ANY other module
With a canActivate guard on a lazy loaded route, the chunk for that module is requested and downloaded even if access is denied.
We can instead use a canLoad guard to prevent that
See app-routing.module, auth-guard.service
# Preloading (Eager Lazy Loading)
-- We can preload a lazy loaded module while the user is interacting with the app
Preloading makes sense if we KNOW that a lazy loaded module will likely be used
Preload strategies
- No preloading, this is default with a lazy loaded route
- Preload all, which preloads all after app is loaded
- Custom, we can define our own strategy
The strategy is set in root route configuration
- So preload all will apply to ALL lazy loaded routes
- For more fine grained control we use a custom strategy
- Build a preloading strategy service*
|
ngOnInit
|
identifier_name
|
app.component.ts
|
AppComponent implements OnInit, OnDestroy {
userSubscription: Subscription;
user: UserModel = null;
pageTitle = 'Acme Product Management';
constructor(
private router: Router,
private authService: AuthService,
public messageService: MessageService,
private fooPipe: FooPipe
) {
}
ngOnInit()
|
.filter(event => event instanceof NavigationEnd)
.subscribe(event => {
// console.log(event);
});
this.userSubscription = this.authService.user$.subscribe(user => this.user = user);
console.log(this.fooPipe.transform('pipe'));
}
ngOnDestroy() {
this.userSubscription.unsubscribe();
}
logOut() {
this.authService.logout();
}
displayMessages() {
this.router.navigate([{ outlets: { popup: ['messages'] } }]);
this.messageService.isDisplayed = true;
}
hideMessages() {
this.router.navigate([{ outlets: { popup: null } }]);
this.messageService.isDisplayed = false;
}
}
/* tslint:disable */
/*
When injecting authService as public and using it in the template like this: <li *ngIf="!authService.isLoggedIn()">
the isLoggedIn() function is called whenever something "happens", for instance when the user types something or navigates.
The function is called 4 times.
Let's instead try to use an event emitter.
*/
/*
# PROVIDING DATA WITH A ROUTE
- Route parameters like :id
- See product-routing.module, product-edit.component and product-detail.component
- Optional route parameters, as seen below
- Query parameters
- As seen in product-list.component and product-detail.component (see also comments in templates) to retain settings for filter and showing images
- Route:s data property
- Can not change, use for static data
- See product-routing.module and product-list.component for retrieving it
- Route Resolver
- See product-routing.module, product-detail.component, product-edit.component, product-edit-info.component and product-edit-tags.component
- Remember that the resolved data is one instance, shared between routable components that fetches the data
- Service that is injected in different components, holding some state
# OPTIONAL ROUTE PARAMETERS
They must come last
from template
[routerLink]="['foo', bar.id, { key: value }]"
from code
this.router.navigate(['foo', bar.id, { key: value }]);
const id = +this.activatedRoute.snapshot.params['id'];
const key = this.activatedRoute.snapshot.params['key'];
use paramMap instead: https://stackoverflow.com/questions/47809357/angular-4-5-route-parammap-vs-params , https://angular.io/api/router/ParamMap , https://angular.io/guide/router#activated-route-in-action
We do NOT configure optional route parameters in route configuration
paramMap vs queryParamMap
https://stackoverflow.com/questions/49615857/angular-parammap-vs-queryparammap
# CHILD ROUTES
-- Display routed component:s templates in other routed component:s templates
- We use <router-outlet> in app.component.html and templates are loaded there
- We can use a nested <router-outlet>, in other words a router-outlet in for example product-edit.component
- We then use child routes, component templates that appears in the nested router-outlet
- "Required for lazy loading"
- See product-routing.module for info, and product-edit-info.component and product-edit-tags.component for getting resolve data in child route components
# VALIDATING FORM CONTROLS IN CHILD ROUTE COMPONENTS
- The child route components are destroyed when the user navigates between child routes, which means form validation state is also destroyed
- When using template driven forms, it won't work to put the <form> in the parent component and the form controls in child components; Angular will ignore form controls inside a <router-outlet>
- We can use a <form> in each child component and use manual validation that is not dependent on the form
- I think that with reactive forms / model driven forms we could have a form in each sub route... but I am not sure at the moment
- The teacher is using template driven forms, so let's just go along with the course for now
# ROUTING EVENTS
- use { enableTracing: true } as second argument for RouterModule.forRoot() to see them, see app-routing.module
# SECONDARY ROUTES
- They only make sense if we want to show multiple routable components in a router-outlet; in our case we only show messages, so a single plain and simple component would have been enough
- See app.component for the <router-outlet> and links with a name, and messages-routing.module for the route
- Use a link like this to trigger a secondary route: [routerLink]="[{ outlets: { popup: ['messages'] } }]"
- Can navigate to both a primary route and a secondary route, though the teacher says it is buggy in her version of Angular
[routerLink]="['/products', product.id, 'edit', { outlets: { popup: ['messages', foo.id] } } ]"
- from code: this.router.navigate([{ outlets: { popup: ['messages'] } }]);
- for routing to both primary and secondary, same syntax as above ... although buggy
- The teacher says this is a workaround for the bug (because the unnamed outlet is named primary by default)
this.router.navigate(
[
{
outlets: {
        primary: ['a', a.id],
popup: ['foo', bar.id]
}
}
]
);
Other option is to use the navigateByUrl and build the URL manually
this.router.navigateByUrl('/products/5/edit(popup:summary/5)');
We can clear a secondary route by passing null: [routerLink]="[{ outlets: { popup: null } }]"
Same principle when navigating from code
Can also use this.router.navigateByUrl('/foo')
# ROUTE GUARDS
-- Limit access, warn before leaving
- canActivate
- canDeactivate
- canActivateChild
- No canDeactivateChild: https://github.com/angular/angular/issues/11836
- canLoad (prevent async routing)
Guard Processing
canDeactivate > canLoad > canActivateChild > canActivate > resolve
If any one of them returns false, all the others are canceled and the route request is canceled
RouteGuards are constructed as services or functions. The service must be provided at the module level.
Adding a guard to a parent guards each of its children.
"Example: You could have a situation where a user must be authenticated to navigate to the root component, but must have permission 'x' to get to child components.
In cases like this, canActivateChild saves a lot of typing from having to add canActivate guards to each of the children."
https://stackoverflow.com/questions/42632154/trying-to-understand-the-differences-between-canactivate-and-canactivatechild/42632375
canActivate is not re-executed when child routes are requested again, but canActivateChild is re-executed.
See auth-guard.service and product-routing.module
# LAZY LOADING
-- A chunk of javascript (with css and templates) for a module is only downloaded when a user is requesting a route
- Preparing
- Use a feature module with routable components, the scripts and css for all components in that module will then be lazy loaded
- Lazy loaded routes should be grouped under a single parent, because lazy loading is configured on the parent route
- DO NOT import the lazy loaded feature module in ANY other module
With a canActivate guard on a lazy loaded route, the chunk for that module is requested and downloaded even if access is denied.
We can instead use a canLoad guard to prevent that
See app-routing.module, auth-guard.service
# Preloading (Eager Lazy Loading)
-- We can preload a lazy loaded module while the user is interacting with the app
Preloading makes sense if we KNOW that a lazy loaded module will likely be used
Preload strategies
- No preloading, this is default with a lazy loaded route
- Preload all, which preloads all after app is loaded
- Custom, we can define our own strategy
The strategy is set in root route configuration
- So preload all will apply to ALL lazy loaded routes
- For more fine grained control we use a custom strategy
- Build a preloading strategy service*
|
{
// Can we get to the data property from this not routable component? No!
// console.log(this.activatedRoute.snapshot.data['pageTitle']);
// Can we subscribe to a resolve from this not routable component? No!
// this.activatedRouteSubscription = this.activatedRoute.data.subscribe((data) => {
// console.log(data);
// });
// Can we get to route parameters from this not routable component? No!
// "ActivatedRoute won't work on AppComponent" <=======================
// https://stackoverflow.com/questions/40012369/how-to-get-the-active-route-in-app-component
// https://github.com/angular/angular/issues/11023
// this.routeParamSubscription = this.activatedRoute.paramMap.subscribe((p) => {
// console.log(p);
// });
// But we can get the URL
// If routeGuard hinders navigation, the event will contain redirect route, and not the hindered route! If no redirect route, event will not fire.
this.router.events
|
identifier_body
|
generate.rs
|
███EARNS█ACE
HORNSWOGGLED█ANTS
███TEABAG█SIAM███
DOB██HIS███GRACED
OVERDO█BAMBOOZLED
LATINO█AGAR█MOOLA
LLAMAS█GAGA█ANTSY";
fn write_impl() {
let mut rows = vec![];
for line in GRID.split('\n') {
let mut row = vec![];
for c in line.chars() {
if c == '█' {
row.push(Cell::Black);
} else {
row.push(Cell::White(Letter::from_unicode(c)));
}
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| rows[y][x]);
println!("{:?}", grid);
let windows = WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)] != Cell::Black));
let mut clues = HashMap::<&str, &str>::new();
clues.insert("PIED", "Like the proverbial piper");
clues.insert("DNA", "Twisted pair?");
clues.insert("PAL", "Alternative to NTSC");
clues.insert("LATINO", "16.7% of the American population");
clues.insert("IDAHO", "The Gem State");
clues.insert("ARES", "Foe of Wonderwoman");
clues.insert("ELECT", "Opt (to)");
clues.insert("IRE", "Choler");
clues.insert("INDIGO", "Infraviolet?");
clues.insert("TEABAG", "Rude post-victory celebration");
clues.insert("MAG", "Toner color: abbr.");
clues.insert("OVERDO", "Cook for 20 minutes, as pasta");
clues.insert("ADD", "More: abbr.");
clues.insert("BETA", "Advice, in climbing jargon");
clues.insert("ARK", "Couple's cruise ship?");
clues.insert("AIL", "Bedevil");
clues.insert("EGG", "Urge (on)");
clues.insert("BREATH", "Form of investiture on Nalthis");
clues.insert("GRACED", "Adorned");
clues.insert("OLEO", "Hydrogenated food product");
clues.insert("ODDS", "What were the ____?");
clues.insert("GEODE", "Rock formation that starts as a gas bubble");
clues.insert("HIS", "Label on a towel");
clues.insert("LEON", "A large gato");
clues.insert("ADDLED", "Like a brain in love");
clues.insert("WAHOOS", "Exclamations of joy");
clues.insert("ARAB", "Desert steed");
clues.insert("ABDUCT", "Take, as by a UFO");
clues.insert("MBA", "Degree for CEOs");
|
clues.insert("CHEAP", "Overpowered, in the 90's");
clues.insert("RAG", "with \"on\", tease");
clues.insert("OVA", "Largest human cells");
clues.insert("RALLY", "Make a comeback, as a military force");
clues.insert("ANTS", "Pants' contents?");
clues.insert("EDIT", "Amend");
clues.insert("AGAR", "Gelatin alternative");
clues.insert("ASIA", "Home of the Indian elephant");
clues.insert("AGA", "Ottoman honorific");
clues.insert("THAI", "Basil variety");
clues.insert("HORNSWOGGLED", "How you feel after solving this puzzle");
clues.insert("SOS", "[Help!]");
clues.insert("EDGER", "Lawnkeeping tool");
clues.insert("OBI", "Kimono part");
clues.insert("RIBALD", "Blue");
clues.insert("ANTLER", "Classic sexual dimorphism feature");
clues.insert("HOODWINKED", "How you feel after solving this puzzle");
clues.insert("ACE", "Skilled pilot");
clues.insert("NASA", "Apollo originator");
clues.insert("EELS", "Fish caught in pots");
clues.insert("NAN", "IEEE-754 reflexivity violator");
clues.insert("DDAY", "Action time");
clues.insert("SIAM", "Name on old Risk boards");
clues.insert("EARLS", "Superiors to viscounts");
clues.insert("USA", "Home of Athens, Berlin, Milan, Palermo, Tripoli, Versailles, and Vienna: abbr");
clues.insert("BAMBOO", "One of the fasting growing plants in the world");
clues.insert("ALLAH", "Being with 99 names");
clues.insert("PAIL", "Bucket");
clues.insert("PARCH", "Scorch");
clues.insert("HEWED", "Sawn");
clues.insert("IRISES", "Organic annuli");
clues.insert("BRA", "Supporter of women?");
clues.insert("AROMA", "Bakery attractant");
clues.insert("LAPSE", "Gap");
clues.insert("GASBAG", "Yapper");
clues.insert("ANA", "Serbian tennis player Ivanovic");
clues.insert("ELATED", "On cloud nine");
clues.insert("AGHA", "Ottoman honorific");
clues.insert("BATS", "Spreaders of White-Nose syndrome");
clues.insert("OVAL", "Egg-like");
clues.insert("SEC", "Short time, for short");
clues.insert("MOOLA", "\"Cheddar\"");
clues.insert("DOLL", "\"It's an action figure, not a ____\"");
clues.insert("GLEAN", "Reap");
clues.insert("EARNS", "Reaps");
clues.insert("ANTSY", "On edge");
clues.insert("ANT", "Inspiration for a size-warping Marvel hero");
clues.insert("RIM", "Lid connector");
clues.insert("BAMBOOZLED", "How you feel after solving this puzzle");
clues.insert("LOOT", "Reward for killing things, in video games");
clues.insert("SHORTCHANGED", "How you feel after solving this puzzle");
clues.insert("EEK", "[A mouse!]");
clues.insert("GAGA", "Player of the Hotel owner in \"American Horror Story: Hotel\"");
clues.insert("LLAMAS", "Halfway between a tibetan priest and major fire?");
clues.insert("DYKES", "Common earthworks");
clues.insert("SAE", "Standards organization for cars");
clues.insert("CLOT", "Response to injury, or cause of illness");
clues.insert("AMAZON", "Origin of Wonderwoman");
clues.insert("LEAVE", "\"Make like a tree and _____\"");
let mut clue_map = HashMap::new();
for (word, clue) in clues {
clue_map.insert(Word::from_str(word).unwrap(), clue);
}
let mut clue_list = vec![];
for window in windows.windows() {
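        // Read back the word that the pre-filled grid spells along this window (an
        // across/down slot); every white cell is expected to already hold a letter.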
let word: Word = window.positions().map(|(x, y)| match grid[(x, y)] {
Cell::White(Some(x)) => x,
_ => unreachable!(),
}).collect();
clue_list.push((window, clue_map[&word].clone()));
}
clue_list.sort_by_key(|(window, clue)| (window.position().0, window.position().1, window.direction()));
let puzzle = Puzzle {
preamble: vec![],
version: *b"1.4\0",
title: "The First Crossword".to_string(),
author: "Nathan Dobson".to_string(),
copyright: "".to_string(),
grid: Grid::new(grid.size(), |x, y| {
match grid[(x, y)] {
Cell::Black => None,
Cell::White(Some(x)) => Some(PuzzleCell {
solution: [x.to_unicode()].iter().cloned().collect(),
..Default::default()
}),
_ => panic!(),
}
}),
clues: WindowMap::new(clue_list.into_iter().map(|(window, clue)| (window, clue.to_string())), grid.size()),
note: "".to_string(),
};
let mut new_data: Vec<u8> = vec![];
puzzle.write_to(&mut new_data).unwrap();
fs::write("output.puz", &new_data).unwrap();
}
/*fn make_choices(dictionary: &[ScoredWord], grid: &Grid<Cell>, windows: &[Window]) {
let (window, options) = match windows.iter().enumerate().filter(|(index, window)| {
for position in window.positions() {
if grid[position] == Cell::White(None) {
return true;
}
}
false
}).map(|(index, window)| {
let words: Vec<ScoredWord> = dictionary.iter().filter(|word| {
if word.word.len() != window.length {
return false;
}
for (position, &letter) in window.positions().zip(word.word.iter()) {
match grid[position] {
Cell::White(Some(needed)) => if needed != letter {
return false;
}
_ => {}
}
}
true
}).cloned().collect();
(index, words)
}).
|
clues.insert("ICETEA", "???");
clues.insert("DOB", "Important date: abbr");
|
random_line_split
|
generate.rs
|
███EARNS█ACE
HORNSWOGGLED█ANTS
███TEABAG█SIAM███
DOB██HIS███GRACED
OVERDO█BAMBOOZLED
LATINO█AGAR█MOOLA
LLAMAS█GAGA█ANTSY";
fn write_impl() {
let mut rows = vec![];
for line in GRID.split('\n') {
let mut r
|
];
for c in line.chars() {
if c == '█' {
row.push(Cell::Black);
} else {
row.push(Cell::White(Letter::from_unicode(c)));
}
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| rows[y][x]);
println!("{:?}", grid);
let windows = WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)] != Cell::Black));
let mut clues = HashMap::<&str, &str>::new();
clues.insert("PIED", "Like the proverbial piper");
clues.insert("DNA", "Twisted pair?");
clues.insert("PAL", "Alternative to NTSC");
clues.insert("LATINO", "16.7% of the American population");
clues.insert("IDAHO", "The Gem State");
clues.insert("ARES", "Foe of Wonderwoman");
clues.insert("ELECT", "Opt (to)");
clues.insert("IRE", "Choler");
clues.insert("INDIGO", "Infraviolet?");
clues.insert("TEABAG", "Rude post-victory celebration");
clues.insert("MAG", "Toner color: abbr.");
clues.insert("OVERDO", "Cook for 20 minutes, as pasta");
clues.insert("ADD", "More: abbr.");
clues.insert("BETA", "Advice, in climbing jargon");
clues.insert("ARK", "Couple's cruise ship?");
clues.insert("AIL", "Bedevil");
clues.insert("EGG", "Urge (on)");
clues.insert("BREATH", "Form of investiture on Nalthis");
clues.insert("GRACED", "Adorned");
clues.insert("OLEO", "Hydrogenated food product");
clues.insert("ODDS", "What were the ____?");
clues.insert("GEODE", "Rock formation that starts as a gas bubble");
clues.insert("HIS", "Label on a towel");
clues.insert("LEON", "A large gato");
clues.insert("ADDLED", "Like a brain in love");
clues.insert("WAHOOS", "Exclamations of joy");
clues.insert("ARAB", "Desert steed");
clues.insert("ABDUCT", "Take, as by a UFO");
clues.insert("MBA", "Degree for CEOs");
clues.insert("ICETEA", "???");
clues.insert("DOB", "Important date: abbr");
clues.insert("CHEAP", "Overpowered, in the 90's");
clues.insert("RAG", "with \"on\", tease");
clues.insert("OVA", "Largest human cells");
clues.insert("RALLY", "Make a comeback, as a military force");
clues.insert("ANTS", "Pants' contents?");
clues.insert("EDIT", "Amend");
clues.insert("AGAR", "Gelatin alternative");
clues.insert("ASIA", "Home of the Indian elephant");
clues.insert("AGA", "Ottoman honorific");
clues.insert("THAI", "Basil variety");
clues.insert("HORNSWOGGLED", "How you feel after solving this puzzle");
clues.insert("SOS", "[Help!]");
clues.insert("EDGER", "Lawnkeeping tool");
clues.insert("OBI", "Kimono part");
clues.insert("RIBALD", "Blue");
clues.insert("ANTLER", "Classic sexual dimorphism feature");
clues.insert("HOODWINKED", "How you feel after solving this puzzle");
clues.insert("ACE", "Skilled pilot");
clues.insert("NASA", "Apollo originator");
clues.insert("EELS", "Fish caught in pots");
clues.insert("NAN", "IEEE-754 reflexivity violator");
clues.insert("DDAY", "Action time");
clues.insert("SIAM", "Name on old Risk boards");
clues.insert("EARLS", "Superiors to viscounts");
clues.insert("USA", "Home of Athens, Berlin, Milan, Palermo, Tripoli, Versailles, and Vienna: abbr");
clues.insert("BAMBOO", "One of the fasting growing plants in the world");
clues.insert("ALLAH", "Being with 99 names");
clues.insert("PAIL", "Bucket");
clues.insert("PARCH", "Scorch");
clues.insert("HEWED", "Sawn");
clues.insert("IRISES", "Organic annuli");
clues.insert("BRA", "Supporter of women?");
clues.insert("AROMA", "Bakery attractant");
clues.insert("LAPSE", "Gap");
clues.insert("GASBAG", "Yapper");
clues.insert("ANA", "Serbian tennis player Ivanovic");
clues.insert("ELATED", "On cloud nine");
clues.insert("AGHA", "Ottoman honorific");
clues.insert("BATS", "Spreaders of White-Nose syndrome");
clues.insert("OVAL", "Egg-like");
clues.insert("SEC", "Short time, for short");
clues.insert("MOOLA", "\"Cheddar\"");
clues.insert("DOLL", "\"It's an action figure, not a ____\"");
clues.insert("GLEAN", "Reap");
clues.insert("EARNS", "Reaps");
clues.insert("ANTSY", "On edge");
clues.insert("ANT", "Inspiration for a size-warping Marvel hero");
clues.insert("RIM", "Lid connector");
clues.insert("BAMBOOZLED", "How you feel after solving this puzzle");
clues.insert("LOOT", "Reward for killing things, in video games");
clues.insert("SHORTCHANGED", "How you feel after solving this puzzle");
clues.insert("EEK", "[A mouse!]");
clues.insert("GAGA", "Player of the Hotel owner in \"American Horror Story: Hotel\"");
clues.insert("LLAMAS", "Halfway between a tibetan priest and major fire?");
clues.insert("DYKES", "Common earthworks");
clues.insert("SAE", "Standards organization for cars");
clues.insert("CLOT", "Response to injury, or cause of illness");
clues.insert("AMAZON", "Origin of Wonderwoman");
clues.insert("LEAVE", "\"Make like a tree and _____\"");
let mut clue_map = HashMap::new();
for (word, clue) in clues {
clue_map.insert(Word::from_str(word).unwrap(), clue);
}
let mut clue_list = vec![];
for window in windows.windows() {
let word: Word = window.positions().map(|(x, y)| match grid[(x, y)] {
Cell::White(Some(x)) => x,
_ => unreachable!(),
}).collect();
clue_list.push((window, clue_map[&word].clone()));
}
clue_list.sort_by_key(|(window, clue)| (window.position().0, window.position().1, window.direction()));
let puzzle = Puzzle {
preamble: vec![],
version: *b"1.4\0",
title: "The First Crossword".to_string(),
author: "Nathan Dobson".to_string(),
copyright: "".to_string(),
grid: Grid::new(grid.size(), |x, y| {
match grid[(x, y)] {
Cell::Black => None,
Cell::White(Some(x)) => Some(PuzzleCell {
solution: [x.to_unicode()].iter().cloned().collect(),
..Default::default()
}),
_ => panic!(),
}
}),
clues: WindowMap::new(clue_list.into_iter().map(|(window, clue)| (window, clue.to_string())), grid.size()),
note: "".to_string(),
};
let mut new_data: Vec<u8> = vec![];
puzzle.write_to(&mut new_data).unwrap();
fs::write("output.puz", &new_data).unwrap();
}
/*fn make_choices(dictionary: &[ScoredWord], grid: &Grid<Cell>, windows: &[Window]) {
let (window, options) = match windows.iter().enumerate().filter(|(index, window)| {
for position in window.positions() {
if grid[position] == Cell::White(None) {
return true;
}
}
false
}).map(|(index, window)| {
let words: Vec<ScoredWord> = dictionary.iter().filter(|word| {
if word.word.len() != window.length {
return false;
}
for (position, &letter) in window.positions().zip(word.word.iter()) {
match grid[position] {
Cell::White(Some(needed)) => if needed != letter {
return false;
}
_ => {}
}
}
true
}).cloned().collect();
(index, words)
|
ow = vec![
|
identifier_name
|
generate.rs
|
, as pasta");
clues.insert("ADD", "More: abbr.");
clues.insert("BETA", "Advice, in climbing jargon");
clues.insert("ARK", "Couple's cruise ship?");
clues.insert("AIL", "Bedevil");
clues.insert("EGG", "Urge (on)");
clues.insert("BREATH", "Form of investiture on Nalthis");
clues.insert("GRACED", "Adorned");
clues.insert("OLEO", "Hydrogenated food product");
clues.insert("ODDS", "What were the ____?");
clues.insert("GEODE", "Rock formation that starts as a gas bubble");
clues.insert("HIS", "Label on a towel");
clues.insert("LEON", "A large gato");
clues.insert("ADDLED", "Like a brain in love");
clues.insert("WAHOOS", "Exclamations of joy");
clues.insert("ARAB", "Desert steed");
clues.insert("ABDUCT", "Take, as by a UFO");
clues.insert("MBA", "Degree for CEOs");
clues.insert("ICETEA", "???");
clues.insert("DOB", "Important date: abbr");
clues.insert("CHEAP", "Overpowered, in the 90's");
clues.insert("RAG", "with \"on\", tease");
clues.insert("OVA", "Largest human cells");
clues.insert("RALLY", "Make a comeback, as a military force");
clues.insert("ANTS", "Pants' contents?");
clues.insert("EDIT", "Amend");
clues.insert("AGAR", "Gelatin alternative");
clues.insert("ASIA", "Home of the Indian elephant");
clues.insert("AGA", "Ottoman honorific");
clues.insert("THAI", "Basil variety");
clues.insert("HORNSWOGGLED", "How you feel after solving this puzzle");
clues.insert("SOS", "[Help!]");
clues.insert("EDGER", "Lawnkeeping tool");
clues.insert("OBI", "Kimono part");
clues.insert("RIBALD", "Blue");
clues.insert("ANTLER", "Classic sexual dimorphism feature");
clues.insert("HOODWINKED", "How you feel after solving this puzzle");
clues.insert("ACE", "Skilled pilot");
clues.insert("NASA", "Apollo originator");
clues.insert("EELS", "Fish caught in pots");
clues.insert("NAN", "IEEE-754 reflexivity violator");
clues.insert("DDAY", "Action time");
clues.insert("SIAM", "Name on old Risk boards");
clues.insert("EARLS", "Superiors to viscounts");
clues.insert("USA", "Home of Athens, Berlin, Milan, Palermo, Tripoli, Versailles, and Vienna: abbr");
clues.insert("BAMBOO", "One of the fasting growing plants in the world");
clues.insert("ALLAH", "Being with 99 names");
clues.insert("PAIL", "Bucket");
clues.insert("PARCH", "Scorch");
clues.insert("HEWED", "Sawn");
clues.insert("IRISES", "Organic annuli");
clues.insert("BRA", "Supporter of women?");
clues.insert("AROMA", "Bakery attractant");
clues.insert("LAPSE", "Gap");
clues.insert("GASBAG", "Yapper");
clues.insert("ANA", "Serbian tennis player Ivanovic");
clues.insert("ELATED", "On cloud nine");
clues.insert("AGHA", "Ottoman honorific");
clues.insert("BATS", "Spreaders of White-Nose syndrome");
clues.insert("OVAL", "Egg-like");
clues.insert("SEC", "Short time, for short");
clues.insert("MOOLA", "\"Cheddar\"");
clues.insert("DOLL", "\"It's an action figure, not a ____\"");
clues.insert("GLEAN", "Reap");
clues.insert("EARNS", "Reaps");
clues.insert("ANTSY", "On edge");
clues.insert("ANT", "Inspiration for a size-warping Marvel hero");
clues.insert("RIM", "Lid connector");
clues.insert("BAMBOOZLED", "How you feel after solving this puzzle");
clues.insert("LOOT", "Reward for killing things, in video games");
clues.insert("SHORTCHANGED", "How you feel after solving this puzzle");
clues.insert("EEK", "[A mouse!]");
clues.insert("GAGA", "Player of the Hotel owner in \"American Horror Story: Hotel\"");
clues.insert("LLAMAS", "Halfway between a tibetan priest and major fire?");
clues.insert("DYKES", "Common earthworks");
clues.insert("SAE", "Standards organization for cars");
clues.insert("CLOT", "Response to injury, or cause of illness");
clues.insert("AMAZON", "Origin of Wonderwoman");
clues.insert("LEAVE", "\"Make like a tree and _____\"");
let mut clue_map = HashMap::new();
for (word, clue) in clues {
clue_map.insert(Word::from_str(word).unwrap(), clue);
}
let mut clue_list = vec![];
for window in windows.windows() {
let word: Word = window.positions().map(|(x, y)| match grid[(x, y)] {
Cell::White(Some(x)) => x,
_ => unreachable!(),
}).collect();
clue_list.push((window, clue_map[&word].clone()));
}
clue_list.sort_by_key(|(window, clue)| (window.position().0, window.position().1, window.direction()));
let puzzle = Puzzle {
preamble: vec![],
version: *b"1.4\0",
title: "The First Crossword".to_string(),
author: "Nathan Dobson".to_string(),
copyright: "".to_string(),
grid: Grid::new(grid.size(), |x, y| {
match grid[(x, y)] {
Cell::Black => None,
Cell::White(Some(x)) => Some(PuzzleCell {
solution: [x.to_unicode()].iter().cloned().collect(),
..Default::default()
}),
_ => panic!(),
}
}),
clues: WindowMap::new(clue_list.into_iter().map(|(window, clue)| (window, clue.to_string())), grid.size()),
note: "".to_string(),
};
let mut new_data: Vec<u8> = vec![];
puzzle.write_to(&mut new_data).unwrap();
fs::write("output.puz", &new_data).unwrap();
}
/*fn make_choices(dictionary: &[ScoredWord], grid: &Grid<Cell>, windows: &[Window]) {
let (window, options) = match windows.iter().enumerate().filter(|(index, window)| {
for position in window.positions() {
if grid[position] == Cell::White(None) {
return true;
}
}
false
}).map(|(index, window)| {
let words: Vec<ScoredWord> = dictionary.iter().filter(|word| {
if word.word.len() != window.length {
return false;
}
for (position, &letter) in window.positions().zip(word.word.iter()) {
match grid[position] {
Cell::White(Some(needed)) => if needed != letter {
return false;
}
_ => {}
}
}
true
}).cloned().collect();
(index, words)
}).min_by_key(|(index, words)| words.len()) {
None => {
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
return;
}
Some(x) => x,
};
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
for option in options {
let mut grid2 = grid.clone();
for (position, value) in windows[window].positions().zip(option.word.iter()) {
grid2[position] = Cell::White(Some(*value));
}
make_choices(dictionary, &grid2, windows);
}
}*/
fn search_impl() -> io::Result<()> {
let mut reader = ReaderBuilder::new()
.has_headers(false)
.from_reader(STA
|
RT);
let mut rows = vec![];
for line in reader.records() {
let mut row = vec![];
for cell in line?.iter() {
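            // CSV cell encoding used by the starting grid: "!" is a black square, an empty
            // string is an unfilled white square, and a single letter is a pre-filled cell.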
row.push(match cell {
"!" => Cell::Black,
"" => Cell::White(None),
letter => Cell::White(Some(Letter::from_unicode(letter.chars().next().unwrap()).unwrap())),
});
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| {
rows[y][x]
});
println!("{}", AsciiGrid(&grid));
let scored_words = ScoredWord::default().unwrap();
let mut dictionary = scored_words.iter().map(|scored_word| scored_word.word).collect::<Vec<Word>>();
//dictionary=dictionary[dictionary.]
|
identifier_body
|
|
Translatable.js
|
pointer
*/
registerTranslatableElement(element, html){
var isTextFromEditor = false;
//Element has been registered already
if (
element.hasPointer && element.hasPointer.indexOf('translatable') > -1
|| element._isInEditorElement
|| (isTextFromEditor = this.isInEditorElement(element))
) {
//Prevent check of editor type next time...
if ( isTextFromEditor ) {
element._isInEditorElement = true;
}
return;
}
CAEditor.pushPointerElement(element, 'translatable', {
//Bind original translate into element property
originalTranslate : this.translatedTree[html],
onPointerCreate : this.events.onPointerCreate.bind(this),
onPointerClick : this.events.onPointerClick.bind(this),
onPointerHide : this.events.onPointerHide.bind(this),
});
},
/*
* We want build tree with keys as translations and values as original texts.
* For better performance for searching elements.
*/
getTranslationsTree(){
//Debug given texts
var debugText = [];
//Build translates tree
for ( var key in this.allTranslates ) {
var translate = this.domPreparer.prepareTranslateHTML(this.allTranslates[key][0]||key);
/*
* DEBUG only given texts
*/
if ( debugText.length > 0 && translate.indexOf(debugText) === -1 ) {
continue;
}
if ( translate && translate.indexOf('je fiktívny text') > -1 ){
console.log(translate)
}
//We need save duplicate translates
if ( translate in this.translatedTree ) {
this.duplicates.push(translate);
}
this.translatedTree[translate] = key;
if ( translate.length > this.maxTranslateLength ) {
|
}
},
/*
* Change translates and HTML dom into same format
* ig. we need sort all attributes by name order, because VueJS sorts attributes... then translates are not same with innerHTML
*/
domPreparer: {
prepared : {},
prepareTranslateHTML(html, e){
            //We need to cache prepared texts, because otherwise it may have a heavy performance impact on the browser
if ( html in this.prepared ){
return this.prepared[html];
}
var vn = document.createElement('div');
vn.innerHTML = html;
this.modifyElements(vn);
return this.prepared[html] = vn.innerHTML;
},
modifyElements(parentNode){
//Element has no childnodes
if ( !parentNode.childNodes ){
return;
}
for ( var k = 0; k < parentNode.childNodes.length; k++ ) {
let e = parentNode.childNodes[k];
this.sortAttributes(e);
//If the child node has children of its own...
if ( e.childNodes && e.childNodes.length > 0 ){
this.modifyElements(e)
}
}
},
sortAttributes(e){
let defaultAttributes = [];
//If childnode has attributes, we need sort them
if ( e.attributes && e.attributes.length > 0 ){
//Build attributes tree
for ( let i = 0; i < e.attributes.length; i++ ){
defaultAttributes.push({
name : e.attributes[i].nodeName,
value : e.attributes[i].nodeValue
});
}
//Sort element attributes by attribute name
defaultAttributes = defaultAttributes.sort((a, b) => {
return a.name > b.name ? 1 : -1;
});
//Remove all attributes
defaultAttributes.forEach(item => {
e.removeAttribute(item.name);
});
//Add attributes again in the correct order
defaultAttributes.forEach(item => {
item = this.updateAttribute(item);
e.setAttribute(item.name, item.value);
});
}
},
updateAttribute(item){
//We want update style to format same as from vuejs render
if ( item.name == 'style' ){
let newValue = item.value.replace(/\:/g, ': ').replace(/\s\s/g, ' ');
if ( newValue && newValue.substr(-1) != ';' ){
newValue += ';';
}
item.value = newValue;
}
return item;
},
},
getTranslatableElements(){
var elements = document.querySelectorAll('*');
//Get all elements with innerhtml from translates
for ( var i = 0; i < elements.length; i++ ){
var html = this.nodeValue(elements[i]);
//We want to skip texts more than 50% longer than the longest translate,
//because some tags may be encoded...
if ( (html||'').length > this.maxTranslateLength * 1.5 ) {
continue;
}
//Add element into array if has not been added already and has translation
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(elements[i], html);
}
//Look for text childs
else {
for ( var n = 0; n < elements[i].childNodes.length; n++ ){
var node = elements[i].childNodes[n];
if ( node.nodeName !== '#text' ) {
continue;
}
html = this.nodeValue(node);
//If there is only one text node child in the parent,
//then we want to bind the translation to the parent element.
//This is because innerHTML in the parent element may be escaped and won't be matched
//with the translation. But the value in the textNode is correct for this element.
if ( elements[i].childNodes.length === 1 ) {
node = node.parentElement;
}
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(node, html);
}
}
}
}
},
/*
* Get element or node value
*/
nodeValue(e){
var value = e.nodeName == '#text' ? e.data : e.innerHTML,
value = value||'';
return this.domPreparer.prepareTranslateHTML(value, e).trim();
},
/*
* Check if given translation is in text block from editor field (type:editor)
*/
isInEditorElement(element){
if ( element ) {
if ( element.hasPointer && element.hasPointer.indexOf('translatable') > -1 ) {
return true;
}
if ( element.nodeName != '#text' && element.getAttribute('data-crudadmin-editor') === '' ) {
return true;
}
if ( element.parentElement ) {
return this.isInEditorElement(element.parentElement);
}
}
return false;
},
/*
* Update translation on change
*/
updateTranslation(e){
var data = { changes : {} },
value = this.nodeValue(e);
//We need to handle encoded empty spaces, because if we push an empty char it will be changed to this encoded value.
//We need to place an empty char when the user deletes the whole translation; this situation is buggy in some browsers,
//so we need to remove this trailing encoded empty char here.
if ( value.substr(-6) == '&nbsp;' ) {
value = value.substr(0, value.length - 6);
}
//If it is not raw text, we can save the unencoded value,
//because double encoding would be applied on the Laravel side
if ( Editor.hasAllowedFormation(e) === false ) {
value = Helpers.htmlspecialcharsDecode(value);
}
data.changes[e.getPointerSetting('originalTranslate', 'translatable')] = value;
//Clear previous key change
if ( this._ajaxSend ) {
clearTimeout(this._ajaxSend);
}
//Remove error class before sending ajax
Helpers.removeClass(e._CAPencil, Pencils.classNameError);
//We need to send the ajax at most once per second,
//because gettext is cached by file timestamp, which is in seconds...
this._ajaxSend = setTimeout(() => {
var url = CAEditor.config.requests.updateText;
CAEditor.ajax.post(url, data, {
success(response){
Helpers.addClass(e._CAPencil, Pencils.classNameSaved);
},
error(response){
//Add red pointer color
Helpers.addClass(e._CAPencil, Pencils.classNameError);
}
});
this.updateSameTranslationElements(e);
}, 1000);
},
/*
* Update all translates with same translation
*/
updateSameTranslationElements(element){
for ( var i = 0; i < CAEditor.matchedElements.length; i++ ) {
if ( CAEditor.matchedElements[i].getPointerSetting('originalTranslate', 'translatable') == element.getPointerSetting('originalTranslate', 'translatable') ) {
if ( CAEditor.matchedElements[i] != element ) {
CAEditor.matchedElements[i].innerHTML = element.innerHTML;
}
}
}
},
/*
* Check if the translate element is invisible
*/
isInvisibleElement(element){
//If is textNode
if ( element.nodeName == '#text' ) {
element = element.parentElement;
}
var css = window.getComputedStyle(element),
opacity = parseFloat(css.opacity);
//If it is an invisible element
if ( opacity <= 0.5 || css.visibility == 'hidden' || parseFloat(css.fontSize) == 0 ) {
return true;
}
return false;
},
/*
* Edit hidden translate in a prompt modal message
*/
openAlertModal(element, actualValue){
var newText = prompt(CATranslates.texts.update, actualValue);
//On cancel
if ( newText == null ) {
return
|
this.maxTranslateLength = translate.length;
}
|
conditional_block
|
Translatable.js
|
this.events.onPointerHide.bind(this),
});
},
/*
* We want to build a tree with keys as translations and values as original texts,
* for better performance when searching elements.
*/
getTranslationsTree(){
//Debug given texts
var debugText = [];
//Build translates tree
for ( var key in this.allTranslates ) {
var translate = this.domPreparer.prepareTranslateHTML(this.allTranslates[key][0]||key);
/*
* DEBUG only given texts
*/
if ( debugText.length > 0 && translate.indexOf(debugText) === -1 ) {
continue;
}
if ( translate && translate.indexOf('je fiktívny text') > -1 ){
console.log(translate)
}
//We need save duplicate translates
if ( translate in this.translatedTree ) {
this.duplicates.push(translate);
}
this.translatedTree[translate] = key;
if ( translate.length > this.maxTranslateLength ) {
this.maxTranslateLength = translate.length;
}
}
},
/*
* Convert translates and the HTML DOM into the same format,
* e.g. we need to sort all attributes by name, because VueJS sorts attributes... otherwise translates would not match innerHTML
*/
domPreparer: {
prepared : {},
prepareTranslateHTML(html, e){
//We need to cache prepared texts, because otherwise it may have a heavy performance impact on the browser
if ( html in this.prepared ){
return this.prepared[html];
}
var vn = document.createElement('div');
vn.innerHTML = html;
this.modifyElements(vn);
return this.prepared[html] = vn.innerHTML;
},
modifyElements(parentNode){
//Element has no childnodes
if ( !parentNode.childNodes ){
return;
}
for ( var k = 0; k < parentNode.childNodes.length; k++ ) {
let e = parentNode.childNodes[k];
this.sortAttributes(e);
//If the child node has children of its own...
if ( e.childNodes && e.childNodes.length > 0 ){
this.modifyElements(e)
}
}
},
sortAttributes(e){
let defaultAttributes = [];
//If childnode has attributes, we need sort them
if ( e.attributes && e.attributes.length > 0 ){
//Build attributes tree
for ( let i = 0; i < e.attributes.length; i++ ){
defaultAttributes.push({
name : e.attributes[i].nodeName,
value : e.attributes[i].nodeValue
});
}
//Sort element attributes by attribute name
defaultAttributes = defaultAttributes.sort((a, b) => {
return a.name > b.name ? 1 : -1;
});
//Remove all attributes
defaultAttributes.forEach(item => {
e.removeAttribute(item.name);
});
//Add attributes again in the correct order
defaultAttributes.forEach(item => {
item = this.updateAttribute(item);
e.setAttribute(item.name, item.value);
});
}
},
updateAttribute(item){
//We want update style to format same as from vuejs render
if ( item.name == 'style' ){
let newValue = item.value.replace(/\:/g, ': ').replace(/\s\s/g, ' ');
if ( newValue && newValue.substr(-1) != ';' ){
newValue += ';';
}
item.value = newValue;
}
return item;
},
},
getTranslatableElements(){
var elements = document.querySelectorAll('*');
//Get all elements with innerhtml from translates
for ( var i = 0; i < elements.length; i++ ){
var html = this.nodeValue(elements[i]);
//We want to skip texts more than 50% longer than the longest translate,
//because some tags may be encoded...
if ( (html||'').length > this.maxTranslateLength * 1.5 ) {
continue;
}
//Add element into array if has not been added already and has translation
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(elements[i], html);
}
//Look for text childs
else {
for ( var n = 0; n < elements[i].childNodes.length; n++ ){
var node = elements[i].childNodes[n];
if ( node.nodeName !== '#text' ) {
continue;
}
html = this.nodeValue(node);
//If there is only one text node child in the parent,
//then we want to bind the translation to the parent element.
//This is because innerHTML in the parent element may be escaped and won't be matched
//with the translation. But the value in the textNode is correct for this element.
if ( elements[i].childNodes.length === 1 ) {
node = node.parentElement;
}
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(node, html);
}
}
}
}
},
/*
* Get element or node value
*/
nodeValue(e){
var value = e.nodeName == '#text' ? e.data : e.innerHTML,
value = value||'';
return this.domPreparer.prepareTranslateHTML(value, e).trim();
},
/*
* Check if given translation is in text block from editor field (type:editor)
*/
isInEditorElement(element){
if ( element ) {
if ( element.hasPointer && element.hasPointer.indexOf('translatable') > -1 ) {
return true;
}
if ( element.nodeName != '#text' && element.getAttribute('data-crudadmin-editor') === '' ) {
return true;
}
if ( element.parentElement ) {
return this.isInEditorElement(element.parentElement);
}
}
return false;
},
/*
* Update translation on change
*/
updateTranslation(e){
var data = { changes : {} },
value = this.nodeValue(e);
//We need to handle encoded empty spaces, because if we push an empty char it will be changed to this encoded value.
//We need to place an empty char when the user deletes the whole translation; this situation is buggy in some browsers,
//so we need to remove this trailing encoded empty char here.
if ( value.substr(-6) == '&nbsp;' ) {
value = value.substr(0, value.length - 6);
}
//If it is not raw text, we can save the unencoded value,
//because double encoding would be applied on the Laravel side
if ( Editor.hasAllowedFormation(e) === false ) {
value = Helpers.htmlspecialcharsDecode(value);
}
data.changes[e.getPointerSetting('originalTranslate', 'translatable')] = value;
//Clear previous key change
if ( this._ajaxSend ) {
clearTimeout(this._ajaxSend);
}
//Remove error class before sending ajax
Helpers.removeClass(e._CAPencil, Pencils.classNameError);
//We need to send the ajax at most once per second,
//because gettext is cached by file timestamp, which is in seconds...
this._ajaxSend = setTimeout(() => {
var url = CAEditor.config.requests.updateText;
CAEditor.ajax.post(url, data, {
success(response){
Helpers.addClass(e._CAPencil, Pencils.classNameSaved);
},
error(response){
//Add red pointer color
Helpers.addClass(e._CAPencil, Pencils.classNameError);
}
});
this.updateSameTranslationElements(e);
}, 1000);
},
/*
* Update all translates with same translation
*/
updateSameTranslationElements(element){
for ( var i = 0; i < CAEditor.matchedElements.length; i++ ) {
if ( CAEditor.matchedElements[i].getPointerSetting('originalTranslate', 'translatable') == element.getPointerSetting('originalTranslate', 'translatable') ) {
if ( CAEditor.matchedElements[i] != element ) {
CAEditor.matchedElements[i].innerHTML = element.innerHTML;
}
}
}
},
/*
* Check if the translate element is invisible
*/
isInvisibleElement(element){
//If is textNode
if ( element.nodeName == '#text' ) {
element = element.parentElement;
}
var css = window.getComputedStyle(element),
opacity = parseFloat(css.opacity);
//If it is an invisible element
if ( opacity <= 0.5 || css.visibility == 'hidden' || parseFloat(css.fontSize) == 0 ) {
return true;
}
return false;
},
/*
* Edit hidden translate in a prompt modal message
*/
openAlertModal(element, actualValue){
var newText = prompt(CATranslates.texts.update, actualValue);
//On cancel
if ( newText == null ) {
return;
}
//We need update node, or innerHTML tag value
if ( element.nodeName == '#text' ) {
element.data = newText;
} else {
element.innerHTML = newText;
}
CAEditor.pencils.repaintPencils();
this.updateTranslation(element);
},
isStaticEditor(element){
return element.nodeName != '#text' && element.getAttribute('data-crudadmin-static-editor') === '';
},
/*
* Pencil events
*/
events : {
onPointerCreate(pencil, element){
if ( this.isStaticEditor(element) ){
Helpers.addClass(pencil, Pencils.classNameIcon);
Helpers.addClass(pencil, Pencils.classNameEditor);
}
pencil.setAttribute('data-translate', element.getPointerSetting('originalTranslate', 'translatable'));
},
o
|
nPointerClick(
|
identifier_name
|
|
Translatable.js
|
}
},
/*
* Convert translates and the HTML DOM into the same format,
* e.g. we need to sort all attributes by name, because VueJS sorts attributes... otherwise translates would not match innerHTML
*/
domPreparer: {
prepared : {},
prepareTranslateHTML(html, e){
//We need to cache prepared texts, because otherwise it may have a heavy performance impact on the browser
if ( html in this.prepared ){
return this.prepared[html];
}
var vn = document.createElement('div');
vn.innerHTML = html;
this.modifyElements(vn);
return this.prepared[html] = vn.innerHTML;
},
modifyElements(parentNode){
//Element has no childnodes
if ( !parentNode.childNodes ){
return;
}
for ( var k = 0; k < parentNode.childNodes.length; k++ ) {
let e = parentNode.childNodes[k];
this.sortAttributes(e);
//If the child node has children of its own...
if ( e.childNodes && e.childNodes.length > 0 ){
this.modifyElements(e)
}
}
},
sortAttributes(e){
let defaultAttributes = [];
//If childnode has attributes, we need sort them
if ( e.attributes && e.attributes.length > 0 ){
//Build attributes tree
for ( let i = 0; i < e.attributes.length; i++ ){
defaultAttributes.push({
name : e.attributes[i].nodeName,
value : e.attributes[i].nodeValue
});
}
//Sort element attributes by attribute name
defaultAttributes = defaultAttributes.sort((a, b) => {
return a.name > b.name ? 1 : -1;
});
//Remove all attributes
defaultAttributes.forEach(item => {
e.removeAttribute(item.name);
});
//Add attributes again in the correct order
defaultAttributes.forEach(item => {
item = this.updateAttribute(item);
e.setAttribute(item.name, item.value);
});
}
},
updateAttribute(item){
//We want update style to format same as from vuejs render
if ( item.name == 'style' ){
let newValue = item.value.replace(/\:/g, ': ').replace(/\s\s/g, ' ');
if ( newValue && newValue.substr(-1) != ';' ){
newValue += ';';
}
item.value = newValue;
}
return item;
},
},
getTranslatableElements(){
var elements = document.querySelectorAll('*');
//Get all elements with innerhtml from translates
for ( var i = 0; i < elements.length; i++ ){
var html = this.nodeValue(elements[i]);
//We want to skip texts more than 50% longer than the longest translate,
//because some tags may be encoded...
if ( (html||'').length > this.maxTranslateLength * 1.5 ) {
continue;
}
//Add element into array if has not been added already and has translation
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(elements[i], html);
}
//Look for text childs
else {
for ( var n = 0; n < elements[i].childNodes.length; n++ ){
var node = elements[i].childNodes[n];
if ( node.nodeName !== '#text' ) {
continue;
}
html = this.nodeValue(node);
//If there is only one text node child in the parent,
//then we want to bind the translation to the parent element.
//This is because innerHTML in the parent element may be escaped and won't be matched
//with the translation. But the value in the textNode is correct for this element.
if ( elements[i].childNodes.length === 1 ) {
node = node.parentElement;
}
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(node, html);
}
}
}
}
},
/*
* Get element or node value
*/
nodeValue(e){
var value = e.nodeName == '#text' ? e.data : e.innerHTML,
value = value||'';
return this.domPreparer.prepareTranslateHTML(value, e).trim();
},
/*
* Check if given translation is in text block from editor field (type:editor)
*/
isInEditorElement(element){
if ( element ) {
if ( element.hasPointer && element.hasPointer.indexOf('translatable') > -1 ) {
return true;
}
if ( element.nodeName != '#text' && element.getAttribute('data-crudadmin-editor') === '' ) {
return true;
}
if ( element.parentElement ) {
return this.isInEditorElement(element.parentElement);
}
}
return false;
},
/*
* Update translation on change
*/
updateTranslation(e){
var data = { changes : {} },
value = this.nodeValue(e);
//We need to handle encoded empty spaces, because if we push an empty char it will be changed to this encoded value.
//We need to place an empty char when the user deletes the whole translation; this situation is buggy in some browsers,
//so we need to remove this trailing encoded empty char here.
if ( value.substr(-6) == '&nbsp;' ) {
value = value.substr(0, value.length - 6);
}
//If it is not raw text, we can save the unencoded value,
//because double encoding would be applied on the Laravel side
if ( Editor.hasAllowedFormation(e) === false ) {
value = Helpers.htmlspecialcharsDecode(value);
}
data.changes[e.getPointerSetting('originalTranslate', 'translatable')] = value;
//Clear previous key change
if ( this._ajaxSend ) {
clearTimeout(this._ajaxSend);
}
//Remove error class before sending ajax
Helpers.removeClass(e._CAPencil, Pencils.classNameError);
//We need to send the ajax at most once per second,
//because gettext is cached by file timestamp, which is in seconds...
this._ajaxSend = setTimeout(() => {
var url = CAEditor.config.requests.updateText;
CAEditor.ajax.post(url, data, {
success(response){
Helpers.addClass(e._CAPencil, Pencils.classNameSaved);
},
error(response){
//Add red pointer color
Helpers.addClass(e._CAPencil, Pencils.classNameError);
}
});
this.updateSameTranslationElements(e);
}, 1000);
},
/*
* Update all translates with same translation
*/
updateSameTranslationElements(element){
for ( var i = 0; i < CAEditor.matchedElements.length; i++ ) {
if ( CAEditor.matchedElements[i].getPointerSetting('originalTranslate', 'translatable') == element.getPointerSetting('originalTranslate', 'translatable') ) {
if ( CAEditor.matchedElements[i] != element ) {
CAEditor.matchedElements[i].innerHTML = element.innerHTML;
}
}
}
},
/*
* Check if the translate element is invisible
*/
isInvisibleElement(element){
//If is textNode
if ( element.nodeName == '#text' ) {
element = element.parentElement;
}
var css = window.getComputedStyle(element),
opacity = parseFloat(css.opacity);
//If it is an invisible element
if ( opacity <= 0.5 || css.visibility == 'hidden' || parseFloat(css.fontSize) == 0 ) {
return true;
}
return false;
},
/*
* Edit hidden translate in a prompt modal message
*/
openAlertModal(element, actualValue){
var newText = prompt(CATranslates.texts.update, actualValue);
//On cancel
if ( newText == null ) {
return;
}
//We need update node, or innerHTML tag value
if ( element.nodeName == '#text' ) {
element.data = newText;
} else {
element.innerHTML = newText;
}
CAEditor.pencils.repaintPencils();
this.updateTranslation(element);
},
isStaticEditor(element){
return element.nodeName != '#text' && element.getAttribute('data-crudadmin-static-editor') === '';
},
/*
* Pencil events
*/
events : {
onPointerCreate(pencil, element){
if ( this.isStaticEditor(element) ){
Helpers.addClass(pencil, Pencils.classNameIcon);
Helpers.addClass(pencil, Pencils.classNameEditor);
}
pencil.setAttribute('data-translate', element.getPointerSetting('originalTranslate', 'translatable'));
},
onPointerClick(element, pencil){
var actualValue = this.nodeValue(element);
//We can't allow updating duplicate translates, because the change might not end up on the right source translate.
if ( this.duplicates.indexOf(actualValue) > -1 ) {
alert(CATranslates.texts.cannotUpdate);
return;
}
//Invisible element cannot be edited in editor style
if ( this.isInvisibleElement(element) ) {
this.openAlertModal(element, actualValue);
} else if ( this.isStaticEditor(element) ) {
Editor.makeInlineCKEditor(element, () => {
Translatable.updateTranslation(element);
});
} else {
Editor.makeEditableNode(element, actualValue);
}
//When the pointer is clicked, we want to remove all additional pointers
Pencils.removeAdditionalPointers(element._CAPencil);
},
onPointerHide(element, pencil){
//If the element is in an editing state, we want to open the alert instead.
|
//Because probably this element has been hidden
if ( element.isContentEditable !== true ){
return;
|
random_line_split
|
|
Translatable.js
|
pointer
*/
registerTranslatableElement(element, html){
var isTextFromEditor = false;
//Element has been registered already
if (
element.hasPointer && element.hasPointer.indexOf('translatable') > -1
|| element._isInEditorElement
|| (isTextFromEditor = this.isInEditorElement(element))
) {
//Prevent check of editor type next time...
if ( isTextFromEditor ) {
element._isInEditorElement = true;
}
return;
}
CAEditor.pushPointerElement(element, 'translatable', {
//Bind original translate into element property
originalTranslate : this.translatedTree[html],
onPointerCreate : this.events.onPointerCreate.bind(this),
onPointerClick : this.events.onPointerClick.bind(this),
onPointerHide : this.events.onPointerHide.bind(this),
});
},
/*
* We want to build a tree with keys as translations and values as original texts,
* for better performance when searching elements.
*/
getTranslationsTree(){
//Debug given texts
var debugText = [];
//Build translates tree
for ( var key in this.allTranslates ) {
var translate = this.domPreparer.prepareTranslateHTML(this.allTranslates[key][0]||key);
/*
* DEBUG only given texts
*/
if ( debugText.length > 0 && translate.indexOf(debugText) === -1 ) {
continue;
}
if ( translate && translate.indexOf('je fiktívny text') > -1 ){
console.log(translate)
}
//We need save duplicate translates
if ( translate in this.translatedTree ) {
this.duplicates.push(translate);
}
this.translatedTree[translate] = key;
if ( translate.length > this.maxTranslateLength ) {
this.maxTranslateLength = translate.length;
}
}
},
/*
* Convert translates and the HTML DOM into the same format,
* e.g. we need to sort all attributes by name, because VueJS sorts attributes... otherwise translates would not match innerHTML
*/
domPreparer: {
prepared : {},
prepareTranslateHTML(html, e){
//We need to cache prepared texts, because otherwise it may have a heavy performance impact on the browser
if ( html in this.prepared ){
return this.prepared[html];
}
var vn = document.createElement('div');
vn.innerHTML = html;
this.modifyElements(vn);
return this.prepared[html] = vn.innerHTML;
},
modifyElements(parentNode){
//Element has no childnodes
if ( !parentNode.childNodes ){
return;
}
for ( var k = 0; k < parentNode.childNodes.length; k++ ) {
let e = parentNode.childNodes[k];
this.sortAttributes(e);
//If the child node has children of its own...
if ( e.childNodes && e.childNodes.length > 0 ){
this.modifyElements(e)
}
}
},
sortAttributes(e){
|
e.removeAttribute(item.name);
});
//Add attributes again in the correct order
defaultAttributes.forEach(item => {
item = this.updateAttribute(item);
e.setAttribute(item.name, item.value);
});
}
},
updateAttribute(item){
//We want update style to format same as from vuejs render
if ( item.name == 'style' ){
let newValue = item.value.replace(/\:/g, ': ').replace(/\s\s/g, ' ');
if ( newValue && newValue.substr(-1) != ';' ){
newValue += ';';
}
item.value = newValue;
}
return item;
},
},
getTranslatableElements(){
var elements = document.querySelectorAll('*');
//Get all elements with innerhtml from translates
for ( var i = 0; i < elements.length; i++ ){
var html = this.nodeValue(elements[i]);
//We want to skip texts more than 50% longer than the longest translate,
//because some tags may be encoded...
if ( (html||'').length > this.maxTranslateLength * 1.5 ) {
continue;
}
//Add element into array if has not been added already and has translation
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(elements[i], html);
}
//Look for text childs
else {
for ( var n = 0; n < elements[i].childNodes.length; n++ ){
var node = elements[i].childNodes[n];
if ( node.nodeName !== '#text' ) {
continue;
}
html = this.nodeValue(node);
//If there is only one text node child in the parent,
//then we want to bind the translation to the parent element.
//This is because innerHTML in the parent element may be escaped and won't be matched
//with the translation. But the value in the textNode is correct for this element.
if ( elements[i].childNodes.length === 1 ) {
node = node.parentElement;
}
if ( this.translatedTree[html] !== undefined ) {
this.registerTranslatableElement(node, html);
}
}
}
}
},
/*
* Get element or node value
*/
nodeValue(e){
var value = e.nodeName == '#text' ? e.data : e.innerHTML,
value = value||'';
return this.domPreparer.prepareTranslateHTML(value, e).trim();
},
/*
* Check if given translation is in text block from editor field (type:editor)
*/
isInEditorElement(element){
if ( element ) {
if ( element.hasPointer && element.hasPointer.indexOf('translatable') > -1 ) {
return true;
}
if ( element.nodeName != '#text' && element.getAttribute('data-crudadmin-editor') === '' ) {
return true;
}
if ( element.parentElement ) {
return this.isInEditorElement(element.parentElement);
}
}
return false;
},
/*
* Update translation on change
*/
updateTranslation(e){
var data = { changes : {} },
value = this.nodeValue(e);
//We need to handle encoded empty spaces, because if we push an empty char it will be changed to this encoded value.
//We need to place an empty char when the user deletes the whole translation; this situation is buggy in some browsers,
//so we need to remove this trailing encoded empty char here.
if ( value.substr(-6) == '&nbsp;' ) {
value = value.substr(0, value.length - 6);
}
//If it is not raw text, we can save the unencoded value,
//because double encoding would be applied on the Laravel side
if ( Editor.hasAllowedFormation(e) === false ) {
value = Helpers.htmlspecialcharsDecode(value);
}
data.changes[e.getPointerSetting('originalTranslate', 'translatable')] = value;
//Clear previous key change
if ( this._ajaxSend ) {
clearTimeout(this._ajaxSend);
}
//Remove error class before sending ajax
Helpers.removeClass(e._CAPencil, Pencils.classNameError);
//We need to send the ajax at most once per second,
//because gettext is cached by file timestamp, which is in seconds...
this._ajaxSend = setTimeout(() => {
var url = CAEditor.config.requests.updateText;
CAEditor.ajax.post(url, data, {
success(response){
Helpers.addClass(e._CAPencil, Pencils.classNameSaved);
},
error(response){
//Add red pointer color
Helpers.addClass(e._CAPencil, Pencils.classNameError);
}
});
this.updateSameTranslationElements(e);
}, 1000);
},
/*
* Update all translates with same translation
*/
updateSameTranslationElements(element){
for ( var i = 0; i < CAEditor.matchedElements.length; i++ ) {
if ( CAEditor.matchedElements[i].getPointerSetting('originalTranslate', 'translatable') == element.getPointerSetting('originalTranslate', 'translatable') ) {
if ( CAEditor.matchedElements[i] != element ) {
CAEditor.matchedElements[i].innerHTML = element.innerHTML;
}
}
}
},
/*
* Check if the translate element is invisible
*/
isInvisibleElement(element){
//If is textNode
if ( element.nodeName == '#text' ) {
element = element.parentElement;
}
var css = window.getComputedStyle(element),
opacity = parseFloat(css.opacity);
//If it is an invisible element
if ( opacity <= 0.5 || css.visibility == 'hidden' || parseFloat(css.fontSize) == 0 ) {
return true;
}
return false;
},
/*
* Edit hidden translate in a prompt modal message
*/
openAlertModal(element, actualValue){
var newText = prompt(CATranslates.texts.update, actualValue);
//On cancel
if ( newText == null ) {
return
|
let defaultAttributes = [];
//If childnode has attributes, we need sort them
if ( e.attributes && e.attributes.length > 0 ){
//Build attributes tree
for ( let i = 0; i < e.attributes.length; i++ ){
defaultAttributes.push({
name : e.attributes[i].nodeName,
value : e.attributes[i].nodeValue
});
}
//Sort element attributes by attribute name
defaultAttributes = defaultAttributes.sort((a, b) => {
return a.name > b.name ? 1 : -1;
});
//Remove all attributes
defaultAttributes.forEach(item => {
|
identifier_body
|
index.b4f5078c.js
|
: ParcelRequire, ...};
declare var HMR_HOST: string;
declare var HMR_PORT: string;
declare var HMR_ENV_HASH: string;
declare var HMR_SECURE: boolean;
*/
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || (function () {}));
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = undefined;
}
module.bundle.Module = Module;
var checkedAssets, /*: {|[string]: boolean|}*/
acceptedAssets, /*: {|[string]: boolean|}*/
/*: {|[string]: boolean|}*/
assetsToAccept;
function getHostname() {
return HMR_HOST || (location.protocol.indexOf('http') === 0 ? location.hostname : 'localhost');
}
function getPort() {
return HMR_PORT || location.port;
}
// eslint-disable-next-line no-redeclare
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = getHostname();
var port = getPort();
var protocol = HMR_SECURE || location.protocol == 'https:' && !(/localhost|127.0.0.1|0.0.0.0/).test(hostname) ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + (port ? ':' + port : '') + '/');
// $FlowFixMe
ws.onmessage = function (event) /*: {data: string, ...}*/
{
checkedAssets = {
/*: {|[string]: boolean|}*/
};
acceptedAssets = {
/*: {|[string]: boolean|}*/
};
assetsToAccept = [];
var data = /*: HMRMessage*/
JSON.parse(event.data);
if (data.type === 'update') {
// Remove error overlay if there is one
removeErrorOverlay();
let assets = data.assets.filter(asset => asset.envHash === HMR_ENV_HASH);
// Handle HMR Update
var handled = false;
assets.forEach(asset => {
var didAccept = asset.type === 'css' || asset.type === 'js' && hmrAcceptCheck(module.bundle.root, asset.id, asset.depsByBundle);
if (didAccept) {
handled = true;
}
});
if (handled) {
console.clear();
assets.forEach(function (asset) {
hmrApply(module.bundle.root, asset);
});
for (var i = 0; i < assetsToAccept.length; i++) {
var id = assetsToAccept[i][1];
if (!acceptedAssets[id]) {
hmrAcceptRun(assetsToAccept[i][0], id);
}
}
} else {
window.location.reload();
}
}
if (data.type === 'error') {
// Log parcel errors to console
for (let ansiDiagnostic of data.diagnostics.ansi) {
let stack = ansiDiagnostic.codeframe ? ansiDiagnostic.codeframe : ansiDiagnostic.stack;
console.error('🚨 [parcel]: ' + ansiDiagnostic.message + '\n' + stack + '\n\n' + ansiDiagnostic.hints.join('\n'));
}
// Render the fancy html overlay
removeErrorOverlay();
var overlay = createErrorOverlay(data.diagnostics.html);
// $FlowFixMe
document.body.appendChild(overlay);
}
};
ws.onerror = function (e) {
console.error(e.message);
};
ws.onclose = function (e) {
if (undefined !== 'test') {
console.warn('[parcel] 🚨 Connection to the HMR server was lost');
}
};
}
function removeErrorOverlay() {
var overlay = document.getElementById(OVERLAY_ID);
|
console.log('[parcel] ✨ Error resolved');
}
}
function createErrorOverlay(diagnostics) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
let errorHTML = '<div style="background: black; opacity: 0.85; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; font-family: Menlo, Consolas, monospace; z-index: 9999;">';
for (let diagnostic of diagnostics) {
let stack = diagnostic.codeframe ? diagnostic.codeframe : diagnostic.stack;
errorHTML += `
<div>
<div style="font-size: 18px; font-weight: bold; margin-top: 20px;">
🚨 ${diagnostic.message}
</div>
<pre>
${stack}
</pre>
<div>
${diagnostic.hints.map(hint => '<div>' + hint + '</div>').join('')}
</div>
</div>
`;
}
errorHTML += '</div>';
overlay.innerHTML = errorHTML;
return overlay;
}
function getParents(bundle, id) /*: Array<[ParcelRequire, string]>*/
{
var modules = bundle.modules;
if (!modules) {
return [];
}
var parents = [];
var k, d, dep;
for (k in modules) {
for (d in modules[k][1]) {
dep = modules[k][1][d];
if (dep === id || Array.isArray(dep) && dep[dep.length - 1] === id) {
parents.push([bundle, k]);
}
}
}
if (bundle.parent) {
parents = parents.concat(getParents(bundle.parent, id));
}
return parents;
}
function updateLink(link) {
var newLink = link.cloneNode();
newLink.onload = function () {
if (link.parentNode !== null) {
// $FlowFixMe
link.parentNode.removeChild(link);
}
};
newLink.setAttribute('href', // $FlowFixMe
link.getAttribute('href').split('?')[0] + '?' + Date.now());
// $FlowFixMe
link.parentNode.insertBefore(newLink, link.nextSibling);
}
var cssTimeout = null;
function reloadCSS() {
if (cssTimeout) {
return;
}
cssTimeout = setTimeout(function () {
var links = document.querySelectorAll('link[rel="stylesheet"]');
for (var i = 0; i < links.length; i++) {
// $FlowFixMe[incompatible-type]
var href = /*: string*/
links[i].getAttribute('href');
var hostname = getHostname();
var servedFromHMRServer = hostname === 'localhost' ? new RegExp('^(https?:\\/\\/(0.0.0.0|127.0.0.1)|localhost):' + getPort()).test(href) : href.indexOf(hostname + ':' + getPort());
var absolute = (/^https?:\/\//i).test(href) && href.indexOf(window.location.origin) !== 0 && !servedFromHMRServer;
if (!absolute) {
updateLink(links[i]);
}
}
cssTimeout = null;
}, 50);
}
function hmrApply(bundle, /*: ParcelRequire*/
asset) /*: HMRAsset*/
{
var modules = bundle.modules;
if (!modules) {
return;
}
if (asset.type === 'css') {
reloadCSS();
return;
}
let deps = asset.depsByBundle[bundle.HMR_BUNDLE_ID];
if (deps) {
var fn = new Function('require', 'module', 'exports', asset.output);
modules[asset.id] = [fn, deps];
} else if (bundle.parent) {
hmrApply(bundle.parent, asset);
}
}
function hmrAcceptCheck(bundle, /*: ParcelRequire*/
id, /*: ParcelRequire*/
/*: string*/
depsByBundle) /*: ?{ [string]: { [string]: string } }*/
{
var modules = bundle.modules;
if (!modules) {
return;
}
if (depsByBundle && !depsByBundle[bundle.HMR_BUNDLE_ID]) {
// If we reached the root bundle without finding where the asset should go,
// there's nothing to do. Mark as "accepted" so we don't reload the page.
if (!bundle.parent) {
return true;
}
return hmrAcceptCheck(bundle.parent, id, depsByBundle);
}
if (checkedAssets[id]) {
return;
}
checkedAssets[id] = true;
var cached = bundle.cache[id];
assetsToAccept.push([bundle, id]);
if (cached && cached.hot && cached.hot._acceptCallbacks.length) {
return true;
}
return getParents(module.bundle.root, id).some(function (v) {
return hmrAcceptCheck(v[0], v[1], null);
});
}
function hmrAcceptRun(bundle, /*: ParcelRequire*/
id) /*: string*/
{
var cached = bundle.cache[id];
bundle.hotData = {};
if (cached && cached.hot) {
cached.hot.data = bundle.hotData;
}
if (cached && cached.hot && cached.hot._disposeCallbacks
|
if (overlay) {
overlay.remove();
|
random_line_split
|
index.b4f5078c.js
|
(name, jumped) {
if (!cache[name]) {
if (!modules[name]) {
// if we cannot find the module within our internal map or
// cache jump to the current global require ie. the last bundle
// that was added to the page.
var currentRequire =
typeof globalObject[parcelRequireName] === 'function' &&
globalObject[parcelRequireName];
if (!jumped && currentRequire) {
return currentRequire(name, true);
}
// If there are other bundles on this page the require from the
// previous one is saved to 'previousRequire'. Repeat this as
// many times as there are bundles until the module is found or
// we exhaust the require chain.
if (previousRequire) {
return previousRequire(name, true);
}
// Try the node require function if it exists.
if (nodeRequire && typeof name === 'string') {
return nodeRequire(name);
}
var err = new Error("Cannot find module '" + name + "'");
err.code = 'MODULE_NOT_FOUND';
throw err;
}
localRequire.resolve = resolve;
localRequire.cache = {};
var module = (cache[name] = new newRequire.Module(name));
modules[name][0].call(
module.exports,
localRequire,
module,
module.exports,
this
);
}
return cache[name].exports;
function localRequire(x) {
return newRequire(localRequire.resolve(x));
}
function resolve(x) {
return modules[name][1][x] || x;
}
}
function Module(moduleName) {
this.id = moduleName;
this.bundle = newRequire;
this.exports = {};
}
newRequire.isParcelRequire = true;
newRequire.Module = Module;
newRequire.modules = modules;
newRequire.cache = cache;
newRequire.parent = previousRequire;
newRequire.register = function(id, exports) {
modules[id] = [
function(require, module) {
module.exports = exports;
},
{},
];
};
Object.defineProperty(newRequire, 'root', {
get: function() {
return globalObject[parcelRequireName];
},
});
globalObject[parcelRequireName] = newRequire;
for (var i = 0; i < entry.length; i++) {
newRequire(entry[i]);
}
if (mainEntry) {
// Expose entry point to Node, AMD or browser globals
// Based on https://github.com/ForbesLindesay/umd/blob/master/template.js
var mainExports = newRequire(mainEntry);
// CommonJS
if (typeof exports === 'object' && typeof module !== 'undefined') {
module.exports = mainExports;
// RequireJS
} else if (typeof define === 'function' && define.amd) {
define(function() {
return mainExports;
});
// <script>
} else if (globalName) {
this[globalName] = mainExports;
}
}
})({"63iPG":[function(require,module,exports) {
var HMR_HOST = null;
var HMR_PORT = 1234;
var HMR_SECURE = false;
var HMR_ENV_HASH = "d751713988987e9331980363e24189ce";
module.bundle.HMR_BUNDLE_ID = "d231a23f43d60e28ed500b93b4f5078c";
// @flow
/*global HMR_HOST, HMR_PORT, HMR_ENV_HASH, HMR_SECURE*/
/*::
import type {
HMRAsset,
HMRMessage,
} from '@parcel/reporter-dev-server/src/HMRServer.js';
interface ParcelRequire {
(string): mixed;
cache: {|[string]: ParcelModule|};
hotData: mixed;
Module: any;
parent: ?ParcelRequire;
isParcelRequire: true;
modules: {|[string]: [Function, {|[string]: string|}]|};
HMR_BUNDLE_ID: string;
root: ParcelRequire;
}
interface ParcelModule {
hot: {|
data: mixed,
accept(cb: (Function) => void): void,
dispose(cb: (mixed) => void): void,
// accept(deps: Array<string> | string, cb: (Function) => void): void,
// decline(): void,
_acceptCallbacks: Array<(Function) => void>,
_disposeCallbacks: Array<(mixed) => void>,
|};
}
declare var module: {bundle: ParcelRequire, ...};
declare var HMR_HOST: string;
declare var HMR_PORT: string;
declare var HMR_ENV_HASH: string;
declare var HMR_SECURE: boolean;
*/
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || (function () {}));
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = undefined;
}
module.bundle.Module = Module;
var checkedAssets, /*: {|[string]: boolean|}*/
acceptedAssets, /*: {|[string]: boolean|}*/
/*: {|[string]: boolean|}*/
assetsToAccept;
function getHostname() {
return HMR_HOST || (location.protocol.indexOf('http') === 0 ? location.hostname : 'localhost');
}
function getPort() {
return HMR_PORT || location.port;
}
// eslint-disable-next-line no-redeclare
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = getHostname();
var port = getPort();
var protocol = HMR_SECURE || location.protocol == 'https:' && !(/localhost|127.0.0.1|0.0.0.0/).test(hostname) ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + (port ? ':' + port : '') + '/');
// $FlowFixMe
ws.onmessage = function (event) /*: {data: string, ...}*/
{
checkedAssets = {
/*: {|[string]: boolean|}*/
};
acceptedAssets = {
/*: {|[string]: boolean|}*/
};
assetsToAccept = [];
var data = /*: HMRMessage*/
JSON.parse(event.data);
if (data.type === 'update') {
// Remove error overlay if there is one
removeErrorOverlay();
let assets = data.assets.filter(asset => asset.envHash === HMR_ENV_HASH);
// Handle HMR Update
var handled = false;
assets.forEach(asset => {
var didAccept = asset.type === 'css' || asset.type === 'js' && hmrAcceptCheck(module.bundle.root, asset.id, asset.depsByBundle);
if (didAccept) {
handled = true;
}
});
if (handled) {
console.clear();
assets.forEach(function (asset) {
hmrApply(module.bundle.root, asset);
});
for (var i = 0; i < assetsToAccept.length; i++) {
var id = assetsToAccept[i][1];
if (!acceptedAssets[id]) {
hmrAcceptRun(assetsToAccept[i][0], id);
}
}
} else {
window.location.reload();
}
}
if (data.type === 'error') {
// Log parcel errors to console
for (let ansiDiagnostic of data.diagnostics.ansi) {
let stack = ansiDiagnostic.codeframe ? ansiDiagnostic.codeframe : ansiDiagnostic.stack;
console.error('🚨 [parcel]: ' + ansiDiagnostic.message + '\n' + stack + '\n\n' + ansiDiagnostic.hints.join('\n'));
}
// Render the fancy html overlay
removeErrorOverlay();
var overlay = createErrorOverlay(data.diagnostics.html);
// $FlowFixMe
document.body.appendChild(overlay);
}
};
ws.onerror = function (e) {
console.error(e.message);
};
ws.onclose = function (e) {
if (undefined !== 'test') {
console.warn('[parcel] 🚨 Connection to the HMR server was lost');
}
};
}
function removeErrorOverlay() {
var overlay = document.getElementById(OVERLAY_ID);
if (overlay) {
overlay.remove();
console.log('[parcel] ✨ Error resolved');
}
}
function createErrorOverlay(diagnostics) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
let errorHTML = '<div style="background: black; opacity: 0.85; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; font-family: Menlo, Consolas, monospace; z-index: 9999;">';
for (let diagnostic of diagnostics) {
let stack = diagnostic.codeframe ? diagnostic.codeframe : diagnostic.stack;
errorHTML += `
<div>
<div style="font-size: 18px; font-weight: bold; margin-top: 2
|
newRequire
|
identifier_name
|
|
index.b4f5078c.js
|
ParcelRequire, ...};
declare var HMR_HOST: string;
declare var HMR_PORT: string;
declare var HMR_ENV_HASH: string;
declare var HMR_SECURE: boolean;
*/
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || (function () {}));
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = undefined;
}
module.bundle.Module = Module;
var checkedAssets, /*: {|[string]: boolean|}*/
acceptedAssets, /*: {|[string]: boolean|}*/
/*: {|[string]: boolean|}*/
assetsToAccept;
function getHostname() {
return HMR_HOST || (location.protocol.indexOf('http') === 0 ? location.hostname : 'localhost');
}
function getPort() {
return HMR_PORT || location.port;
}
// eslint-disable-next-line no-redeclare
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = getHostname();
var port = getPort();
var protocol = HMR_SECURE || location.protocol == 'https:' && !(/localhost|127.0.0.1|0.0.0.0/).test(hostname) ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + (port ? ':' + port : '') + '/');
// $FlowFixMe
ws.onmessage = function (event) /*: {data: string, ...}*/
{
checkedAssets = {
/*: {|[string]: boolean|}*/
};
acceptedAssets = {
/*: {|[string]: boolean|}*/
};
assetsToAccept = [];
var data = /*: HMRMessage*/
JSON.parse(event.data);
if (data.type === 'update') {
// Remove error overlay if there is one
removeErrorOverlay();
let assets = data.assets.filter(asset => asset.envHash === HMR_ENV_HASH);
// Handle HMR Update
var handled = false;
assets.forEach(asset => {
var didAccept = asset.type === 'css' || asset.type === 'js' && hmrAcceptCheck(module.bundle.root, asset.id, asset.depsByBundle);
if (didAccept) {
handled = true;
}
});
if (handled) {
console.clear();
assets.forEach(function (asset) {
hmrApply(module.bundle.root, asset);
});
for (var i = 0; i < assetsToAccept.length; i++) {
var id = assetsToAccept[i][1];
if (!acceptedAssets[id]) {
hmrAcceptRun(assetsToAccept[i][0], id);
}
}
} else {
window.location.reload();
}
}
if (data.type === 'error') {
// Log parcel errors to console
for (let ansiDiagnostic of data.diagnostics.ansi) {
let stack = ansiDiagnostic.codeframe ? ansiDiagnostic.codeframe : ansiDiagnostic.stack;
console.error('🚨 [parcel]: ' + ansiDiagnostic.message + '\n' + stack + '\n\n' + ansiDiagnostic.hints.join('\n'));
}
// Render the fancy html overlay
removeErrorOverlay();
var overlay = createErrorOverlay(data.diagnostics.html);
// $FlowFixMe
document.body.appendChild(overlay);
}
};
ws.onerror = function (e) {
console.error(e.message);
};
ws.onclose = function (e) {
if (undefined !== 'test') {
console.warn('[parcel] 🚨 Connection to the HMR server was lost');
}
};
}
function removeErrorOverlay() {
va
|
n createErrorOverlay(diagnostics) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
let errorHTML = '<div style="background: black; opacity: 0.85; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; font-family: Menlo, Consolas, monospace; z-index: 9999;">';
for (let diagnostic of diagnostics) {
let stack = diagnostic.codeframe ? diagnostic.codeframe : diagnostic.stack;
errorHTML += `
<div>
<div style="font-size: 18px; font-weight: bold; margin-top: 20px;">
🚨 ${diagnostic.message}
</div>
<pre>
${stack}
</pre>
<div>
${diagnostic.hints.map(hint => '<div>' + hint + '</div>').join('')}
</div>
</div>
`;
}
errorHTML += '</div>';
overlay.innerHTML = errorHTML;
return overlay;
}
function getParents(bundle, id) /*: Array<[ParcelRequire, string]>*/
{
var modules = bundle.modules;
if (!modules) {
return [];
}
var parents = [];
var k, d, dep;
for (k in modules) {
for (d in modules[k][1]) {
dep = modules[k][1][d];
if (dep === id || Array.isArray(dep) && dep[dep.length - 1] === id) {
parents.push([bundle, k]);
}
}
}
if (bundle.parent) {
parents = parents.concat(getParents(bundle.parent, id));
}
return parents;
}
function updateLink(link) {
var newLink = link.cloneNode();
newLink.onload = function () {
if (link.parentNode !== null) {
// $FlowFixMe
link.parentNode.removeChild(link);
}
};
newLink.setAttribute('href', // $FlowFixMe
link.getAttribute('href').split('?')[0] + '?' + Date.now());
// $FlowFixMe
link.parentNode.insertBefore(newLink, link.nextSibling);
}
var cssTimeout = null;
function reloadCSS() {
if (cssTimeout) {
return;
}
cssTimeout = setTimeout(function () {
var links = document.querySelectorAll('link[rel="stylesheet"]');
for (var i = 0; i < links.length; i++) {
// $FlowFixMe[incompatible-type]
var href = /*: string*/
links[i].getAttribute('href');
var hostname = getHostname();
var servedFromHMRServer = hostname === 'localhost' ? new RegExp('^(https?:\\/\\/(0.0.0.0|127.0.0.1)|localhost):' + getPort()).test(href) : href.indexOf(hostname + ':' + getPort());
var absolute = (/^https?:\/\//i).test(href) && href.indexOf(window.location.origin) !== 0 && !servedFromHMRServer;
if (!absolute) {
updateLink(links[i]);
}
}
cssTimeout = null;
}, 50);
}
function hmrApply(bundle, /*: ParcelRequire*/
asset) /*: HMRAsset*/
{
var modules = bundle.modules;
if (!modules) {
return;
}
if (asset.type === 'css') {
reloadCSS();
return;
}
let deps = asset.depsByBundle[bundle.HMR_BUNDLE_ID];
if (deps) {
var fn = new Function('require', 'module', 'exports', asset.output);
modules[asset.id] = [fn, deps];
} else if (bundle.parent) {
hmrApply(bundle.parent, asset);
}
}
function hmrAcceptCheck(bundle, /*: ParcelRequire*/
id, /*: ParcelRequire*/
/*: string*/
depsByBundle) /*: ?{ [string]: { [string]: string } }*/
{
var modules = bundle.modules;
if (!modules) {
return;
}
if (depsByBundle && !depsByBundle[bundle.HMR_BUNDLE_ID]) {
// If we reached the root bundle without finding where the asset should go,
// there's nothing to do. Mark as "accepted" so we don't reload the page.
if (!bundle.parent) {
return true;
}
return hmrAcceptCheck(bundle.parent, id, depsByBundle);
}
if (checkedAssets[id]) {
return;
}
checkedAssets[id] = true;
var cached = bundle.cache[id];
assetsToAccept.push([bundle, id]);
if (cached && cached.hot && cached.hot._acceptCallbacks.length) {
return true;
}
return getParents(module.bundle.root, id).some(function (v) {
return hmrAcceptCheck(v[0], v[1], null);
});
}
function hmrAcceptRun(bundle, /*: ParcelRequire*/
id) /*: string*/
{
var cached = bundle.cache[id];
bundle.hotData = {};
if (cached && cached.hot) {
cached.hot.data = bundle.hotData;
}
if (cached && cached.hot && cached.hot
|
r overlay = document.getElementById(OVERLAY_ID);
if (overlay) {
overlay.remove();
console.log('[parcel] ✨ Error resolved');
}
}
functio
|
identifier_body
|
index.b4f5078c.js
|
string;
declare var HMR_SECURE: boolean;
*/
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || (function () {}));
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = undefined;
}
module.bundle.Module = Module;
var checkedAssets, /*: {|[string]: boolean|}*/
acceptedAssets, /*: {|[string]: boolean|}*/
/*: {|[string]: boolean|}*/
assetsToAccept;
function getHostname() {
return HMR_HOST || (location.protocol.indexOf('http') === 0 ? location.hostname : 'localhost');
}
function getPort() {
return HMR_PORT || location.port;
}
// eslint-disable-next-line no-redeclare
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = getHostname();
var port = getPort();
var protocol = HMR_SECURE || location.protocol == 'https:' && !(/localhost|127.0.0.1|0.0.0.0/).test(hostname) ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + (port ? ':' + port : '') + '/');
// $FlowFixMe
ws.onmessage = function (event) /*: {data: string, ...}*/
{
checkedAssets = {
/*: {|[string]: boolean|}*/
};
acceptedAssets = {
/*: {|[string]: boolean|}*/
};
assetsToAccept = [];
var data = /*: HMRMessage*/
JSON.parse(event.data);
if (data.type === 'update') {
// Remove error overlay if there is one
removeErrorOverlay();
let assets = data.assets.filter(asset => asset.envHash === HMR_ENV_HASH);
// Handle HMR Update
var handled = false;
assets.forEach(asset => {
var didAccept = asset.type === 'css' || asset.type === 'js' && hmrAcceptCheck(module.bundle.root, asset.id, asset.depsByBundle);
if (didAccept) {
handled = true;
}
});
if (handled) {
console.clear();
assets.forEach(function (asset) {
hmrApply(module.bundle.root, asset);
});
for (var i = 0; i < assetsToAccept.length; i++) {
var id = assetsToAccept[i][1];
if (!acceptedAssets[id]) {
hmrAcceptRun(assetsToAccept[i][0], id);
}
}
} else {
window.location.reload();
}
}
if (data.type === 'error') {
// Log parcel errors to console
for (let ansiDiagnostic of data.diagnostics.ansi) {
let stack = ansiDiagnostic.codeframe ? ansiDiagnostic.codeframe : ansiDiagnostic.stack;
console.error('🚨 [parcel]: ' + ansiDiagnostic.message + '\n' + stack + '\n\n' + ansiDiagnostic.hints.join('\n'));
}
// Render the fancy html overlay
removeErrorOverlay();
var overlay = createErrorOverlay(data.diagnostics.html);
// $FlowFixMe
document.body.appendChild(overlay);
}
};
ws.onerror = function (e) {
console.error(e.message);
};
ws.onclose = function (e) {
if (undefined !== 'test') {
console.warn('[parcel] 🚨 Connection to the HMR server was lost');
}
};
}
function removeErrorOverlay() {
var overlay = document.getElementById(OVERLAY_ID);
if (overlay) {
overlay.remove();
console.log('[parcel] ✨ Error resolved');
}
}
function createErrorOverlay(diagnostics) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
let errorHTML = '<div style="background: black; opacity: 0.85; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; font-family: Menlo, Consolas, monospace; z-index: 9999;">';
for (let diagnostic of diagnostics) {
let stack = diagnostic.codeframe ? diagnostic.codeframe : diagnostic.stack;
errorHTML += `
<div>
<div style="font-size: 18px; font-weight: bold; margin-top: 20px;">
🚨 ${diagnostic.message}
</div>
<pre>
${stack}
</pre>
<div>
${diagnostic.hints.map(hint => '<div>' + hint + '</div>').join('')}
</div>
</div>
`;
}
errorHTML += '</div>';
overlay.innerHTML = errorHTML;
return overlay;
}
function getParents(bundle, id) /*: Array<[ParcelRequire, string]>*/
{
var modules = bundle.modules;
if (!modules) {
return [];
}
var parents = [];
var k, d, dep;
for (k in modules) {
for (d in modules[k][1]) {
dep = modules[k][1][d];
if (dep === id || Array.isArray(dep) && dep[dep.length - 1] === id) {
parents.push([bundle, k]);
}
}
}
if (bundle.parent) {
parents = parents.concat(getParents(bundle.parent, id));
}
return parents;
}
function updateLink(link) {
var newLink = link.cloneNode();
newLink.onload = function () {
if (link.parentNode !== null) {
// $FlowFixMe
link.parentNode.removeChild(link);
}
};
newLink.setAttribute('href', // $FlowFixMe
link.getAttribute('href').split('?')[0] + '?' + Date.now());
// $FlowFixMe
link.parentNode.insertBefore(newLink, link.nextSibling);
}
var cssTimeout = null;
function reloadCSS() {
if (cssTimeout) {
return;
}
cssTimeout = setTimeout(function () {
var links = document.querySelectorAll('link[rel="stylesheet"]');
for (var i = 0; i < links.length; i++) {
// $FlowFixMe[incompatible-type]
var href = /*: string*/
links[i].getAttribute('href');
var hostname = getHostname();
var servedFromHMRServer = hostname === 'localhost' ? new RegExp('^(https?:\\/\\/(0.0.0.0|127.0.0.1)|localhost):' + getPort()).test(href) : href.indexOf(hostname + ':' + getPort());
var absolute = (/^https?:\/\//i).test(href) && href.indexOf(window.location.origin) !== 0 && !servedFromHMRServer;
if (!absolute) {
updateLink(links[i]);
}
}
cssTimeout = null;
}, 50);
}
function hmrApply(bundle, /*: ParcelRequire*/
asset) /*: HMRAsset*/
{
var modules = bundle.modules;
if (!modules) {
return;
}
if (asset.type === 'css') {
reloadCSS();
return;
}
let deps = asset.depsByBundle[bundle.HMR_BUNDLE_ID];
if (deps) {
var fn = new Function('require', 'module', 'exports', asset.output);
modules[asset.id] = [fn, deps];
} else if (bundle.parent) {
hmrApply(bundle.parent, asset);
}
}
function hmrAcceptCheck(bundle, /*: ParcelRequire*/
id, /*: ParcelRequire*/
/*: string*/
depsByBundle) /*: ?{ [string]: { [string]: string } }*/
{
var modules = bundle.modules;
if (!modules) {
return;
}
if (depsByBundle && !depsByBundle[bundle.HMR_BUNDLE_ID]) {
// If we reached the root bundle without finding where the asset should go,
// there's nothing to do. Mark as "accepted" so we don't reload the page.
if (!bundle.parent) {
return true;
}
return hmrAcceptCheck(bundle.parent, id, depsByBundle);
}
if (checkedAssets[id]) {
return;
}
checkedAssets[id] = true;
var cached = bundle.cache[id];
assetsToAccept.push([bundle, id]);
if (cached && cached.hot && cached.hot._acceptCallbacks.length) {
return true;
}
return getParents(module.bundle.root, id).some(function (v) {
return hmrAcceptCheck(v[0], v[1], null);
});
}
function hmrAcceptRun(bundle, /*: ParcelRequire*/
id) /*: string*/
{
var cached = bundle.cache[id];
bundle.hotData = {};
if (cached && cached.hot) {
cached.hot.data = bundle.hotData;
}
if (cached && cached.hot && cached.hot._disposeCallbacks.length) {
cache
|
d.hot._disposeCallbacks.forEach(function (cb) {
cb(bundle.hotData);
});
}
delete b
|
conditional_block
|
|
Simple-Linear-Regression.py
|
c + m_1x_1 + m_2x_2 + ... + m_nx_n$
#
# - $y$ is the response
# - $c$ is the intercept
# - $m_1$ is the coefficient for the first feature
# - $m_n$ is the coefficient for the nth feature<br>
#
# In our case:
#
# $y = c + m_1 \times TV$
#
# The $m$ values are called the model **coefficients** or **model parameters**.
#
# ---
# ### Generic Steps in model building using `statsmodels`
#
# We first assign the feature variable, `TV`, in this case, to the variable `X` and the response variable, `Sales`, to the variable `y`.
# In[10]:
X = advertising['TV']
y = advertising['Sales']
# #### Train-Test Split
#
# You now need to split the variables into training and testing sets. You'll perform this by importing `train_test_split` from the `sklearn.model_selection` library. It is usually good practice to keep 70% of the data in your train dataset and the remaining 30% in your test dataset
# In[11]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.7, test_size = 0.3, random_state = 100)
# In[12]:
# Let's now take a look at the train dataset
X_train.head()
# In[13]:
y_train.head()
# #### Building a Linear Model
#
# You first need to import the `statsmodels.api` library, which you'll use to perform the linear regression.
# In[14]:
import statsmodels.api as sm
# By default, the `statsmodels` library fits a line that passes through the origin. In order to have an intercept, you need to add one manually using the `add_constant` attribute of `statsmodels`. Once you've added the constant to your `X_train` dataset, you can go ahead and fit a regression line using the `OLS` (Ordinary Least Squares) attribute of `statsmodels`, as shown below.
# In[15]:
# Add a constant to get an intercept
X_train_sm = sm.add_constant(X_train)
# Fit the regression line using 'OLS'
lr = sm.OLS(y_train, X_train_sm).fit()
# In[16]:
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
lr.params
# In[17]:
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(lr.summary())
# #### Looking at some key statistics from the summary
# The values we are concerned with are -
# 1. The coefficients and significance (p-values)
# 2. R-squared
# 3. F statistic and its significance
# ##### 1. The coefficient for TV is 0.054, with a very low p value
# The coefficient is statistically significant. So the association is not purely by chance.
# ##### 2. R - squared is 0.816
# Meaning that 81.6% of the variance in `Sales` is explained by `TV`
#
# This is a decent R-squared value.
# ##### 3. F statistic has a very low p value (practically zero)
# Meaning that the model fit is statistically significant, and the explained variance isn't purely by chance.
# ---
# The fit is significant. Let's visualize how well the model fit the data.
#
# From the parameters that we get, our linear regression equation becomes:
#
# $ Sales = 6.948 + 0.054 \times TV $
# In[23]:
plt.scatter(X_train, y_train)
plt.plot(X_train, 6.948 + 0.054*X_train, 'r')
plt.show()
# ## Step 4: Residual analysis
# To validate the assumptions of the model, and hence the reliability of inference
# #### Distribution of the error terms
# We need to check whether the error terms are also normally distributed (which is, in fact, one of the major assumptions of linear regression). Let us plot the histogram of the error terms and see what it looks like.
# In[19]:
y_train_pred = lr.predict(X_train_sm)
res = (y_train - y_train_pred)
# In[24]:
fig = plt.figure()
sns.distplot(res, bins = 15)
fig.suptitle('Error Terms', fontsize = 15) # Plot heading
plt.xlabel('y_train - y_train_pred', fontsize = 15) # X-label
plt.show()
# The residuals appear to be normally distributed with mean 0. All good!
# #### Looking for patterns in the residuals
# In[22]:
plt.scatter(X_train,res)
plt.show()
# We are confident that the model fit isn't by chance, and has decent predictive power. The normality of residual terms allows some inference on the coefficients.
#
# However, the variance of the residuals increasing with X indicates that there is significant variation that this model is unable to explain; a quick check of this is sketched below.
# As you can see, the regression line is a pretty good fit to the data.
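# In[ ]:
# A quick check of the increasing spread (a sketch, not part of the original notebook):
# bin the TV values into quartiles and look at the residual standard deviation per bin.
import pandas as pd
_spread = pd.DataFrame({'TV': X_train, 'res': res})
_spread['bin'] = pd.qcut(_spread['TV'], q=4)
print(_spread.groupby('bin')['res'].std())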
# ## Step 5: Predictions on the Test Set
#
# Now that you have fitted a regression line on your train dataset, it's time to make some predictions on the test data. For this, you first need to add a constant to the `X_test` data like you did for `X_train` and then you can simply go on and predict the y values corresponding to `X_test` using the `predict` attribute of the fitted regression line.
# In[25]:
# Add a constant to X_test
X_test_sm = sm.add_constant(X_test)
# Predict the y values corresponding to X_test_sm
y_pred = lr.predict(X_test_sm)
# In[26]:
y_pred.head()
# In[27]:
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# ##### Looking at the RMSE
# In[28]:
#Returns the mean squared error; we'll take a square root
np.sqrt(mean_squared_error(y_test, y_pred))
# ##### Checking the R-squared on the test set
# In[29]:
r_squared = r2_score(y_test, y_pred)
r_squared
# ##### Visualizing the fit on the test set
# In[27]:
plt.scatter(X_test, y_test)
plt.plot(X_test, 6.948 + 0.054 * X_test, 'r')
plt.show()
#
#
#
#
# ### Optional Step: Linear Regression using `linear_model` in `sklearn`
#
# Apart from `statsmodels`, there is another package, namely `sklearn`, that can be used to perform linear regression. We will use the `linear_model` library from `sklearn` to build the model. Since we have already performed a train-test split, we don't need to do it again.
#
# There's one small step that we need to add, though. When there's only a single feature, `sklearn` expects a 2-D array of shape (n_samples, 1), so we need to reshape the feature into an additional column dimension for the fit to be performed successfully.
# In[37]:
from sklearn.model_selection import train_test_split
X_train_lm, X_test_lm, y_train_lm, y_test_lm = train_test_split(X, y, train_size = 0.7, test_size = 0.3, random_state = 100)
# In[38]:
X_train_lm.shape
# In[39]:
print(X_train.head())
print(X_train_lm[:5])
# In[40]:
X_train_lm = X_train_lm.values.reshape(-1,1)  # use .values: newer pandas Series no longer expose .reshape directly
X_test_lm = X_test_lm.values.reshape(-1,1)
# In[41]:
print(X_train_lm[:5,])
# In[42]:
print(X_train_lm.shape)
print(y_train_lm.shape)
print(X_test_lm.shape)
print(y_test_lm.shape)
# In[43]:
from sklearn.linear_model import LinearRegression
# Create a LinearRegression object (here named lm)
lm = LinearRegression()
# Fit the model using lm.fit()
lm.fit(X_train_lm, y_train_lm)
# In[44]:
print(lm.intercept_)
print(lm.coef_)
# The equation we get is the same as what we got before!
#
# $ Sales = 6.948 + 0.054 \times TV $
# The `sklearn` linear model is useful as it is compatible with a lot of `sklearn` utilities (cross-validation, grid search, etc.); a brief illustration follows.
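# In[ ]:
# A brief illustration of that compatibility (a sketch, not part of the original notebook):
# 5-fold cross-validated R-squared for the same single-feature model.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
cv_scores = cross_val_score(LinearRegression(), X.values.reshape(-1, 1), y, cv=5, scoring='r2')
print(cv_scores.mean())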
# ---
# ## Addressing some common questions/doubts on Simple Linear Regression
# ---
# ### Q: Why is it called 'R-squared'?
# Based on what we learnt so far, do you see it? Can you answer this?
#
#
#
#
#
#
# .
#
# .
#
# .
#
# .
#
# #### Drumroll...
#
# .
#
# .
#
# .
#
# .
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
|
#
#
#
# In[45]:
|
random_line_split
|
|
ctl.rs
|
action: NtpCtlAction,
}
impl NtpDaemonOptions {
const TAKES_ARGUMENT: &[&'static str] = &["--config", "--format"];
const TAKES_ARGUMENT_SHORT: &[char] = &['c', 'f'];
/// parse an iterator over command line arguments
pub fn try_parse_from<I, T>(iter: I) -> Result<Self, String>
where
I: IntoIterator<Item = T>,
T: AsRef<str> + Clone,
{
let mut options = NtpDaemonOptions::default();
let mut it = iter.into_iter().map(|x| x.as_ref().to_string()).peekable();
match it.peek().map(|x| x.as_str()) {
Some("validate") => {
let _ = it.next();
options.validate = true;
}
Some("status") => {
let _ = it.next();
options.status = true;
}
_ => { /* do nothing */ }
};
let arg_iter =
CliArg::normalize_arguments(Self::TAKES_ARGUMENT, Self::TAKES_ARGUMENT_SHORT, it)?
.into_iter()
.peekable();
for arg in arg_iter {
match arg {
CliArg::Flag(flag) => match flag.as_str() {
"-h" | "--help" => {
options.help = true;
}
"-v" | "--version" => {
options.version = true;
}
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Argument(option, value) => match option.as_str() {
"-c" | "--config" => {
options.config = Some(PathBuf::from(value));
}
"-f" | "--format" => match value.as_str() {
"plain" => options.format = Format::Plain,
"prometheus" => options.format = Format::Prometheus,
_ => Err(format!("invalid format option provided: {value}"))?,
},
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Rest(_rest) => { /* do nothing, drop remaining arguments */ }
}
}
options.resolve_action();
// nothing to validate at the moment
Ok(options)
}
/// from the arguments resolve which action should be performed
fn resolve_action(&mut self) {
if self.help {
self.action = NtpCtlAction::Help;
} else if self.version {
self.action = NtpCtlAction::Version;
} else if self.validate {
self.action = NtpCtlAction::Validate;
} else if self.status {
self.action = NtpCtlAction::Status;
} else {
self.action = NtpCtlAction::Help;
}
}
}
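// A minimal usage sketch (not part of the original file): exercising the parser above with a
// hypothetical `status --format prometheus` invocation; field and type names follow the code above.
#[cfg(test)]
mod parse_sketch {
    use super::*;

    #[test]
    fn status_with_prometheus_format_is_parsed() {
        let options =
            NtpDaemonOptions::try_parse_from(vec!["status", "--format", "prometheus"]).unwrap();
        assert!(options.status);
        assert!(matches!(options.action, NtpCtlAction::Status));
    }
}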
async fn validate(config: Option<PathBuf>) -> std::io::Result<ExitCode> {
// Late completion not needed, so ignore result.
crate::daemon::tracing::tracing_init(LogLevel::Info).init();
match Config::from_args(config, vec![], vec![]).await {
Ok(config) => {
if config.check() {
eprintln!("Config looks good");
Ok(ExitCode::SUCCESS)
} else {
Ok(ExitCode::FAILURE)
}
}
Err(e) => {
eprintln!("Error: Could not load configuration: {e}");
Ok(ExitCode::FAILURE)
}
}
}
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub async fn
|
() -> std::io::Result<ExitCode> {
let options = match NtpDaemonOptions::try_parse_from(std::env::args()) {
Ok(options) => options,
Err(msg) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)),
};
match options.action {
NtpCtlAction::Help => {
println!("{}", long_help_message());
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Version => {
eprintln!("ntp-ctl {VERSION}");
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Validate => validate(options.config).await,
NtpCtlAction::Status => {
let config = Config::from_args(options.config, vec![], vec![]).await;
if let Err(ref e) = config {
println!("Warning: Unable to load configuration file: {e}");
}
let config = config.unwrap_or_default();
let observation = config
.observability
.observe
.observation_path
.unwrap_or_else(|| PathBuf::from("/run/ntpd-rs/observe"));
match options.format {
Format::Plain => print_state(Format::Plain, observation).await,
Format::Prometheus => print_state(Format::Prometheus, observation).await,
}
}
}
}
async fn print_state(print: Format, observe_socket: PathBuf) -> Result<ExitCode, std::io::Error> {
let mut stream = match tokio::net::UnixStream::connect(&observe_socket).await {
Ok(stream) => stream,
Err(e) => {
eprintln!("Could not open socket at {}: {e}", observe_socket.display(),);
return Ok(ExitCode::FAILURE);
}
};
let mut msg = Vec::with_capacity(16 * 1024);
let output =
match crate::daemon::sockets::read_json::<ObservableState>(&mut stream, &mut msg).await {
Ok(output) => output,
Err(e) => {
eprintln!("Failed to read state from observation socket: {e}");
return Ok(ExitCode::FAILURE);
}
};
match print {
Format::Plain => {
println!("Synchronization status:");
println!(
"Dispersion: {}s, Delay: {}s",
output.system.time_snapshot.root_dispersion.to_seconds(),
output.system.time_snapshot.root_delay.to_seconds()
);
println!(
"Desired poll interval: {}s",
output
.system
.time_snapshot
.poll_interval
.as_duration()
.to_seconds()
);
println!("Stratum: {}", output.system.stratum);
println!();
println!("Peers:");
for peer in &output.peers {
match peer {
crate::daemon::ObservablePeerState::Nothing => {}
crate::daemon::ObservablePeerState::Observable(
crate::daemon::ObservedPeerState {
timedata,
unanswered_polls,
poll_interval,
address,
id,
},
) => {
println!(
"{} ({}): {}±{}(±{})s\n pollinterval: {}s, missing polls: {}",
address,
id,
timedata.offset.to_seconds(),
timedata.uncertainty.to_seconds(),
timedata.delay.to_seconds(),
poll_interval.as_duration().to_seconds(),
unanswered_polls
);
}
}
}
let in_startup = output
.peers
.iter()
.filter(|peer| matches!(peer, crate::daemon::ObservablePeerState::Nothing))
.count();
match in_startup {
0 => {} // no peers in startup, so no line for that
1 => println!("1 peer still in startup"),
_ => println!("{} peers still in startup", in_startup),
}
println!();
println!("Servers:");
for server in &output.servers {
println!(
"{}: received {}, accepted {}, errors {}",
server.address,
server.stats.received_packets.get(),
server.stats.accepted_packets.get(),
server.stats.response_send_errors.get()
);
println!(
" denied {}, rate limited {}, ignored {}",
server.stats.denied_packets.get(),
server.stats.rate_limited_packets.get(),
server.stats.ignored_packets.get()
);
}
}
Format::Prometheus => {
let mut buf = String::new();
if let Err(e) = crate::metrics::format_state(&mut buf, &output) {
eprintln!("Failed to encode prometheus data: {e}");
return Ok(ExitCode::FAILURE);
}
println!("{buf}");
}
}
Ok(ExitCode::SUCCESS)
}
#[cfg(test)]
mod tests {
use std::os::unix::prelude::PermissionsExt;
use std::path::Path;
use crate::daemon::{
config::ObserveConfig,
sockets::{create_unix_socket, write_json},
};
use super::*;
async fn write_socket_helper(
command: Format,
socket_name: &str,
) -> std::io::Result<Result<ExitCode, std::io::Error>> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join(socket_name);
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(command, path);
let handle = tokio::spawn(fut);
let value = ObservableState {
system: Default::default(),
peers: vec![],
servers: vec![],
};
let (mut stream, _
|
main
|
identifier_name
|
ctl.rs
|
,
action: NtpCtlAction,
}
impl NtpDaemonOptions {
const TAKES_ARGUMENT: &[&'static str] = &["--config", "--format"];
const TAKES_ARGUMENT_SHORT: &[char] = &['c', 'f'];
/// parse an iterator over command line arguments
pub fn try_parse_from<I, T>(iter: I) -> Result<Self, String>
where
I: IntoIterator<Item = T>,
T: AsRef<str> + Clone,
{
let mut options = NtpDaemonOptions::default();
let mut it = iter.into_iter().map(|x| x.as_ref().to_string()).peekable();
match it.peek().map(|x| x.as_str()) {
Some("validate") => {
let _ = it.next();
options.validate = true;
}
Some("status") => {
let _ = it.next();
options.status = true;
}
_ => { /* do nothing */ }
};
let arg_iter =
CliArg::normalize_arguments(Self::TAKES_ARGUMENT, Self::TAKES_ARGUMENT_SHORT, it)?
.into_iter()
.peekable();
for arg in arg_iter {
match arg {
CliArg::Flag(flag) => match flag.as_str() {
"-h" | "--help" => {
options.help = true;
}
"-v" | "--version" => {
options.version = true;
}
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Argument(option, value) => match option.as_str() {
"-c" | "--config" => {
options.config = Some(PathBuf::from(value));
}
"-f" | "--format" => match value.as_str() {
"plain" => options.format = Format::Plain,
"prometheus" => options.format = Format::Prometheus,
_ => Err(format!("invalid format option provided: {value}"))?,
},
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Rest(_rest) => { /* do nothing, drop remaining arguments */ }
}
}
options.resolve_action();
// nothing to validate at the moment
Ok(options)
}
/// from the arguments resolve which action should be performed
fn resolve_action(&mut self) {
if self.help {
self.action = NtpCtlAction::Help;
} else if self.version {
self.action = NtpCtlAction::Version;
} else if self.validate {
self.action = NtpCtlAction::Validate;
} else if self.status {
self.action = NtpCtlAction::Status;
} else {
self.action = NtpCtlAction::Help;
}
}
}
async fn validate(config: Option<PathBuf>) -> std::io::Result<ExitCode> {
// Late completion not needed, so ignore result.
crate::daemon::tracing::tracing_init(LogLevel::Info).init();
match Config::from_args(config, vec![], vec![]).await {
Ok(config) => {
if config.check() {
eprintln!("Config looks good");
Ok(ExitCode::SUCCESS)
} else {
Ok(ExitCode::FAILURE)
}
}
Err(e) => {
eprintln!("Error: Could not load configuration: {e}");
Ok(ExitCode::FAILURE)
}
}
}
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub async fn main() -> std::io::Result<ExitCode> {
let options = match NtpDaemonOptions::try_parse_from(std::env::args()) {
Ok(options) => options,
Err(msg) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)),
};
match options.action {
NtpCtlAction::Help => {
println!("{}", long_help_message());
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Version => {
eprintln!("ntp-ctl {VERSION}");
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Validate => validate(options.config).await,
NtpCtlAction::Status => {
let config = Config::from_args(options.config, vec![], vec![]).await;
if let Err(ref e) = config {
println!("Warning: Unable to load configuration file: {e}");
}
|
let observation = config
.observability
.observe
.observation_path
.unwrap_or_else(|| PathBuf::from("/run/ntpd-rs/observe"));
match options.format {
Format::Plain => print_state(Format::Plain, observation).await,
Format::Prometheus => print_state(Format::Prometheus, observation).await,
}
}
}
}
async fn print_state(print: Format, observe_socket: PathBuf) -> Result<ExitCode, std::io::Error> {
let mut stream = match tokio::net::UnixStream::connect(&observe_socket).await {
Ok(stream) => stream,
Err(e) => {
eprintln!("Could not open socket at {}: {e}", observe_socket.display(),);
return Ok(ExitCode::FAILURE);
}
};
let mut msg = Vec::with_capacity(16 * 1024);
let output =
match crate::daemon::sockets::read_json::<ObservableState>(&mut stream, &mut msg).await {
Ok(output) => output,
Err(e) => {
eprintln!("Failed to read state from observation socket: {e}");
return Ok(ExitCode::FAILURE);
}
};
match print {
Format::Plain => {
println!("Synchronization status:");
println!(
"Dispersion: {}s, Delay: {}s",
output.system.time_snapshot.root_dispersion.to_seconds(),
output.system.time_snapshot.root_delay.to_seconds()
);
println!(
"Desired poll interval: {}s",
output
.system
.time_snapshot
.poll_interval
.as_duration()
.to_seconds()
);
println!("Stratum: {}", output.system.stratum);
println!();
println!("Peers:");
for peer in &output.peers {
match peer {
crate::daemon::ObservablePeerState::Nothing => {}
crate::daemon::ObservablePeerState::Observable(
crate::daemon::ObservedPeerState {
timedata,
unanswered_polls,
poll_interval,
address,
id,
},
) => {
println!(
"{} ({}): {}±{}(±{})s\n pollinterval: {}s, missing polls: {}",
address,
id,
timedata.offset.to_seconds(),
timedata.uncertainty.to_seconds(),
timedata.delay.to_seconds(),
poll_interval.as_duration().to_seconds(),
unanswered_polls
);
}
}
}
let in_startup = output
.peers
.iter()
.filter(|peer| matches!(peer, crate::daemon::ObservablePeerState::Nothing))
.count();
match in_startup {
0 => {} // no peers in startup, so no line for that
1 => println!("1 peer still in startup"),
_ => println!("{} peers still in startup", in_startup),
}
println!();
println!("Servers:");
for server in &output.servers {
println!(
"{}: received {}, accepted {}, errors {}",
server.address,
server.stats.received_packets.get(),
server.stats.accepted_packets.get(),
server.stats.response_send_errors.get()
);
println!(
" denied {}, rate limited {}, ignored {}",
server.stats.denied_packets.get(),
server.stats.rate_limited_packets.get(),
server.stats.ignored_packets.get()
);
}
}
Format::Prometheus => {
let mut buf = String::new();
if let Err(e) = crate::metrics::format_state(&mut buf, &output) {
eprintln!("Failed to encode prometheus data: {e}");
return Ok(ExitCode::FAILURE);
}
println!("{buf}");
}
}
Ok(ExitCode::SUCCESS)
}
#[cfg(test)]
mod tests {
use std::os::unix::prelude::PermissionsExt;
use std::path::Path;
use crate::daemon::{
config::ObserveConfig,
sockets::{create_unix_socket, write_json},
};
use super::*;
async fn write_socket_helper(
command: Format,
socket_name: &str,
) -> std::io::Result<Result<ExitCode, std::io::Error>> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join(socket_name);
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(command, path);
let handle = tokio::spawn(fut);
let value = ObservableState {
system: Default::default(),
peers: vec![],
servers: vec![],
};
let (mut stream, _
|
let config = config.unwrap_or_default();
|
random_line_split
|
ctl.rs
|
action: NtpCtlAction,
}
impl NtpDaemonOptions {
const TAKES_ARGUMENT: &[&'static str] = &["--config", "--format"];
const TAKES_ARGUMENT_SHORT: &[char] = &['c', 'f'];
/// parse an iterator over command line arguments
pub fn try_parse_from<I, T>(iter: I) -> Result<Self, String>
where
I: IntoIterator<Item = T>,
T: AsRef<str> + Clone,
{
let mut options = NtpDaemonOptions::default();
let mut it = iter.into_iter().map(|x| x.as_ref().to_string()).peekable();
match it.peek().map(|x| x.as_str()) {
Some("validate") => {
let _ = it.next();
options.validate = true;
}
Some("status") => {
let _ = it.next();
options.status = true;
}
_ => { /* do nothing */ }
};
let arg_iter =
CliArg::normalize_arguments(Self::TAKES_ARGUMENT, Self::TAKES_ARGUMENT_SHORT, it)?
.into_iter()
.peekable();
for arg in arg_iter {
match arg {
CliArg::Flag(flag) => match flag.as_str() {
"-h" | "--help" => {
options.help = true;
}
"-v" | "--version" => {
options.version = true;
}
option =>
|
},
CliArg::Argument(option, value) => match option.as_str() {
"-c" | "--config" => {
options.config = Some(PathBuf::from(value));
}
"-f" | "--format" => match value.as_str() {
"plain" => options.format = Format::Plain,
"prometheus" => options.format = Format::Prometheus,
_ => Err(format!("invalid format option provided: {value}"))?,
},
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Rest(_rest) => { /* do nothing, drop remaining arguments */ }
}
}
options.resolve_action();
// nothing to validate at the moment
Ok(options)
}
/// from the arguments resolve which action should be performed
fn resolve_action(&mut self) {
if self.help {
self.action = NtpCtlAction::Help;
} else if self.version {
self.action = NtpCtlAction::Version;
} else if self.validate {
self.action = NtpCtlAction::Validate;
} else if self.status {
self.action = NtpCtlAction::Status;
} else {
self.action = NtpCtlAction::Help;
}
}
}
async fn validate(config: Option<PathBuf>) -> std::io::Result<ExitCode> {
// Late completion not needed, so ignore result.
crate::daemon::tracing::tracing_init(LogLevel::Info).init();
match Config::from_args(config, vec![], vec![]).await {
Ok(config) => {
if config.check() {
eprintln!("Config looks good");
Ok(ExitCode::SUCCESS)
} else {
Ok(ExitCode::FAILURE)
}
}
Err(e) => {
eprintln!("Error: Could not load configuration: {e}");
Ok(ExitCode::FAILURE)
}
}
}
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub async fn main() -> std::io::Result<ExitCode> {
let options = match NtpDaemonOptions::try_parse_from(std::env::args()) {
Ok(options) => options,
Err(msg) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)),
};
match options.action {
NtpCtlAction::Help => {
println!("{}", long_help_message());
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Version => {
eprintln!("ntp-ctl {VERSION}");
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Validate => validate(options.config).await,
NtpCtlAction::Status => {
let config = Config::from_args(options.config, vec![], vec![]).await;
if let Err(ref e) = config {
println!("Warning: Unable to load configuration file: {e}");
}
let config = config.unwrap_or_default();
let observation = config
.observability
.observe
.observation_path
.unwrap_or_else(|| PathBuf::from("/run/ntpd-rs/observe"));
match options.format {
Format::Plain => print_state(Format::Plain, observation).await,
Format::Prometheus => print_state(Format::Prometheus, observation).await,
}
}
}
}
async fn print_state(print: Format, observe_socket: PathBuf) -> Result<ExitCode, std::io::Error> {
let mut stream = match tokio::net::UnixStream::connect(&observe_socket).await {
Ok(stream) => stream,
Err(e) => {
eprintln!("Could not open socket at {}: {e}", observe_socket.display(),);
return Ok(ExitCode::FAILURE);
}
};
let mut msg = Vec::with_capacity(16 * 1024);
let output =
match crate::daemon::sockets::read_json::<ObservableState>(&mut stream, &mut msg).await {
Ok(output) => output,
Err(e) => {
eprintln!("Failed to read state from observation socket: {e}");
return Ok(ExitCode::FAILURE);
}
};
match print {
Format::Plain => {
println!("Synchronization status:");
println!(
"Dispersion: {}s, Delay: {}s",
output.system.time_snapshot.root_dispersion.to_seconds(),
output.system.time_snapshot.root_delay.to_seconds()
);
println!(
"Desired poll interval: {}s",
output
.system
.time_snapshot
.poll_interval
.as_duration()
.to_seconds()
);
println!("Stratum: {}", output.system.stratum);
println!();
println!("Peers:");
for peer in &output.peers {
match peer {
crate::daemon::ObservablePeerState::Nothing => {}
crate::daemon::ObservablePeerState::Observable(
crate::daemon::ObservedPeerState {
timedata,
unanswered_polls,
poll_interval,
address,
id,
},
) => {
println!(
"{} ({}): {}±{}(±{})s\n pollinterval: {}s, missing polls: {}",
address,
id,
timedata.offset.to_seconds(),
timedata.uncertainty.to_seconds(),
timedata.delay.to_seconds(),
poll_interval.as_duration().to_seconds(),
unanswered_polls
);
}
}
}
let in_startup = output
.peers
.iter()
.filter(|peer| matches!(peer, crate::daemon::ObservablePeerState::Nothing))
.count();
match in_startup {
0 => {} // no peers in startup, so no line for that
1 => println!("1 peer still in startup"),
_ => println!("{} peers still in startup", in_startup),
}
println!();
println!("Servers:");
for server in &output.servers {
println!(
"{}: received {}, accepted {}, errors {}",
server.address,
server.stats.received_packets.get(),
server.stats.accepted_packets.get(),
server.stats.response_send_errors.get()
);
println!(
" denied {}, rate limited {}, ignored {}",
server.stats.denied_packets.get(),
server.stats.rate_limited_packets.get(),
server.stats.ignored_packets.get()
);
}
}
Format::Prometheus => {
let mut buf = String::new();
if let Err(e) = crate::metrics::format_state(&mut buf, &output) {
eprintln!("Failed to encode prometheus data: {e}");
return Ok(ExitCode::FAILURE);
}
println!("{buf}");
}
}
Ok(ExitCode::SUCCESS)
}
#[cfg(test)]
mod tests {
use std::os::unix::prelude::PermissionsExt;
use std::path::Path;
use crate::daemon::{
config::ObserveConfig,
sockets::{create_unix_socket, write_json},
};
use super::*;
async fn write_socket_helper(
command: Format,
socket_name: &str,
) -> std::io::Result<Result<ExitCode, std::io::Error>> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join(socket_name);
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(command, path);
let handle = tokio::spawn(fut);
let value = ObservableState {
system: Default::default(),
peers: vec![],
servers: vec![],
};
let (mut stream,
|
{
Err(format!("invalid option provided: {option}"))?;
}
|
conditional_block
|
MRGnodeHFS.py
|
D': 10,
'n_na': 200,
'HFSreferenceNode': 25,
'HFSdur': 50.0,
'HFSfrequency': 200,
'HFSpolarity': 1.0,
'HFSdelay': 0,
'HFSpulsewidth':0.09,
'HFSamp': 1.154,
'HFSwaveform': 0,
'HFSx': 0.0,
'HFSy': 0.0,
'HFSz': 1000.0,
'intrinsicNode': 0,
'intrinsicDur': 0.1,
'intrinsicAmp': 2.0,
'pattern': np.array([40.0]),
'patternLag': np.array([39.9])
}
g_recpar = {
'record': True,
'plot': False,
'nodes':np.array(range(0,g_par['axonnodes'])),
'filename': 'data/simulation.h5',
'recordVoltage':True,
'downsampleFactor':30
}
def insert_nrn_recorders(segment, labels, rec=None):
'''
Inserts recorders for NEURON state variables.
Use one per segment.
"labels" is a dictionary.
Example {'v': '_ref_v'}.
Specify 'rec' to append to previous recorders.
Records also time if 'rec' is 'None' (default).
(Acknowledgements: Daniele Linaro)
'''
if rec is None:
rec = {'t': h.Vector()}
rec['t'].record(h._ref_t)
for k,v in labels.items():
rec[k] = h.Vector()
rec[k].record(getattr(segment, v))
return rec
def pass_parameters_to_nrn(parameters, exception = [], verb=False):
'''
Passes parameters from a dictionary to NEURON.
If the element is a vector, it assumes that a vector
has been created as objref and new Vector() in the hoc code.
Items in 'exception' list are not submitted.
Set 'verb' to True for verbose.
'''
for k,v in parameters.iteritems():
if k not in exception:
if type(v) is not type(np.array([])):
h("{"+ k + " = "+str(v)+"}")
if verb:
print(k + " = "+str(v))
else:
#exec("h."+ k +".from_python("+str(v)+")")
getattr(h,k).from_python(v)
if verb:
print("h."+k+".from_python("+str(v)+")")
def fix_patternLag_vector(parameters):
'''
Changes the patternLag variable according to the pattern vector.
This is used by the Play method of the vector.
Ideally this should be done in the NEURON script.
'''
if 'pattern' in parameters.keys():
parameters["patternLag"] = np.array([ii-0.005
for ii in
parameters["pattern"]])
else:
print('Key "pattern" not found in this dict. Did nothing.')
def record_node_spikes(nodenumber, rec=None,
apc=None, threshold = -15):
'''
Records the action potentials of a particular set of nodes.
Returns a "rec" dictionary.
'''
if rec is None:
rec = {}
if apc is None:
apc = {}
for n in nodenumber:
apc['apc'+str(n)] = h.APCount(h.node[int(n)](0.5))
apc['apc'+str(n)].thresh = threshold
rec['spk'+str(n)] = h.Vector()
apc['apc'+str(n)].record(rec['spk'+str(n)])
return rec,apc
def record_node_voltage(nodenumber, rec=None):
'''
Records the membrane potential of a particular set of nodes.
'''
rec = None
segments = []
for n in nodenumber:
segments.append(h.node[n](0.5))
for seg,n in zip(segments,nodenumber):
rec = insert_nrn_recorders(seg,{'v_node'+str(n):'_ref_v'},rec)
return rec
def createMRGaxon(par, verbose):
'''
Initializes the model.
Creates the axon and stimulation according to the parameters.
'''
h('{load_file("stdgui.hoc")}')
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,['pattern','patternLag'],verb=verbose)
h('{load_file("MRGnodeHFS.hoc")}')
pass_parameters_to_nrn(par, verb=verbose)
h('{buildModel()}')
def updateMRGaxon(par,verbose):
'''
Updates the parameters of the model.
'''
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,verb=verbose)
h.resetModel()
def recordMRGaxon(recpar,verbose):
'''
Inserts the recorders as specified in recpar.
'''
k = recpar['nodes']
rec = {}
if recpar['recordVoltage']:
|
rec['spiketimes'],rec['apcount'] = record_node_spikes(k)
if verbose:
print('Now recording from '+str(k))
return rec
def resetRecorder(rec,verbose=False):
'''
Clears hoc vectors in spiketimes and voltage and resets apcounts.
'''
for k,o in rec['spiketimes'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
if 'voltage' in rec.keys():
for k,o in rec['voltage'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
for k,o in rec['apcount'].iteritems():
if verbose:
print('Setting apcount ' + k + ' to zero.' )
o.n = 0
def plotMRGaxon(plt, rec, recpar,color=[0,0,0]):
'''
Plots the voltage traces and a rastergram of the
spikes counting ordered by node.
'''
from plot_utils import plotRastergram
fig = plt.figure(figsize=(10,5))
ax = []
ax.append(fig.add_axes([0.1,0.1,0.8,0.2]))
spiketimes = []
n_sptrain = len(spiketimes)
counter = 0
for offset,ii in enumerate(recpar['nodes']):
spiketimes.append(rec['spiketimes']['spk'+str(ii)].to_python())
counter += len(spiketimes[-1])
if counter:
plt.axes(ax[-1])
plotRastergram(spiketimes, 0, None, color)
ax.append(fig.add_axes([0.1,0.4,0.8,0.6]))
voltages = rec['voltage']
time = voltages['t']
n_voltage = len(voltages) - 1
for ii in recpar['nodes']:
ax[-1].plot(time,voltages['v_node'+str(ii)])
return fig
def process_configuration(cp, cfg, metadata, section):
'''
Parse configuration file from default values.
- "cfg" is a ConfigParser file object
- "metadata" a dictionary with default parameters
- "section" is the section to look for in the cfg file
This function looks for the same type of the value in the
metadata dict.
If it is a float or a list, it will look for a value or
evaluate an expression.
'''
output = metadata.copy()
if not cfg.has_section(section):
return output
for option in metadata.keys():
if cfg.has_option(section,option):
if type(metadata[option]) is str:
output[option] = cfg.get(section,option)
elif type(metadata[option]) is int or \
type(metadata[option]) is float or \
type(metadata[option]) is list or \
type(metadata[option])==type(np.array([1])):
try:
output[option] = cfg.getfloat(section,option)
except ValueError:
output[option] = eval(cfg.get(section,option))
elif type(metadata[option]) == bool:
output[option] = cfg.getboolean(section,option)
return output
def append_fiber_to_file(rec,par,recpar,group=None,verbose=False):
'''
Uses h5py to append a fiber to a file.
'''
if verbose:
print('Recording to file '+recpar['filename'])
foldername = path.dirname(os.path.realpath(recpar['filename']))
if not path.isdir(foldername):
os.makedirs(foldername)
fid = h5.File(os.path.realpath(recpar['filename']),'a')
n_fiber = len(fid.keys())
if group is None:
gid = fid.create_group('fiber'+str(n_fiber))
else:
gid = fid.create_group(group)
for k,v in par.iteritems():
gid.attrs[k]=v
for k,v in recpar.iteritems():
gid.attrs[k]=v
if 'spik
|
rec['voltage'] = record_node_voltage(k)
|
conditional_block
|
MRGnodeHFS.py
|
=None):
'''
Inserts recorders for NEURON state variables.
Use one per segment.
"labels" is a dictionary.
Example {'v': '_ref_v'}.
Specify 'rec' to append to previous recorders.
Records also time if 'rec' is 'None' (default).
(Acknowledgements: Daniele Linaro)
'''
if rec is None:
rec = {'t': h.Vector()}
rec['t'].record(h._ref_t)
for k,v in labels.items():
rec[k] = h.Vector()
rec[k].record(getattr(segment, v))
return rec
def pass_parameters_to_nrn(parameters, exception = [], verb=False):
'''
Passes parameters from a dictionary to NEURON.
If the element is a vector, it assumes that a vector
has been created as objref and new Vector() in the hoc code.
Items in 'exception' list are not submitted.
Set 'verb' to True for verbose.
'''
for k,v in parameters.iteritems():
if k not in exception:
if type(v) is not type(np.array([])):
h("{"+ k + " = "+str(v)+"}")
if verb:
print(k + " = "+str(v))
else:
#exec("h."+ k +".from_python("+str(v)+")")
getattr(h,k).from_python(v)
if verb:
print("h."+k+".from_python("+str(v)+")")
def fix_patternLag_vector(parameters):
'''
Changes the patternLag variable according to the pattern vector.
This is used by the Play method of the vector.
Ideally this should be done in the NEURON script.
'''
if 'pattern' in parameters.keys():
parameters["patternLag"] = np.array([ii-0.005
for ii in
parameters["pattern"]])
else:
print('Key "pattern" not found in this dict. Did nothing.')
def record_node_spikes(nodenumber, rec=None,
apc=None, threshold = -15):
'''
Records the action potentials of a particular set of nodes.
Returns a "rec" dictionary.
'''
if rec is None:
rec = {}
if apc is None:
apc = {}
for n in nodenumber:
apc['apc'+str(n)] = h.APCount(h.node[int(n)](0.5))
apc['apc'+str(n)].thresh = threshold
rec['spk'+str(n)] = h.Vector()
apc['apc'+str(n)].record(rec['spk'+str(n)])
return rec,apc
def record_node_voltage(nodenumber, rec=None):
'''
Records the membrane potential of a particular set of nodes.
'''
rec = None
segments = []
for n in nodenumber:
segments.append(h.node[n](0.5))
for seg,n in zip(segments,nodenumber):
rec = insert_nrn_recorders(seg,{'v_node'+str(n):'_ref_v'},rec)
return rec
def createMRGaxon(par, verbose):
'''
Initializes the model.
Creates the axon and stimulation according to the parameters.
'''
h('{load_file("stdgui.hoc")}')
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,['pattern','patternLag'],verb=verbose)
h('{load_file("MRGnodeHFS.hoc")}')
pass_parameters_to_nrn(par, verb=verbose)
h('{buildModel()}')
def updateMRGaxon(par,verbose):
'''
Updates the parameters of the model.
'''
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,verb=verbose)
h.resetModel()
def recordMRGaxon(recpar,verbose):
'''
Inserts the recorders as specified in recpar.
'''
k = recpar['nodes']
rec = {}
if recpar['recordVoltage']:
rec['voltage'] = record_node_voltage(k)
rec['spiketimes'],rec['apcount'] = record_node_spikes(k)
if verbose:
print('Now recording from '+str(k))
return rec
def resetRecorder(rec,verbose=False):
'''
Clears hoc vectors in spiketimes and voltage and resets apcounts.
'''
for k,o in rec['spiketimes'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
if 'voltage' in rec.keys():
for k,o in rec['voltage'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
for k,o in rec['apcount'].iteritems():
if verbose:
print('Setting apcount ' + k + ' to zero.' )
o.n = 0
def plotMRGaxon(plt, rec, recpar,color=[0,0,0]):
'''
Plots the voltage traces and a rastergram of the
spikes counting ordered by node.
'''
from plot_utils import plotRastergram
fig = plt.figure(figsize=(10,5))
ax = []
ax.append(fig.add_axes([0.1,0.1,0.8,0.2]))
spiketimes = []
n_sptrain = len(spiketimes)
counter = 0
for offset,ii in enumerate(recpar['nodes']):
spiketimes.append(rec['spiketimes']['spk'+str(ii)].to_python())
counter += len(spiketimes[-1])
if counter:
plt.axes(ax[-1])
plotRastergram(spiketimes, 0, None, color)
ax.append(fig.add_axes([0.1,0.4,0.8,0.6]))
voltages = rec['voltage']
time = voltages['t']
n_voltage = len(voltages) - 1
for ii in recpar['nodes']:
ax[-1].plot(time,voltages['v_node'+str(ii)])
return fig
def process_configuration(cp, cfg, metadata, section):
'''
Parse configuration file from default values.
- "cfg" is a ConfigParser file object
- "metadata" a dictionary with default parameters
- "section" is the section to look for in the cfg file
This function looks for the same type of the value in the
metadata dict.
If it is a float or a list, it will look for a value or
evaluate an expression.
'''
output = metadata.copy()
if not cfg.has_section(section):
return output
for option in metadata.keys():
if cfg.has_option(section,option):
if type(metadata[option]) is str:
output[option] = cfg.get(section,option)
elif type(metadata[option]) is int or \
type(metadata[option]) is float or \
type(metadata[option]) is list or \
type(metadata[option])==type(np.array([1])):
try:
output[option] = cfg.getfloat(section,option)
except ValueError:
output[option] = eval(cfg.get(section,option))
elif type(metadata[option]) == bool:
output[option] = cfg.getboolean(section,option)
return output
def append_fiber_to_file(rec,par,recpar,group=None,verbose=False):
'''
Uses h5py to append a fiber to a file.
'''
if verbose:
print('Recording to file '+recpar['filename'])
foldername = path.dirname(os.path.realpath(recpar['filename']))
if not path.isdir(foldername):
os.makedirs(foldername)
fid = h5.File(os.path.realpath(recpar['filename']),'a')
n_fiber = len(fid.keys())
if group is None:
gid = fid.create_group('fiber'+str(n_fiber))
else:
gid = fid.create_group(group)
for k,v in par.iteritems():
gid.attrs[k]=v
for k,v in recpar.iteritems():
gid.attrs[k]=v
if 'spiketimes' in rec.keys():
tmp=gid.create_group('spiketimes')
for k,v in rec['spiketimes'].iteritems():
ds = tmp.create_dataset(k,data=v,compression='gzip')
if 'voltage' in rec.keys():
tmp=gid.create_group('voltage')
for k,v in rec['voltage'].iteritems():
vv = np.array(v)
ds = tmp.create_dataset(k,
data=vv[0:-1:int(recpar['downsampleFactor'])],
compression='gzip')
def runMRGaxon():
h.resetModel()
h.run()
def readConfigurations(filename):
'''
Reads the parameters from the configuration file.
Returns par and recpar or defaults if filename is not a valid file.
'''
if path.isfile(filename):
fid = open(filename)
else:
print('File "'+filename+'" does not exist.')
# fall back to the defaults, as documented in the docstring above
return g_par,g_recpar
import ConfigParser as cp
cfg = cp.ConfigParser()
cfg.readfp(fid)
fid.close()
par = process_configuration(cp, cfg, g_par, 'MRGnode')
recpar = process_configuration(cp, cfg, g_recpar, 'Recording')
return par, recpar
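# A hypothetical configuration file that readConfigurations() could parse (a sketch only;
# the section names match the calls above and the option names come from g_par / g_recpar):
#
#   [MRGnode]
#   HFSamp = 2.0
#   pattern = np.array([10.0, 20.0, 30.0])
#
#   [Recording]
#   filename = data/run01.h5
#   recordVoltage = True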
def main():
|
'''
|
random_line_split
|
|
MRGnodeHFS.py
|
D': 10,
'n_na': 200,
'HFSreferenceNode': 25,
'HFSdur': 50.0,
'HFSfrequency': 200,
'HFSpolarity': 1.0,
'HFSdelay': 0,
'HFSpulsewidth':0.09,
'HFSamp': 1.154,
'HFSwaveform': 0,
'HFSx': 0.0,
'HFSy': 0.0,
'HFSz': 1000.0,
'intrinsicNode': 0,
'intrinsicDur': 0.1,
'intrinsicAmp': 2.0,
'pattern': np.array([40.0]),
'patternLag': np.array([39.9])
}
g_recpar = {
'record': True,
'plot': False,
'nodes':np.array(range(0,g_par['axonnodes'])),
'filename': 'data/simulation.h5',
'recordVoltage':True,
'downsampleFactor':30
}
def insert_nrn_recorders(segment, labels, rec=None):
'''
Inserts recorders for NEURON state variables.
Use one per segment.
"labels" is a dictionary.
Example {'v': '_ref_v'}.
Specify 'rec' to append to previous recorders.
Records also time if 'rec' is 'None' (default).
(Acknowledgements: Daniele Linaro)
'''
if rec is None:
rec = {'t': h.Vector()}
rec['t'].record(h._ref_t)
for k,v in labels.items():
rec[k] = h.Vector()
rec[k].record(getattr(segment, v))
return rec
def pass_parameters_to_nrn(parameters, exception = [], verb=False):
'''
Passes parameters from a dictionary to NEURON.
If the element is a vector, it assumes that a vector
has been created as objref and new Vector() in the hoc code.
Items in 'exception' list are not submitted.
Set 'verb' to True for verbose.
'''
for k,v in parameters.iteritems():
if k not in exception:
if type(v) is not type(np.array([])):
h("{"+ k + " = "+str(v)+"}")
if verb:
print(k + " = "+str(v))
else:
#exec("h."+ k +".from_python("+str(v)+")")
getattr(h,k).from_python(v)
if verb:
print("h."+k+".from_python("+str(v)+")")
def fix_patternLag_vector(parameters):
'''
Changes the patternLag variable according to the pattern vector.
This is used by the Play method of the vector.
Ideally this should be done in the NEURON script.
'''
if 'pattern' in parameters.keys():
parameters["patternLag"] = np.array([ii-0.005
for ii in
parameters["pattern"]])
else:
print('Key "pattern" not found in this dict. Did nothing.')
def record_node_spikes(nodenumber, rec=None,
apc=None, threshold = -15):
'''
Records the action potentials of a particular set of nodes.
Returns a "rec" dictionary.
'''
if rec is None:
rec = {}
if apc is None:
apc = {}
for n in nodenumber:
apc['apc'+str(n)] = h.APCount(h.node[int(n)](0.5))
apc['apc'+str(n)].thresh = threshold
rec['spk'+str(n)] = h.Vector()
apc['apc'+str(n)].record(rec['spk'+str(n)])
return rec,apc
def record_node_voltage(nodenumber, rec=None):
|
def createMRGaxon(par, verbose):
'''
Initializes the model.
Creates the axon and stimulation according to the parameters.
'''
h('{load_file("stdgui.hoc")}')
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,['pattern','patternLag'],verb=verbose)
h('{load_file("MRGnodeHFS.hoc")}')
pass_parameters_to_nrn(par, verb=verbose)
h('{buildModel()}')
def updateMRGaxon(par,verbose):
'''
Updates the parameters of the model.
'''
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,verb=verbose)
h.resetModel()
def recordMRGaxon(recpar,verbose):
'''
Inserts the recorders as specified in recpar.
'''
k = recpar['nodes']
rec = {}
if recpar['recordVoltage']:
rec['voltage'] = record_node_voltage(k)
rec['spiketimes'],rec['apcount'] = record_node_spikes(k)
if verbose:
print('Now recording from '+str(k))
return rec
def resetRecorder(rec,verbose=False):
'''
Clears hoc vectors in spiketimes and voltage and resets apcounts.
'''
for k,o in rec['spiketimes'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
if 'voltage' in rec.keys():
for k,o in rec['voltage'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
for k,o in rec['apcount'].iteritems():
if verbose:
print('Setting apcount ' + k + ' to zero.' )
o.n = 0
def plotMRGaxon(plt, rec, recpar,color=[0,0,0]):
'''
Plots the voltage traces and a rastergram of the
spikes counting ordered by node.
'''
from plot_utils import plotRastergram
fig = plt.figure(figsize=(10,5))
ax = []
ax.append(fig.add_axes([0.1,0.1,0.8,0.2]))
spiketimes = []
n_sptrain = len(spiketimes)
counter = 0
for offset,ii in enumerate(recpar['nodes']):
spiketimes.append(rec['spiketimes']['spk'+str(ii)].to_python())
counter += len(spiketimes[-1])
if counter:
plt.axes(ax[-1])
plotRastergram(spiketimes, 0, None, color)
ax.append(fig.add_axes([0.1,0.4,0.8,0.6]))
voltages = rec['voltage']
time = voltages['t']
n_voltage = len(voltages) - 1
for ii in recpar['nodes']:
ax[-1].plot(time,voltages['v_node'+str(ii)])
return fig
def process_configuration(cp, cfg, metadata, section):
'''
Parse configuration file from default values.
- "cfg" is a ConfigParser file object
- "metadata" a dictionary with default parameters
- "section" is the section to look for in the cfg file
This function looks for the same type of the value in the
metadata dict.
If it is a float or a list, it will look for a value or
evaluate an expression.
'''
output = metadata.copy()
if not cfg.has_section(section):
return output
for option in metadata.keys():
if cfg.has_option(section,option):
if type(metadata[option]) is str:
output[option] = cfg.get(section,option)
elif type(metadata[option]) is int or \
type(metadata[option]) is float or \
type(metadata[option]) is list or \
type(metadata[option])==type(np.array([1])):
try:
output[option] = cfg.getfloat(section,option)
except ValueError:
output[option] = eval(cfg.get(section,option))
elif type(metadata[option]) == bool:
output[option] = cfg.getboolean(section,option)
return output
def append_fiber_to_file(rec,par,recpar,group=None,verbose=False):
'''
Uses h5py to append a fiber to a file.
'''
if verbose:
print('Recording to file '+recpar['filename'])
foldername = path.dirname(os.path.realpath(recpar['filename']))
if not path.isdir(foldername):
os.makedirs(foldername)
fid = h5.File(os.path.realpath(recpar['filename']),'a')
n_fiber = len(fid.keys())
if group is None:
gid = fid.create_group('fiber'+str(n_fiber))
else:
gid = fid.create_group(group)
for k,v in par.iteritems():
gid.attrs[k]=v
for k,v in recpar.iteritems():
gid.attrs[k]=v
if 'spiketimes
|
'''
Records the membrane potential of a particular set of nodes.
'''
rec = None
segments = []
for n in nodenumber:
segments.append(h.node[n](0.5))
for seg,n in zip(segments,nodenumber):
rec = insert_nrn_recorders(seg,{'v_node'+str(n):'_ref_v'},rec)
return rec
|
identifier_body
|
MRGnodeHFS.py
|
y': 0.0,
'HFSz': 1000.0,
'intrinsicNode': 0,
'intrinsicDur': 0.1,
'intrinsicAmp': 2.0,
'pattern': np.array([40.0]),
'patternLag': np.array([39.9])
}
g_recpar = {
'record': True,
'plot': False,
'nodes':np.array(range(0,g_par['axonnodes'])),
'filename': 'data/simulation.h5',
'recordVoltage':True,
'downsampleFactor':30
}
def insert_nrn_recorders(segment, labels, rec=None):
'''
Inserts recorders for NEURON state variables.
Use one per segment.
"labels" is a dictionary.
Example {'v': '_ref_v'}.
Specify 'rec' to append to previous recorders.
Records also time if 'rec' is 'None' (default).
(Acknowledgements: Daniele Linaro)
'''
if rec is None:
rec = {'t': h.Vector()}
rec['t'].record(h._ref_t)
for k,v in labels.items():
rec[k] = h.Vector()
rec[k].record(getattr(segment, v))
return rec
def pass_parameters_to_nrn(parameters, exception = [], verb=False):
'''
Passes parameters from a dictionary to NEURON.
If the element is a vector, it assumes that a vector
has been created as objref and new Vector() in the hoc code.
Items in 'exception' list are not submitted.
Set 'verb' to True for verbose.
'''
for k,v in parameters.iteritems():
if k not in exception:
if type(v) is not type(np.array([])):
h("{"+ k + " = "+str(v)+"}")
if verb:
print(k + " = "+str(v))
else:
#exec("h."+ k +".from_python("+str(v)+")")
getattr(h,k).from_python(v)
if verb:
print("h."+k+".from_python("+str(v)+")")
def fix_patternLag_vector(parameters):
'''
Changes the patternLag variable according to the pattern vector.
This is used by the Play method of the vector.
Ideally this should be done in the NEURON script.
'''
if 'pattern' in parameters.keys():
parameters["patternLag"] = np.array([ii-0.005
for ii in
parameters["pattern"]])
else:
print('Key "pattern" not found in this dict. Did nothing.')
def record_node_spikes(nodenumber, rec=None,
apc=None, threshold = -15):
'''
Records the action potentials of a particular set of nodes.
Returns a "rec" dictionary.
'''
if rec is None:
rec = {}
if apc is None:
apc = {}
for n in nodenumber:
apc['apc'+str(n)] = h.APCount(h.node[int(n)](0.5))
apc['apc'+str(n)].thresh = threshold
rec['spk'+str(n)] = h.Vector()
apc['apc'+str(n)].record(rec['spk'+str(n)])
return rec,apc
def record_node_voltage(nodenumber, rec=None):
'''
Records the membrane potential of a particular set of nodes.
'''
rec = None
segments = []
for n in nodenumber:
segments.append(h.node[n](0.5))
for seg,n in zip(segments,nodenumber):
rec = insert_nrn_recorders(seg,{'v_node'+str(n):'_ref_v'},rec)
return rec
def createMRGaxon(par, verbose):
'''
Initializes the model.
Creates the axon and stimulation according to the parameters.
'''
h('{load_file("stdgui.hoc")}')
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,['pattern','patternLag'],verb=verbose)
h('{load_file("MRGnodeHFS.hoc")}')
pass_parameters_to_nrn(par, verb=verbose)
h('{buildModel()}')
def updateMRGaxon(par,verbose):
'''
Updates the parameters of the model.
'''
fix_patternLag_vector(par)
pass_parameters_to_nrn(par,verb=verbose)
h.resetModel()
def recordMRGaxon(recpar,verbose):
'''
Inserts the recorders as specified in recpar.
'''
k = recpar['nodes']
rec = {}
if recpar['recordVoltage']:
rec['voltage'] = record_node_voltage(k)
rec['spiketimes'],rec['apcount'] = record_node_spikes(k)
if verbose:
print('Now recording from '+str(k))
return rec
def resetRecorder(rec,verbose=False):
'''
Clears hoc vectors in spiketimes and voltage and resets apcounts.
'''
for k,o in rec['spiketimes'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
if 'voltage' in rec.keys():
for k,o in rec['voltage'].iteritems():
if verbose:
print('Resetting ' + k)
o.clear()
for k,o in rec['apcount'].iteritems():
if verbose:
print('Setting apcount ' + k + ' to zero.' )
o.n = 0
def plotMRGaxon(plt, rec, recpar,color=[0,0,0]):
'''
Plots the voltage traces and a rastergram of the
spikes counting ordered by node.
'''
from plot_utils import plotRastergram
fig = plt.figure(figsize=(10,5))
ax = []
ax.append(fig.add_axes([0.1,0.1,0.8,0.2]))
spiketimes = []
n_sptrain = len(spiketimes)
counter = 0
for offset,ii in enumerate(recpar['nodes']):
spiketimes.append(rec['spiketimes']['spk'+str(ii)].to_python())
counter += len(spiketimes[-1])
if counter:
plt.axes(ax[-1])
plotRastergram(spiketimes, 0, None, color)
ax.append(fig.add_axes([0.1,0.4,0.8,0.6]))
voltages = rec['voltage']
time = voltages['t']
n_voltage = len(voltages) - 1
for ii in recpar['nodes']:
ax[-1].plot(time,voltages['v_node'+str(ii)])
return fig
def process_configuration(cp, cfg, metadata, section):
'''
Parse configuration file from default values.
- "cfg" is a ConfigParser file object
- "metadata" a dictionary with default parameters
- "section" is the section to look for in the cfg file
This function looks for the same type of the value in the
metadata dict.
If it is a float or a list, it will look for a value or
evaluate an expression.
'''
output = metadata.copy()
if not cfg.has_section(section):
return output
for option in metadata.keys():
if cfg.has_option(section,option):
if type(metadata[option]) is str:
output[option] = cfg.get(section,option)
elif type(metadata[option]) is int or \
type(metadata[option]) is float or \
type(metadata[option]) is list or \
type(metadata[option])==type(np.array([1])):
try:
output[option] = cfg.getfloat(section,option)
except ValueError:
output[option] = eval(cfg.get(section,option))
elif type(metadata[option]) == bool:
output[option] = cfg.getboolean(section,option)
return output
def append_fiber_to_file(rec,par,recpar,group=None,verbose=False):
'''
Uses h5py to append a fiber to a file.
'''
if verbose:
print('Recording to file '+recpar['filename'])
foldername = path.dirname(os.path.realpath(recpar['filename']))
if not path.isdir(foldername):
os.makedirs(foldername)
fid = h5.File(os.path.realpath(recpar['filename']),'a')
n_fiber = len(fid.keys())
if group is None:
gid = fid.create_group('fiber'+str(n_fiber))
else:
gid = fid.create_group(group)
for k,v in par.iteritems():
gid.attrs[k]=v
for k,v in recpar.iteritems():
gid.attrs[k]=v
if 'spiketimes' in rec.keys():
tmp=gid.create_group('spiketimes')
for k,v in rec['spiketimes'].iteritems():
ds = tmp.create_dataset(k,data=v,compression='gzip')
if 'voltage' in rec.keys():
tmp=gid.create_group('voltage')
for k,v in rec['voltage'].iteritems():
vv = np.array(v)
ds = tmp.create_dataset(k,
data=vv[0:-1:int(recpar['downsampleFactor'])],
compression='gzip')
def
|
runMRGaxon
|
identifier_name
|
|
template_tool.py
|
}} ) is output as-is (escaped rather than rendered),
but once wrapped with mark_safe it is interpreted by the browser as HTML.
The benefit of the default mechanism is:
if an attacker types HTML into an input field, it is stored in the database and is not rendered as HTML when read back, so HTML code cannot be injected.
Without this, whatever they insert would be rendered verbatim, and arbitrary ads and the like could be injected.
'''
return mark_safe("".join(ret_html))
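# A small illustration of the default escaping vs mark_safe behaviour described above
# (a sketch, not part of the original template tags):
#
#   from django.utils.html import escape
#   from django.utils.safestring import mark_safe
#   user_input = "<script>alert('x')</script>"
#   escape(user_input)    # -> "&lt;script&gt;..." shown to the user as plain text
#   mark_safe(user_input) # -> rendered by the browser as real HTML; only safe for trusted strings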
@register.simple_tag
def make_url(path_info, filter_dict, action):
'''
Rebuild the existing URL together with its GET parameters.
:param path_info:
:return:
'''
param_dict = {}
for k, v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
url = "%s%s?%s" % (path_info, action, "&".join(param_dict.values()))
return url
@register.simple_tag
def make_delete_url(path_info, filter_dict):
path_info = path_info.rsplit("/", 2)[0]
url = make_url(path_info, filter_dict, "/delete/")
return url
@register.simple_tag
def merge_url(path_info, filter_dict, page_value):
"""拼凑成url,给页码"""
param_dict = {}
for k,v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
param_dict["_page"] = "%s=%s" % ("_page", page_value)
url = "%s?%s" % (path_info, "&".join(param_dict.values()))
return url
def get_url_dict(p_dict):
"""此函数生成字典,形式是k:k=v"""
param_dict = {}
for k, v in p_dict.items():
param_dict[k] = "%s=%s" % (k, v)
return param_dict
@register.simple_tag
def get_order_url(request, field):
"""
Build the ordering URL.
Ordering is driven by _o: if _o is absent, simply set _o to the field; if present, _o holds a comma-separated list of fields.
Compare against that list: if the field is not in it, add it directly; if it is, check its sign.
The incoming field is a bare field name with no sign (so it is positive); if it already appears in the list, it is removed regardless of sign before being re-added.
"""
url_dict = {}
for k, v in request.GET.items():
url_dict[k] = v
if url_dict.get("_o"):
flag = check_order_field(request.GET, field)
order_list=url_dict["_o"].split(",")
# field may appear as "-xx" or "xx"
if flag == "+":
order_list.remove(field)
field = "-%s" % field
if flag == "-":
order_list.remove("-%s" % field)
field = field
order_list.append(field)
order_list = set(order_list)
url_dict["_o"] = ",".join(order_list)
else:
url_dict["_o"] = field
url_dict = get_url_dict(url_dict)
return "&".join(url_dict.values())
@register.simple_tag
def check_order_field(source,field):
"""根据GET中的_o和field,判断是否在里面"""
source=source.get("_o")
if source:
order_list=source.split(",")
if field in order_list:
return "+" # 肯定有正的该字段存在
elif "-%s" % field in order_list:
return "-" # 肯定有负的该字段存在
else:
return None # the field is not in the ordering list
@register.simple_tag
def get_m2m_selected_fields(form_obj, field_name):
# form_obj is the form object for this record; form_obj.instance is the model instance the form maps to, e.g. an Account row
# print(field_name)
# if field_name != "id":
# if hasattr(form_obj.instance,field_name):
# print(bool(form_obj.instance),dir(form_obj.instance))
# m2m_obj=getattr(form_obj.instance,field_name) # the many-to-many field object of this record; .all() returns all of its related rows
# if we have an instance handle this is an update; otherwise fall back to _meta to get the model (nothing selected yet)
# print("ddddd",form_obj.instance._meta.get_field("name")) # model_obj
if form_obj.instance.id: # no id means this is not a saved model instance
m2m_obj = getattr(form_obj.instance,field_name)
# form_obj[field_name].field.widget.attrs['class']=''
# def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
return m2m_obj.all() # .all() on the m2m field returns all selected rows from the related table
else:
return []
@register.simple_tag
def get_m2m_fields(model, field_name, selected):
field_obj = model._meta.get_field(field_name)
obj_all = field_obj.rel.to.objects.all()
return set(obj_all) - set(selected)
@register.simple_tag
def get_description(admin, action):
"""找到每个函数,和它的short_description"""
print(admin)
if hasattr(admin, action):
act_fun = getattr(admin, action)
if hasattr(act_fun, "short_description"):
return getattr(act_fun, "short_description")
else:
return action
else:
raise AttributeError("%s function not found" % action)
@register.simple_tag
def get_relate_depth(model_obj, depth_flag=False):
"""
Find all nodes related to this record (reverse relations, rel).
Find each related field; model.relatedfield.all() returns the records on the other side that reference this record.
Loop over each of them.
For many_to_many fields only a short hint is rendered.
:param model_obj:
:param depth_flag: whether this call is a deeper level of the recursion
:return:
"""
node_list = []
if not depth_flag:
node_list.append("<dl class='dl-horizontal'>")
node_list.append("<dd style='margin-left:0'>")
# if the model is registered with an admin, render a link; otherwise plain text
if model_obj._meta.app_label in site.app_dict:
print(model_obj._meta.model_name in site.app_dict[model_obj._meta.app_label])
if model_obj._meta.model_name in site.app_dict[model_obj._meta.app_label]:
node_list.append(
("%s:<a href='/admin/{app}/{table}/{id}/change'>%s</a>" %
(model_obj._meta.verbose_name,str(model_obj))).format(
app=model_obj._meta.app_label, table=model_obj._meta.model_name, id=model_obj.id
)
)
else:
node_list.append(
"%s:%s" % (model_obj._meta.verbose_name, str(model_obj))
)
related_nodes = model_obj._meta.get_fields()
for rel_node in related_nodes:
if not hasattr(rel_node,"column"): # 被关联
rel_field = rel_node.get_accessor_name()
sub_model_objs = getattr(model_obj, rel_field).all()
if sub_model_objs:
node_list.append("<dl>")
node_list.append("<dt>many to one</dt>")
for sub_model_obj in sub_model_objs:
node_list.append(get_relate_depth(sub_model_obj, depth_flag=True))
if sub_model_objs:
node_list.append("</dl>")
else:
if rel_node.get_internal_type() == "ManyToManyField":
sub_model_objs = getattr(model_obj, rel_node.column).all()
if sub_model_objs:
node_list.append("<dl>")
node_list.append("<dt>many to many</dt>")
for sub_model_obj in sub_model_objs:
node_list.append("<dd>%s</dd>" % str(sub_model_obj))
if sub_model_objs:
node_list.append("</dl>")
node_list.append("</dd>")
if not depth_flag:
node_list.append("</dl>")
return "".join(node_list)
@register.simple_tag
def get_bool(s1, s2):
print(s1, s2)
print(type(s1), type(s2))
return s1 == s2
@register.simple_tag
def get_read_only_readable(field, instance):
    '''
    Render a read-only field: show its value as plain text plus a hidden/disabled widget.
    :param field: the bound form field
    :param instance: the model instance the form is bound to
    :return: HTML markup
    '''
    ret_html = field
    ret_html.field.widget.attrs.update({"disabled": "disabled", "class": "hide"})
    # NOTE: `unicode` is Python 2 only; on Python 3 this would be `str(ret_html)`.
    ret_html = "<p>%s</p>%s" % (ret_html.value(), unicode(ret_html))
field_type = instance._meta.get_field(field.name).get_internal_type()
if field_type == "ManyToManyField":
value = getattr(instance, field.name).all()
# ret_html = value + "<input type='hidden' name='%s' value='%s' />" % (field, value)
ret_html = ""
else:
field_obj = getattr(instance, field.name)
if field_type == "ForeignKey":
ret_html = demark_safe(str(field_obj)) + "<input type='hidden' name='%s' value='%s' />" % \
(field.name, instance.id)
else:
ret_html = demark_safe(field_obj) + "<input type='hidden' name='%s' value='%s' />" % (field.name, field_obj)
return mark_safe(ret_html)
|
identifier_name
|
||
template_tool.py
|
:
choice = None
if choice:
value = getattr(obj, "get_%s_display" % field)()
else:
value = getattr(obj, field)
return value
@register.filter
def get_model_value(obj, field):
value = get_depth_value(obj, field)
return value
@register.simple_tag
def get_model_url(request, table_obj, field):
"""单个url的制作,目的给前端和内部都能调用"""
url = "<td><a href='%s/change/?%s' class='btn-link'>%s</a></td>" % (
table_obj.id,
"&".join(["%s=%s" % (k, v) for k, v in request.GET.items()]),
get_model_value(table_obj, field)
)
return mark_safe(url)
@register.simple_tag
def get_model_item(request, table_obj, admin_class):
"""
    Render one table row: take the model instance and the admin class and emit a cell per
    list_display field; the first field links to the change page and is never editable.
    list_editable does not support double-underscore lookups.
"""
fields = admin_class.list_display
editfields
|
get_model_value(table_obj, field)
ret_html.append("<td>%s</td>" % (get_model_value(table_obj, field)))
return mark_safe("".join(ret_html))
@register.simple_tag
def get_filter_options(table, field):
"""
    Get the field object and look for get_choices().
    If the field name contains "__" (e.g. group__groupname), list every value of the trailing field instead.
    :param table:
    :param field:
    :return:
    Rough notes:
    table._meta.get_field('group').related_model.objects.values_list('groupname')
"""
def get_depth_filter(table, field):
fields = field.split("__", 1)
field_obj = table._meta.get_field(fields[0])
if len(fields) == 2:
# print(field_obj.related_model)
ret_html = get_filter_options(field_obj.related_model, fields[1])
else:
ret_html = []
try:
for choice in field_obj.get_choices():
chose = (choice[0], demark_safe(choice[1]))
ret_html.append("<option value='%s'>%s</option>" % chose)
            except AttributeError:  # no get_choices means this is a plain field without choices
choice_list = [('', '---------'), ]
choice_list.extend(field_obj.model.objects.all().values_list(field, field))
for choice in choice_list:
ret_html.append("<option value='%s'>%s</option>" % choice)
pass
return ret_html
ret_html = get_depth_filter(table, field)
'''
    By default the template engine escapes output once and the browser unescapes it once,
    so the reader sees the original string. Therefore strings returned directly (or rendered
    inside {{ }} / {% %}) come out verbatim, while mark_safe skips the escaping step, so
    whatever the browser receives is interpreted as HTML.
    The default behaviour is a safety net: if an attacker submits HTML through an input box
    and it is stored in the database, it will not be interpreted as HTML when re-displayed,
    so markup (ads, scripts, ...) cannot be injected.
'''
return mark_safe("".join(ret_html))
@register.simple_tag
def make_url(path_info, filter_dict, action):
'''
    Rebuild a URL from the existing path and the current GET filters, appending `action`.
:param path_info:
:return:
'''
param_dict = {}
for k, v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
url = "%s%s?%s" % (path_info, action, "&".join(param_dict.values()))
return url
@register.simple_tag
def make_delete_url(path_info, filter_dict):
path_info = path_info.rsplit("/", 2)[0]
url = make_url(path_info, filter_dict, "/delete/")
return url
@register.simple_tag
def merge_url(path_info, filter_dict, page_value):
"""拼凑成url,给页码"""
param_dict = {}
for k,v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
param_dict["_page"] = "%s=%s" % ("_page", page_value)
url = "%s?%s" % (path_info, "&".join(param_dict.values()))
return url
def get_url_dict(p_dict):
"""此函数生成字典,形式是k:k=v"""
param_dict = {}
for k, v in p_dict.items():
param_dict[k] = "%s=%s" % (k, v)
return param_dict
@register.simple_tag
def get_order_url(request, field):
"""
生成排序的url,
排序看_o,如果_o没有,直接加_o:field,如果有_o:field,field,
还要比对一下,如果该feild不在里边,直接添加,如果在里边,判断正负
field传过来是字段名,不存在正负,肯定是正的,在列表中,如果存在该字段,无论正负都应该删除
"""
url_dict = {}
for k, v in request.GET.items():
url_dict[k] = v
if url_dict.get("_o"):
flag = check_order_field(request.GET, field)
order_list=url_dict["_o"].split(",")
# field可能是-xx,xx
if flag == "+":
order_list.remove(field)
field = "-%s" % field
if flag == "-":
order_list.remove("-%s" % field)
field = field
order_list.append(field)
order_list = set(order_list)
url_dict["_o"] = ",".join(order_list)
else:
url_dict["_o"] = field
url_dict = get_url_dict(url_dict)
return "&".join(url_dict.values())
@register.simple_tag
def check_order_field(source,field):
"""根据GET中的_o和field,判断是否在里面"""
source=source.get("_o")
if source:
order_list=source.split(",")
if field in order_list:
return "+" # 肯定有正的该字段存在
elif "-%s" % field in order_list:
return "-" # 肯定有负的该字段存在
else:
return None # 说明该字段不存在
@register.simple_tag
def get_m2m_selected_fields(form_obj, field_name):
# form_obj是这个记录的form的对象,form_obj.instance是这个form_obj对应的表 如Account表
# print(field_name)
# if field_name != "id":
# if hasattr(form_obj.instance,field_name):
# print(bool(form_obj.instance),dir(form_obj.instance))
# m2m_obj=getattr(form_obj.instance,field_name) #m2m这个记录的字段对象,是多对多字段。直接all(拿到该记录的所有多对多)
# 如果有句柄,说明是更新,没有,,去_meta拿model,。。。空
# print("ddddd",form_obj.instance._meta.get_field("name")) # model_obj
if form_obj.instance.id: # id没有,说明不是model对象
m2m_obj = getattr(form_obj.instance,field_name)
# form_obj[field_name].field.widget.attrs['class']=''
# def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
return m2m_obj.all() # m2m字段直接all找到对应表的所有
else:
return []
@register.simple_tag
def get_m2m_fields(model, field_name, selected):
field_obj = model._meta.get_field(field_name)
obj_all = field_obj.rel.to.objects.all()
return set(obj_all) - set(selected)
@register.simple_tag
def get_description(admin, action):
"""找到每个函数,和它的short_description"""
print(admin)
if hasattr(admin, action):
act_fun = getattr(admin, action)
if hasattr(act_fun, "short_description"):
return getattr(act_fun, "short_description")
else:
return action
else:
raise AttributeError("%s function not found" % action)
@register.simple_tag
def get_relate_depth(model_obj, depth_flag=False):
"""
|
= admin_class.list_editable
form_obj = admin_class.model_change_form(instance=table_obj)
ret_html = []
ret_html.append('''<td>
<div class="checkbox check-transparent">
<input type="checkbox" class="magic-checkbox" id="check_%s" name="check_item" value="%s" onclick="check_component('check_item','checkall')">
<label for="check_%s"></label>
</div></td>
''' % ((table_obj.id, ) * 3))
for field in fields:
if fields.index(field) == 0:
ret_html.append(get_model_url(request, table_obj, field))
else:
if field in editfields:
ret_html.append("<td>%s</td>" % form_obj[field])
else:
|
identifier_body
|
template_tool.py
|
:
choice = None
if choice:
value = getattr(obj, "get_%s_display" % field)()
else:
value = getattr(obj, field)
return value
@register.filter
def get_model_value(obj, field):
value = get_depth_value(obj, field)
return value
@register.simple_tag
def get_model_url(request, table_obj, field):
"""单个url的制作,目的给前端和内部都能调用"""
url = "<td><a href='%s/change/?%s' class='btn-link'>%s</a></td>" % (
table_obj.id,
"&".join(["%s=%s" % (k, v) for k, v in request.GET.items()]),
get_model_value(table_obj, field)
)
return mark_safe(url)
@register.simple_tag
def get_model_item(request, table_obj, admin_class):
"""
穿表对象和admin对象,根据field取表的每个记录,第一个字段是可链接的,不可修改
list_editable不可用双下划线
"""
fields = admin_class.list_display
editfields = admin_class.list_editable
form_obj = admin_class.model_change_form(instance=table_obj)
ret_html = []
ret_html.append('''<td>
<div class="checkbox check-transparent">
<input type="checkbox" class="magic-checkbox" id="check_%s" name="check_item" value="%s" onclick="check_component('check_item','checkall')">
<label for="check_%s"></label>
</div></td>
''' % ((table_obj.id, ) * 3))
for field in fields:
if fields.index(field) == 0:
ret_html.append(get_model_url(request, table_obj, field))
else:
if field in editfields:
ret_html.append("<td>%s</td>" % form_obj[field])
else:
                ret_html.append("<td>%s</td>" % get_model_value(table_obj, field))
return mark_safe("".join(ret_html))
@register.simple_tag
def get_filter_options(table, field):
"""
拿到该字段的对象,找get_choices
如果有__ 如:group__groupname 列出所有groupname字段的数据
:param table:
:param field:
:return:
table._meta.get_field(group).model.objects.value_list('groupname')
table._meta.get_field(group).model._meta.get_field(group).model.objects.value_list('groupname')
"""
def get_depth_filter(table, field):
fields = field.split("__", 1)
field_obj = table._meta.get_field(fields[0])
if len(fields) == 2:
# print(field_obj.related_model)
ret_html = get_filter_options(field_obj.related_model, fields[1])
else:
ret_html = []
try:
for choice in field_obj.get_choices():
chose = (choice[0], demark_safe(choice[1]))
ret_html.append("<option value='%s'>%s</option>" % chose)
except AttributeError: # 没有拿到choice说明是普通字段
choice_list = [('', '---------'), ]
choice_list.extend(field_obj.model.objects.all().values_list(field, field))
for choice in choice_list:
ret_html.append("<option value='%s'>%s</option>" % choice)
pass
return ret_html
ret_html = get_depth_filter(table, field)
'''
默认:模板引擎向下转义一层,浏览器向上转义一层,等到的是原来的字符串
mark_safe: 如果模板引擎没有向下转换,但浏览器向上转换,会被翻译成html语言
所以直接return回去的字符串或者在模板内(两个大括号或{%%}之间的)会原样输出,
但是被mark_safe修饰过就能被浏览器翻译成html语言
默认机制的好处是:
如果黑客在可输入框中输入html语句,提交后存到数据库,再次拿到时不会被翻译成html语句,html代码就无法植入
如果不这么做,那么他想要插入什么语句就是什么语句,会被植入任何广告等等
|
@register.simple_tag
def make_url(path_info, filter_dict, action):
'''
对已有的url和get进行重构
:param path_info:
:return:
'''
param_dict = {}
for k, v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
url = "%s%s?%s" % (path_info, action, "&".join(param_dict.values()))
return url
@register.simple_tag
def make_delete_url(path_info, filter_dict):
path_info = path_info.rsplit("/", 2)[0]
url = make_url(path_info, filter_dict, "/delete/")
return url
@register.simple_tag
def merge_url(path_info, filter_dict, page_value):
"""拼凑成url,给页码"""
param_dict = {}
for k,v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
param_dict["_page"] = "%s=%s" % ("_page", page_value)
url = "%s?%s" % (path_info, "&".join(param_dict.values()))
return url
def get_url_dict(p_dict):
"""此函数生成字典,形式是k:k=v"""
param_dict = {}
for k, v in p_dict.items():
param_dict[k] = "%s=%s" % (k, v)
return param_dict
@register.simple_tag
def get_order_url(request, field):
"""
生成排序的url,
排序看_o,如果_o没有,直接加_o:field,如果有_o:field,field,
还要比对一下,如果该feild不在里边,直接添加,如果在里边,判断正负
field传过来是字段名,不存在正负,肯定是正的,在列表中,如果存在该字段,无论正负都应该删除
"""
url_dict = {}
for k, v in request.GET.items():
url_dict[k] = v
if url_dict.get("_o"):
flag = check_order_field(request.GET, field)
order_list=url_dict["_o"].split(",")
# field可能是-xx,xx
if flag == "+":
order_list.remove(field)
field = "-%s" % field
if flag == "-":
order_list.remove("-%s" % field)
field = field
order_list.append(field)
order_list = set(order_list)
url_dict["_o"] = ",".join(order_list)
else:
url_dict["_o"] = field
url_dict = get_url_dict(url_dict)
return "&".join(url_dict.values())
@register.simple_tag
def check_order_field(source,field):
"""根据GET中的_o和field,判断是否在里面"""
source=source.get("_o")
if source:
order_list=source.split(",")
if field in order_list:
return "+" # 肯定有正的该字段存在
elif "-%s" % field in order_list:
return "-" # 肯定有负的该字段存在
else:
return None # 说明该字段不存在
@register.simple_tag
def get_m2m_selected_fields(form_obj, field_name):
# form_obj是这个记录的form的对象,form_obj.instance是这个form_obj对应的表 如Account表
# print(field_name)
# if field_name != "id":
# if hasattr(form_obj.instance,field_name):
# print(bool(form_obj.instance),dir(form_obj.instance))
# m2m_obj=getattr(form_obj.instance,field_name) #m2m这个记录的字段对象,是多对多字段。直接all(拿到该记录的所有多对多)
# 如果有句柄,说明是更新,没有,,去_meta拿model,。。。空
# print("ddddd",form_obj.instance._meta.get_field("name")) # model_obj
if form_obj.instance.id: # id没有,说明不是model对象
m2m_obj = getattr(form_obj.instance,field_name)
# form_obj[field_name].field.widget.attrs['class']=''
# def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
return m2m_obj.all() # m2m字段直接all找到对应表的所有
else:
return []
@register.simple_tag
def get_m2m_fields(model, field_name, selected):
field_obj = model._meta.get_field(field_name)
obj_all = field_obj.rel.to.objects.all()
return set(obj_all) - set(selected)
@register.simple_tag
def get_description(admin, action):
"""找到每个函数,和它的short_description"""
print(admin)
if hasattr(admin, action):
act_fun = getattr(admin, action)
if hasattr(act_fun, "short_description"):
return getattr(act_fun, "short_description")
else:
return action
else:
raise AttributeError("%s function not found" % action)
@register.simple_tag
def get_relate_depth(model_obj, depth_flag=False):
"""
find
|
'''
return mark_safe("".join(ret_html))
|
random_line_split
|
template_tool.py
|
else:
verbose_name = model_class._meta.get_field(field).verbose_name
return verbose_name
def get_depth_value(obj, field):
"""
    Recursively read a value along a double-underscore path such as aa__bb__cc.
    A "__" means the segment is a foreign key, so step into the related object.
    At the innermost field, if it has choices use get_<field>_display(); otherwise
    read the attribute directly.
"""
fields = field.split("__", 1)
if len(fields) == 2:
obj = getattr(obj, fields[0])
value = get_depth_value(obj, fields[1]) if obj else ""
else:
field_obj = obj._meta.get_field(field)
try:
choice = field_obj.choices
except AttributeError:
choice = None
if choice:
value = getattr(obj, "get_%s_display" % field)()
else:
value = getattr(obj, field)
return value
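# --- Illustrative sketch (not part of the original file) -------------------
# A framework-free demo of the double-underscore traversal get_depth_value
# implements; the plain classes below are hypothetical stand-ins for models.
def _depth_value_demo():
    class Group:
        name = "ops"

    class User:
        group = Group()

    def follow(obj, path):
        head, _, rest = path.partition("__")
        value = getattr(obj, head)
        return follow(value, rest) if rest else value

    return follow(User(), "group__name")  # -> "ops"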
@register.filter
def get_model_value(obj, field):
value = get_depth_value(obj, field)
return value
@register.simple_tag
def get_model_url(request, table_obj, field):
"""单个url的制作,目的给前端和内部都能调用"""
url = "<td><a href='%s/change/?%s' class='btn-link'>%s</a></td>" % (
table_obj.id,
"&".join(["%s=%s" % (k, v) for k, v in request.GET.items()]),
get_model_value(table_obj, field)
)
return mark_safe(url)
@register.simple_tag
def get_model_item(request, table_obj, admin_class):
"""
穿表对象和admin对象,根据field取表的每个记录,第一个字段是可链接的,不可修改
list_editable不可用双下划线
"""
fields = admin_class.list_display
editfields = admin_class.list_editable
form_obj = admin_class.model_change_form(instance=table_obj)
ret_html = []
ret_html.append('''<td>
<div class="checkbox check-transparent">
<input type="checkbox" class="magic-checkbox" id="check_%s" name="check_item" value="%s" onclick="check_component('check_item','checkall')">
<label for="check_%s"></label>
</div></td>
''' % ((table_obj.id, ) * 3))
for field in fields:
if fields.index(field) == 0:
ret_html.append(get_model_url(request, table_obj, field))
else:
if field in editfields:
ret_html.append("<td>%s</td>" % form_obj[field])
else:
                ret_html.append("<td>%s</td>" % get_model_value(table_obj, field))
return mark_safe("".join(ret_html))
@register.simple_tag
def get_filter_options(table, field):
"""
拿到该字段的对象,找get_choices
如果有__ 如:group__groupname 列出所有groupname字段的数据
:param table:
:param field:
:return:
table._meta.get_field(group).model.objects.value_list('groupname')
table._meta.get_field(group).model._meta.get_field(group).model.objects.value_list('groupname')
"""
def get_depth_filter(table, field):
fields = field.split("__", 1)
field_obj = table._meta.get_field(fields[0])
if len(fields) == 2:
# print(field_obj.related_model)
ret_html = get_filter_options(field_obj.related_model, fields[1])
else:
ret_html = []
try:
for choice in field_obj.get_choices():
chose = (choice[0], demark_safe(choice[1]))
ret_html.append("<option value='%s'>%s</option>" % chose)
except AttributeError: # 没有拿到choice说明是普通字段
choice_list = [('', '---------'), ]
choice_list.extend(field_obj.model.objects.all().values_list(field, field))
for choice in choice_list:
ret_html.append("<option value='%s'>%s</option>" % choice)
pass
return ret_html
ret_html = get_depth_filter(table, field)
'''
默认:模板引擎向下转义一层,浏览器向上转义一层,等到的是原来的字符串
mark_safe: 如果模板引擎没有向下转换,但浏览器向上转换,会被翻译成html语言
所以直接return回去的字符串或者在模板内(两个大括号或{%%}之间的)会原样输出,
但是被mark_safe修饰过就能被浏览器翻译成html语言
默认机制的好处是:
如果黑客在可输入框中输入html语句,提交后存到数据库,再次拿到时不会被翻译成html语句,html代码就无法植入
如果不这么做,那么他想要插入什么语句就是什么语句,会被植入任何广告等等
'''
return mark_safe("".join(ret_html))
@register.simple_tag
def make_url(path_info, filter_dict, action):
'''
对已有的url和get进行重构
:param path_info:
:return:
'''
param_dict = {}
for k, v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
url = "%s%s?%s" % (path_info, action, "&".join(param_dict.values()))
return url
@register.simple_tag
def make_delete_url(path_info, filter_dict):
path_info = path_info.rsplit("/", 2)[0]
url = make_url(path_info, filter_dict, "/delete/")
return url
@register.simple_tag
def merge_url(path_info, filter_dict, page_value):
"""拼凑成url,给页码"""
param_dict = {}
for k,v in filter_dict.items():
param_dict[k] = "%s=%s" % (k, v)
param_dict["_page"] = "%s=%s" % ("_page", page_value)
url = "%s?%s" % (path_info, "&".join(param_dict.values()))
return url
def get_url_dict(p_dict):
"""此函数生成字典,形式是k:k=v"""
param_dict = {}
for k, v in p_dict.items():
param_dict[k] = "%s=%s" % (k, v)
return param_dict
@register.simple_tag
def get_order_url(request, field):
"""
生成排序的url,
排序看_o,如果_o没有,直接加_o:field,如果有_o:field,field,
还要比对一下,如果该feild不在里边,直接添加,如果在里边,判断正负
field传过来是字段名,不存在正负,肯定是正的,在列表中,如果存在该字段,无论正负都应该删除
"""
url_dict = {}
for k, v in request.GET.items():
url_dict[k] = v
if url_dict.get("_o"):
flag = check_order_field(request.GET, field)
order_list=url_dict["_o"].split(",")
# field可能是-xx,xx
if flag == "+":
order_list.remove(field)
field = "-%s" % field
if flag == "-":
order_list.remove("-%s" % field)
field = field
order_list.append(field)
order_list = set(order_list)
url_dict["_o"] = ",".join(order_list)
else:
url_dict["_o"] = field
url_dict = get_url_dict(url_dict)
return "&".join(url_dict.values())
@register.simple_tag
def check_order_field(source,field):
"""根据GET中的_o和field,判断是否在里面"""
source=source.get("_o")
if source:
order_list=source.split(",")
if field in order_list:
return "+" # 肯定有正的该字段存在
elif "-%s" % field in order_list:
return "-" # 肯定有负的该字段存在
else:
return None # 说明该字段不存在
@register.simple_tag
def get_m2m_selected_fields(form_obj, field_name):
# form_obj是这个记录的form的对象,form_obj.instance是这个form_obj对应的表 如Account表
# print(field_name)
# if field_name != "id":
# if hasattr(form_obj.instance,field_name):
# print(bool(form_obj.instance),dir(form_obj.instance))
# m2m_obj=getattr(form_obj.instance,field_name) #m2m这个记录的字段对象,是多对多字段。直接all(拿到该记录的所有多对多)
# 如果有句柄,说明是更新,没有,,去_meta拿model,。。。空
# print("ddddd",form_obj.instance._meta.get_field("name")) # model_obj
if form_obj.instance.id: # id没有,说明不是model对象
m2m_obj = getattr(form_obj.instance,field_name)
# form_obj[field_name].field.widget.attrs['class']=''
# def create_option(self, name, value, label, selected, index, subindex
|
verbose_name = get_field_verbose_name(field_obj.related_model, fields[1])
|
conditional_block
|
|
updateDeps.js
|
*/
const _nextPreHighestVersion = (latestTag, lastVersion, pkgPreRelease) => {
const bumpFromTags = latestTag ? semver.inc(latestTag, "prerelease", pkgPreRelease) : null;
const bumpFromLast = semver.inc(lastVersion, "prerelease", pkgPreRelease);
return bumpFromTags ? getHighestVersion(bumpFromLast, bumpFromTags) : bumpFromLast;
};
/**
* Resolve package release type taking into account the cascading dependency update.
*
* @param {Package} pkg Package object.
* @param {string|undefined} bumpStrategy Dependency resolution strategy: override, satisfy, inherit.
* @param {string|undefined} releaseStrategy Release type triggered by deps updating: patch, minor, major, inherit.
* @param {Package[]} ignore=[] Packages to ignore (to prevent infinite loops).
* @param {string} prefix Dependency version prefix to be attached if `bumpStrategy='override'`. ^ | ~ | '' (defaults to empty string)
* @returns {string|undefined} Resolved release type.
* @internal
*/
const resolveReleaseType = (pkg, bumpStrategy = "override", releaseStrategy = "patch", ignore = [], prefix = "") => {
// NOTE This fn also updates pkg deps, so it must be invoked anyway.
const dependentReleaseType = getDependentRelease(pkg, bumpStrategy, releaseStrategy, ignore, prefix);
// Release type found by commitAnalyzer.
if (pkg._nextType) {
return pkg._nextType;
}
if (!dependentReleaseType) {
return undefined;
}
// Define release type for dependent package if any of its deps changes.
// `patch`, `minor`, `major` — strictly declare the release type that occurs when any dependency is updated.
// `inherit` — applies the "highest" release of updated deps to the package.
// For example, if any dep has a breaking change, `major` release will be applied to the all dependants up the chain.
pkg._nextType = releaseStrategy === "inherit" ? dependentReleaseType : releaseStrategy;
return pkg._nextType;
};
/**
* Get dependent release type by recursive scanning and updating pkg deps.
*
* @param {Package} pkg The package with local deps to check.
* @param {string} bumpStrategy Dependency resolution strategy: override, satisfy, inherit.
* @param {string} releaseStrategy Release type triggered by deps updating: patch, minor, major, inherit.
* @param {Package[]} ignore Packages to ignore (to prevent infinite loops).
* @param {string} prefix Dependency version prefix to be attached if `bumpStrategy='override'`. ^ | ~ | '' (defaults to empty string)
* @returns {string|undefined} Returns the highest release type if found, undefined otherwise
* @internal
*/
const getDependentRelease = (pkg, bumpStrategy, releaseStrategy, ignore, prefix) => {
const severityOrder = ["patch", "minor", "major"];
const { localDeps, manifest = {} } = pkg;
const lastVersion = pkg._lastRelease && pkg._lastRelease.version;
const { dependencies = {}, devDependencies = {}, peerDependencies = {}, optionalDependencies = {} } = manifest;
const scopes = [dependencies, devDependencies, peerDependencies, optionalDependencies];
const bumpDependency = (scope, name, nextVersion) => {
const currentVersion = scope[name];
if (!nextVersion || !currentVersion) {
return false;
}
const resolvedVersion = resolveNextVersion(currentVersion, nextVersion, releaseStrategy, prefix);
if (currentVersion !== resolvedVersion) {
scope[name] = resolvedVersion;
return true;
}
return false;
};
// prettier-ignore
return localDeps
.filter((p) => !ignore.includes(p))
.reduce((releaseType, p) => {
// Has changed if...
// 1. Any local dep package itself has changed
// 2. Any local dep package has local deps that have changed.
const nextType = resolveReleaseType(p, bumpStrategy, releaseStrategy,[...ignore, pkg], prefix);
const nextVersion =
nextType
// Update the nextVersion only if there is a next type to be bumped
? p._preRelease ? getNextPreVersion(p) : getNextVersion(p)
// Set the nextVersion fallback to the last local dependency package last version
: p._lastRelease && p._lastRelease.version
// 3. And this change should correspond to the manifest updating rule.
const requireRelease = scopes
.reduce((res, scope) => bumpDependency(scope, p.name, nextVersion) || res, !lastVersion)
return requireRelease && (severityOrder.indexOf(nextType) > severityOrder.indexOf(releaseType))
? nextType
: releaseType;
}, undefined);
};
/**
* Resolve next version of dependency.
*
* @param {string} currentVersion Current dep version
* @param {string} nextVersion Next release type: patch, minor, major
* @param {string|undefined} strategy Resolution strategy: inherit, override, satisfy
* @param {string} prefix Dependency version prefix to be attached if `bumpStrategy='override'`. ^ | ~ | '' (defaults to empty string)
* @returns {string} Next dependency version
* @internal
*/
const resolveNextVersion = (currentVersion, nextVersion, strategy = "override", prefix = "") => {
  // Check the next pkg version against its current references.
  // If it matches (`*` matches anything, `1.1.0` matches `1.1.x`, `1.5.0` matches `^1.0.0`, and so on)
  // no bump is needed and the current range is kept; otherwise the `override` strategy is applied instead.
if ((strategy === "satisfy" || strategy === "inherit") && semver.satisfies(nextVersion, currentVersion)) {
return currentVersion;
}
// `inherit` will try to follow the current declaration version/range.
// `~1.0.0` + `minor` turns into `~1.1.0`, `1.x` + `major` gives `2.x`,
// but `1.x` + `minor` gives `1.x` so there will be no release, etc.
if (strategy === "inherit") {
const sep = ".";
const nextChunks = nextVersion.split(sep);
const currentChunks = currentVersion.split(sep);
// prettier-ignore
const resolvedChunks = currentChunks.map((chunk, i) =>
nextChunks[i]
? chunk.replace(/\d+/, nextChunks[i])
: chunk
);
return resolvedChunks.join(sep);
}
// "override"
// By default next package version would be set as is for the all dependants.
return prefix + nextVersion;
};
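# --- Illustrative sketch in Python (not part of updateDeps.js) --------------
# An editor's re-statement of the "inherit" strategy described above: keep the
# shape of the current range and substitute the numeric chunks of the next
# version; names here are hypothetical.
def inherit_range(current_version, next_version):
    import re
    next_chunks = next_version.split(".")
    current_chunks = current_version.split(".")
    resolved = [
        re.sub(r"\d+", next_chunks[i], chunk) if i < len(next_chunks) else chunk
        for i, chunk in enumerate(current_chunks)
    ]
    return ".".join(resolved)

# inherit_range("~1.0.0", "1.1.0") -> "~1.1.0"
# inherit_range("1.x", "2.0.0")    -> "2.x"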
/**
* Update pkg deps.
*
* @param {Package} pkg The package this function is being called on.
* @returns {undefined}
* @internal
*/
const updateManifestDeps = (pkg) => {
const { manifest, path } = pkg;
const { indent, trailingWhitespace } = recognizeFormat(manifest.__contents__);
// We need to bump pkg.version for correct yarn.lock update
// https://github.com/qiwi/multi-semantic-release/issues/58
manifest.version = pkg._nextRelease.version || manifest.version;
// Loop through localDeps to verify release consistency.
pkg.localDeps.forEach((d) => {
// Get version of dependency.
const release = d._nextRelease || d._lastRelease;
// Cannot establish version.
if (!release || !release.version)
throw Error(`Cannot release ${pkg.name} because dependency ${d.name} has not been released yet`);
});
if (!auditManifestChanges(manifest, path)) {
return;
}
// Write package.json back out.
writeFileSync(path, JSON.stringify(manifest, null, indent) + trailingWhitespace);
};
// https://gist.github.com/Yimiprod/7ee176597fef230d1451
const difference = (object, base) =>
transform(object, (result, value, key) => {
if (!isEqual(value, base[key])) {
result[key] =
isObject(value) && isObject(base[key]) ? difference(value, base[key]) : `${base[key]} → ${value}`;
}
});
/**
* Clarify what exactly was changed in manifest file.
* @param {object} actualManifest manifest object
* @param {string} path manifest path
* @returns {boolean} has changed or not
* @internal
*/
const auditManifestChanges = (actualManifest, path) => {
const debugPrefix = `[${actualManifest.name}]`;
const oldManifest = getManifest(path);
const depScopes = ["dependencies", "devDependencies", "peerDependencies", "optionalDependencies"];
const changes = depScopes.reduce((res, scope) => {
const diff = difference(actualManifest[scope], oldManifest[scope]);
if (Object.keys(diff).length) {
res[scope] = diff;
}
return res;
}, {});
debug(debugPrefix, "package.json path=", path);
if (Object.keys(changes).length) {
debug(debugPrefix, "changes=", changes);
return true;
}
debug(debugPrefix, "no deps changes");
return false;
};
export {
getNextVersion,
getNextPreVersion,
|
getPreReleaseTag,
updateManifestDeps,
|
random_line_split
|
|
train.py
|
bs:]
# target = torch.ones(bs, requires_grad=True).to(device)
loss = loss_fn(pos_sim, neg_sim, target)
total_loss.append(loss.item())
pbar.set_postfix(batch_loss=loss.item())
loss.backward()
optimizer.step()
return np.mean(total_loss)
def evaluate(date_loader, model, topk):
"""
在dev上进行测试
"""
model.eval()
pbar = tqdm(date_loader, desc=f'Evaluate')
# 记录预测结果,计算Top-1正确率
qids = []
predictions = []
true_labels = []
with torch.no_grad():
for batch in pbar:
qids.extend(batch.id.cpu().numpy())
true_labels.extend(batch.label.cpu().numpy())
output = model(batch.question, batch.answer)
predictions.extend(output.cpu().numpy())
|
if isinstance(topk, int):
accuracy = get_accuracy(qids, predictions, true_labels, 1)
return accuracy
elif isinstance(topk, list):
accuracies = {}
for i in topk:
accuracy = get_accuracy(qids, predictions, true_labels, i)
accuracies[i] = accuracy
return accuracies
else:
raise ValueError('Error topk')
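# --- Illustrative sketch (not part of the original file) -------------------
# get_accuracy is defined elsewhere in this project; a minimal version of the
# per-question top-k idea might look like the hypothetical helper below:
# group candidate answers by question id, take the k highest-scoring ones,
# and count the question as correct if any of them is labelled positive.
def _topk_accuracy(qids, scores, labels, k):
    from collections import defaultdict
    per_question = defaultdict(list)
    for qid, score, label in zip(qids, scores, labels):
        per_question[qid].append((score, label))
    hits = 0
    for candidates in per_question.values():
        top = sorted(candidates, key=lambda x: x[0], reverse=True)[:k]
        hits += any(label == 1 for _, label in top)
    return hits / len(per_question)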
def run():
args = parse_args()
    # Seed all RNGs so experiments are reproducible
start_epoch = 1
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and args.device >= 0 else 'cpu')
if torch.cuda.is_available() and args.device >= 0:
        # Enabling this flag requires the input shapes to stay constant, otherwise cudnn
        # re-tunes for every new shape and training gets slower instead of faster.
        # The RNN inputs are currently fit to a fixed length; the CNN models can enable the flag.
        if args.arch in ['stack', 'multi', 'stack_multi']:
            torch.backends.cudnn.benchmark = True
    # Output directory
    if args.resume_snapshot:
        # make sure the snapshot file exists
        assert os.path.exists(args.resume_snapshot), f'{args.resume_snapshot} does not exist!'
model_dir, model_file = os.path.split(args.resume_snapshot)
output_dir, _ = os.path.split(model_dir)
else:
base_dir = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
output_dir = os.path.join(args.out_dir, base_dir)
model_dir = os.path.join(output_dir, 'save_model')
        os.makedirs(output_dir)  # create the output root directory
        os.makedirs(model_dir)
    # Log the arguments
    logger = get_logger(output_dir)
    logger.info(pprint.pformat(vars(args)))
    logger.info(f'output dir is {output_dir}')
    # Load the datasets
    train_dataset, dev_dataset, test_dataset, vocab, vectors = get_dataset(args, logger)
    vectors_dim = 300 if vectors is None else vectors.size(1)
    # Build the data iterators
train_loader = torchtext.data.BucketIterator(train_dataset, args.batch_size, device=device, train=True,
shuffle=True, sort=False, repeat=False)
dev_loader = torchtext.data.BucketIterator(dev_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
test_loader = torchtext.data.BucketIterator(test_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
    # Build the model, optimizer and loss function
if args.arch == 'stack':
model = StackCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.stack_kernel_sizes, out_channels=args.stack_out_channels).to(device)
elif args.arch == 'multi':
model = MultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.multi_kernel_sizes, out_channels=args.multi_out_channels).to(device)
elif args.arch == 'stack_multi':
model = StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
stack_kernel_sizes=args.stack_kernel_sizes, stack_out_channels=args.stack_out_channels,
multi_kernel_sizes=args.multi_kernel_sizes, multi_out_channels=args.multi_out_channels
).to(device)
elif args.arch == 'bigru':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
dropout_r=args.dropout, embed_weight=vectors).to(device)
elif args.arch == 'bigru_cnn':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRUCNN(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
cnn_channel=args.cnn_channel, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'norm_stack_multi':
# model = NormStackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, sent_length=args.fix_length,
# embed_weight=vectors).to(device)
# elif args.arch == 'stack_multi_atten':
# model = QA_StackMultiAttentionCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'ap_stack_multi':
# model = QA_AP_StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'bilstm':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for LSTM model'
# hidden_size = int(args.hidden_size)
# model = BiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bilstm':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'bigru':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
# hidden_size = int(args.hidden_size)
# model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bigru':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d,
# sent_max_length=args.fix_length, dropout_r=args.dropout, embed_weight=vectors).to(device)
else:
raise ValueError("--arch is unknown")
    # Select the optimizer (specific models may need a specific one)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optimizer == 'adagrad':
optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
else:
raise ValueError("--optimizer is unknown")
loss_fn = torch.nn.MarginRankingLoss(margin=args.margin)
architecture = model.__class__.__name__
    # Resume from a previously trained snapshot
if args.resume_snapshot:
state = torch.load(args.resume_snapshot)
model.load_state_dict(state['model'])
optimizer.load_state_dict(state['optimizer'])
epoch = state['epoch']
start_epoch = state['epoch'] + 1
if 'best_dev_score' in state:
            # compatibility with checkpoints saved by an older version
dev_acc = state['best_dev_score']
test_acc = 0
else:
dev_acc = state['dev_accuracy']
test_acc = state['test_accuracy']
logger.info(f"load state {args.resume_snapshot}, dev accuracy {dev_acc}, test accuracy {test_acc}")
    # Record the arguments to a CSV file
    with open(f'{output_dir}/arguments.csv', 'a') as f:
        for k, v in vars(args).items():
            f.write(f'{k},{v}\n')
    # Write logs to TensorBoard
    writer = SummaryWriter(output_dir)
    # Record the model's computation graph
try:
q = torch.randint_like(torch.Tensor(1, args.fix_length), 2, 100, dtype=torch.long)
ql = torch.Tensor([args.fix
|
random_line_split
|
|
train.py
|
:]
# target = torch.ones(bs, requires_grad=True).to(device)
loss = loss_fn(pos_sim, neg_sim, target)
total_loss.append(loss.item())
pbar.set_postfix(batch_loss=loss.item())
loss.backward()
optimizer.step()
return np.mean(total_loss)
def evaluate(date_loader, model, topk):
"""
在dev上进行测试
"""
model.eval()
pbar = tqdm(date_loader, desc=f'Evaluate')
# 记录预测结果,计算Top-1正确率
qids = []
predictions = []
true_labels = []
with torch.no_grad():
for batch in pbar:
qids.extend(batch.id.cpu().numpy())
true_labels.extend(batch.label.cpu().numpy())
output = model(batch.question, batch.answer)
predictions.extend(output.cpu().numpy())
if isinstance(topk, int):
accuracy = get_accuracy(qids, predictions, true_
|
cies = {}
for i in topk:
accuracy = get_accuracy(qids, predictions, true_labels, i)
accuracies[i] = accuracy
return accuracies
else:
raise ValueError('Error topk')
def run():
args = parse_args()
# 初始化随机数种子,以便于复现实验结果
start_epoch = 1
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and args.device >= 0 else 'cpu')
if torch.cuda.is_available() and args.device >= 0:
# 开启这个flag需要保证输入数据的维度不变,不然每次cudnn都要重新优化,反而更加耗时
# 现在RNN部分输入会进行fit length,CNN那里可以启用这个参数
if args.arch in ['stack', 'multi', 'stack_multi']:
torch.backends.cudnn.benchmark = True
# 输出目录
if args.resume_snapshot:
        # make sure the snapshot file exists
        assert os.path.exists(args.resume_snapshot), f'{args.resume_snapshot} does not exist!'
model_dir, model_file = os.path.split(args.resume_snapshot)
output_dir, _ = os.path.split(model_dir)
else:
base_dir = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
output_dir = os.path.join(args.out_dir, base_dir)
model_dir = os.path.join(output_dir, 'save_model')
os.makedirs(output_dir) # 创建输出根目录
os.makedirs(model_dir)
# 输出参数
logger = get_logger(output_dir)
logger.info(pprint.pformat(vars(args)))
logger.info(f'output dir is {output_dir}')
# 获取数据集
train_dataset, dev_dataset, test_dataset, vocab, vectors = get_dataset(args, logger)
vectors_dim = 300 if vectors is None else vectors.size(1)
# 创建迭代器
train_loader = torchtext.data.BucketIterator(train_dataset, args.batch_size, device=device, train=True,
shuffle=True, sort=False, repeat=False)
dev_loader = torchtext.data.BucketIterator(dev_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
test_loader = torchtext.data.BucketIterator(test_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
# 创建模型,优化器,损失函数
if args.arch == 'stack':
model = StackCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.stack_kernel_sizes, out_channels=args.stack_out_channels).to(device)
elif args.arch == 'multi':
model = MultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.multi_kernel_sizes, out_channels=args.multi_out_channels).to(device)
elif args.arch == 'stack_multi':
model = StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
stack_kernel_sizes=args.stack_kernel_sizes, stack_out_channels=args.stack_out_channels,
multi_kernel_sizes=args.multi_kernel_sizes, multi_out_channels=args.multi_out_channels
).to(device)
elif args.arch == 'bigru':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
dropout_r=args.dropout, embed_weight=vectors).to(device)
elif args.arch == 'bigru_cnn':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRUCNN(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
cnn_channel=args.cnn_channel, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'norm_stack_multi':
# model = NormStackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, sent_length=args.fix_length,
# embed_weight=vectors).to(device)
# elif args.arch == 'stack_multi_atten':
# model = QA_StackMultiAttentionCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'ap_stack_multi':
# model = QA_AP_StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'bilstm':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for LSTM model'
# hidden_size = int(args.hidden_size)
# model = BiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bilstm':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'bigru':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
# hidden_size = int(args.hidden_size)
# model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bigru':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d,
# sent_max_length=args.fix_length, dropout_r=args.dropout, embed_weight=vectors).to(device)
else:
raise ValueError("--arch is unknown")
# 为特定模型指定特殊的优化函数
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optimizer == 'adagrad':
optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
else:
raise ValueError("--optimizer is unknown")
loss_fn = torch.nn.MarginRankingLoss(margin=args.margin)
architecture = model.__class__.__name__
# 载入以训练的数据
if args.resume_snapshot:
state = torch.load(args.resume_snapshot)
model.load_state_dict(state['model'])
optimizer.load_state_dict(state['optimizer'])
epoch = state['epoch']
start_epoch = state['epoch'] + 1
if 'best_dev_score' in state:
# 适配旧版本保存的模型参数
dev_acc = state['best_dev_score']
test_acc = 0
else:
dev_acc = state['dev_accuracy']
test_acc = state['test_accuracy']
logger.info(f"load state {args.resume_snapshot}, dev accuracy {dev_acc}, test accuracy {test_acc}")
# 记录参数
with open(f'{output_dir}/arguments.csv', 'a') as f:
for k, v in vars(args).items():
f.write(f'{k},{v}\n')
# 将日志写入到TensorBoard中
writer = SummaryWriter(output_dir)
# 记录模型的计算图
try:
q = torch.randint_like(torch.Tensor(1, args.fix_length), 2, 100, dtype=torch.long)
ql = torch.Tensor([args
|
labels, 1)
return accuracy
elif isinstance(topk, list):
accura
|
conditional_block
|
train.py
|
bs:]
# target = torch.ones(bs, requires_grad=True).to(device)
loss = loss_fn(pos_sim, neg_sim, target)
total_loss.append(loss.item())
pbar.set_postfix(batch_loss=loss.item())
loss.backward()
optimizer.step()
return np.mean(total_loss)
def evaluate(date_loader, model, topk):
"""
在dev上进行测试
"""
model.eval()
pbar = tqdm(date_loader, desc=f'Evaluate')
# 记录预测结果,计算Top-1正确率
qids = []
predictions = []
true_labels = []
with torch.no_grad():
for batch in pbar:
qids.extend(batch.id.cpu().numpy())
true_labels.extend(batch.label.cpu().numpy())
output = model(batch.question, batch.answer)
predictions.extend(output.cpu().numpy())
if isinstance(topk, int):
accuracy = get_accuracy(qids, predictions, true_labels, 1)
return accuracy
elif isinstance(topk, list):
accuracies = {}
for i in topk:
accuracy = get_accuracy(qids, predictions, true_labels, i)
accuracies[i] = accuracy
return accuracies
else:
raise ValueError('Error topk')
def run():
args = parse_args()
# 初始化随机数种子,以便于复现实验结果
|
output_dir = os.path.join(args.out_dir, base_dir)
model_dir = os.path.join(output_dir, 'save_model')
os.makedirs(output_dir) # 创建输出根目录
os.makedirs(model_dir)
# 输出参数
logger = get_logger(output_dir)
logger.info(pprint.pformat(vars(args)))
logger.info(f'output dir is {output_dir}')
# 获取数据集
train_dataset, dev_dataset, test_dataset, vocab, vectors = get_dataset(args, logger)
vectors_dim = 300 if vectors is None else vectors.size(1)
# 创建迭代器
train_loader = torchtext.data.BucketIterator(train_dataset, args.batch_size, device=device, train=True,
shuffle=True, sort=False, repeat=False)
dev_loader = torchtext.data.BucketIterator(dev_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
test_loader = torchtext.data.BucketIterator(test_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
# 创建模型,优化器,损失函数
if args.arch == 'stack':
model = StackCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.stack_kernel_sizes, out_channels=args.stack_out_channels).to(device)
elif args.arch == 'multi':
model = MultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.multi_kernel_sizes, out_channels=args.multi_out_channels).to(device)
elif args.arch == 'stack_multi':
model = StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
stack_kernel_sizes=args.stack_kernel_sizes, stack_out_channels=args.stack_out_channels,
multi_kernel_sizes=args.multi_kernel_sizes, multi_out_channels=args.multi_out_channels
).to(device)
elif args.arch == 'bigru':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
dropout_r=args.dropout, embed_weight=vectors).to(device)
elif args.arch == 'bigru_cnn':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRUCNN(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
cnn_channel=args.cnn_channel, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'norm_stack_multi':
# model = NormStackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, sent_length=args.fix_length,
# embed_weight=vectors).to(device)
# elif args.arch == 'stack_multi_atten':
# model = QA_StackMultiAttentionCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'ap_stack_multi':
# model = QA_AP_StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'bilstm':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for LSTM model'
# hidden_size = int(args.hidden_size)
# model = BiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bilstm':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'bigru':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
# hidden_size = int(args.hidden_size)
# model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bigru':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d,
# sent_max_length=args.fix_length, dropout_r=args.dropout, embed_weight=vectors).to(device)
else:
raise ValueError("--arch is unknown")
# 为特定模型指定特殊的优化函数
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optimizer == 'adagrad':
optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
else:
raise ValueError("--optimizer is unknown")
loss_fn = torch.nn.MarginRankingLoss(margin=args.margin)
architecture = model.__class__.__name__
# 载入以训练的数据
if args.resume_snapshot:
state = torch.load(args.resume_snapshot)
model.load_state_dict(state['model'])
optimizer.load_state_dict(state['optimizer'])
epoch = state['epoch']
start_epoch = state['epoch'] + 1
if 'best_dev_score' in state:
# 适配旧版本保存的模型参数
dev_acc = state['best_dev_score']
test_acc = 0
else:
dev_acc = state['dev_accuracy']
test_acc = state['test_accuracy']
logger.info(f"load state {args.resume_snapshot}, dev accuracy {dev_acc}, test accuracy {test_acc}")
# 记录参数
with open(f'{output_dir}/arguments.csv', 'a') as f:
for k, v in vars(args).items():
f.write(f'{k},{v}\n')
# 将日志写入到TensorBoard中
writer = SummaryWriter(output_dir)
# 记录模型的计算图
try:
q = torch.randint_like(torch.Tensor(1, args.fix_length), 2, 100, dtype=torch.long)
ql = torch.Tensor([args.fix
|
start_epoch = 1
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and args.device >= 0 else 'cpu')
if torch.cuda.is_available() and args.device >= 0:
# 开启这个flag需要保证输入数据的维度不变,不然每次cudnn都要重新优化,反而更加耗时
# 现在RNN部分输入会进行fit length,CNN那里可以启用这个参数
if args.arch in ['stack', 'multi', 'stack_multi']:
torch.backends.cudnn.benchmark = True
# 输出目录
if args.resume_snapshot:
        # make sure the snapshot file exists
        assert os.path.exists(args.resume_snapshot), f'{args.resume_snapshot} does not exist!'
model_dir, model_file = os.path.split(args.resume_snapshot)
output_dir, _ = os.path.split(model_dir)
else:
base_dir = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
|
identifier_body
|
train.py
|
(epoch, data_loader, model, optimizer, loss_fn, device):
"""
    Run one training epoch over the data loader.
"""
model.train()
pbar = tqdm(data_loader, desc='Train Epoch {}'.format(epoch))
total_loss = []
for batch_idx, batch in enumerate(pbar):
optimizer.zero_grad()
target = torch.ones(batch.batch_size, requires_grad=True).to(device)
pos_sim = model(batch.question, batch.pos_answer)
neg_sim = model(batch.question, batch.neg_answer)
# bs = batch.batch_size
# question = batch.question[0].repeat(2, 1)
# question_len = batch.question[1].repeat(2)
# answer = torch.cat([batch.pos_answer[0], batch.neg_answer[0]], dim=0)
# answer_len = torch.cat([batch.pos_answer[1], batch.neg_answer[1]], dim=0)
# sim = model((question,question_len), (answer,answer_len))
# pos_sim, neg_sim = sim[:bs], sim[bs:]
# target = torch.ones(bs, requires_grad=True).to(device)
loss = loss_fn(pos_sim, neg_sim, target)
total_loss.append(loss.item())
pbar.set_postfix(batch_loss=loss.item())
loss.backward()
optimizer.step()
return np.mean(total_loss)
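# --- Illustrative sketch (not part of the original file) -------------------
# The pairwise objective above relies on torch.nn.MarginRankingLoss: with
# target y = 1 each pair contributes max(0, margin - (pos_sim - neg_sim)),
# i.e. the positive answer must beat the negative one by at least `margin`.
def _margin_ranking_demo():
    import torch
    pos = torch.tensor([0.9, 0.2])
    neg = torch.tensor([0.1, 0.4])
    target = torch.ones_like(pos)
    loss = torch.nn.MarginRankingLoss(margin=0.5)(pos, neg, target)
    # pair 1: max(0, 0.5 - 0.8) = 0.0 ; pair 2: max(0, 0.5 + 0.2) = 0.7 ; mean = 0.35
    return loss.item()  # ~0.35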
def evaluate(date_loader, model, topk):
"""
在dev上进行测试
"""
model.eval()
pbar = tqdm(date_loader, desc=f'Evaluate')
# 记录预测结果,计算Top-1正确率
qids = []
predictions = []
true_labels = []
with torch.no_grad():
for batch in pbar:
qids.extend(batch.id.cpu().numpy())
true_labels.extend(batch.label.cpu().numpy())
output = model(batch.question, batch.answer)
predictions.extend(output.cpu().numpy())
if isinstance(topk, int):
accuracy = get_accuracy(qids, predictions, true_labels, 1)
return accuracy
elif isinstance(topk, list):
accuracies = {}
for i in topk:
accuracy = get_accuracy(qids, predictions, true_labels, i)
accuracies[i] = accuracy
return accuracies
else:
raise ValueError('Error topk')
def run():
args = parse_args()
# 初始化随机数种子,以便于复现实验结果
start_epoch = 1
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and args.device >= 0 else 'cpu')
if torch.cuda.is_available() and args.device >= 0:
# 开启这个flag需要保证输入数据的维度不变,不然每次cudnn都要重新优化,反而更加耗时
# 现在RNN部分输入会进行fit length,CNN那里可以启用这个参数
if args.arch in ['stack', 'multi', 'stack_multi']:
torch.backends.cudnn.benchmark = True
# 输出目录
if args.resume_snapshot:
        # make sure the snapshot file exists
        assert os.path.exists(args.resume_snapshot), f'{args.resume_snapshot} does not exist!'
model_dir, model_file = os.path.split(args.resume_snapshot)
output_dir, _ = os.path.split(model_dir)
else:
base_dir = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
output_dir = os.path.join(args.out_dir, base_dir)
model_dir = os.path.join(output_dir, 'save_model')
os.makedirs(output_dir) # 创建输出根目录
os.makedirs(model_dir)
# 输出参数
logger = get_logger(output_dir)
logger.info(pprint.pformat(vars(args)))
logger.info(f'output dir is {output_dir}')
# 获取数据集
train_dataset, dev_dataset, test_dataset, vocab, vectors = get_dataset(args, logger)
vectors_dim = 300 if vectors is None else vectors.size(1)
# 创建迭代器
train_loader = torchtext.data.BucketIterator(train_dataset, args.batch_size, device=device, train=True,
shuffle=True, sort=False, repeat=False)
dev_loader = torchtext.data.BucketIterator(dev_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
test_loader = torchtext.data.BucketIterator(test_dataset, args.batch_size, device=device, train=False,
shuffle=False, sort=False, repeat=False)
# 创建模型,优化器,损失函数
if args.arch == 'stack':
model = StackCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.stack_kernel_sizes, out_channels=args.stack_out_channels).to(device)
elif args.arch == 'multi':
model = MultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
kernel_sizes=args.multi_kernel_sizes, out_channels=args.multi_out_channels).to(device)
elif args.arch == 'stack_multi':
model = StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors,
stack_kernel_sizes=args.stack_kernel_sizes, stack_out_channels=args.stack_out_channels,
multi_kernel_sizes=args.multi_kernel_sizes, multi_out_channels=args.multi_out_channels
).to(device)
elif args.arch == 'bigru':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
dropout_r=args.dropout, embed_weight=vectors).to(device)
elif args.arch == 'bigru_cnn':
assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
hidden_size = int(args.hidden_size)
model = BiGRUCNN(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
cnn_channel=args.cnn_channel, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'norm_stack_multi':
# model = NormStackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, sent_length=args.fix_length,
# embed_weight=vectors).to(device)
# elif args.arch == 'stack_multi_atten':
# model = QA_StackMultiAttentionCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'ap_stack_multi':
# model = QA_AP_StackMultiCNN(vocab_size=len(vocab), embed_dim=vectors_dim, embed_weight=vectors).to(
# device)
# elif args.arch == 'bilstm':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for LSTM model'
# hidden_size = int(args.hidden_size)
# model = BiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bilstm':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiLSTM(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d, dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'bigru':
# assert args.hidden_size.find(',') == -1, '--hidden-size must be a int for BiLSTM/BiGRU model'
# hidden_size = int(args.hidden_size)
# model = BiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# dropout_r=args.dropout, embed_weight=vectors).to(device)
# elif args.arch == 'stack_bigru':
# hidden_size = [int(i) for i in args.hidden_size.split(',')]
# model = StackBiGRU(vocab_size=len(vocab), embedding_dim=vectors_dim, hidden_size=hidden_size,
# mlp_d=args.mlp_d,
# sent_max_length=args.fix_length, dropout_r=args.dropout, embed_weight=vectors).to(device)
else:
raise ValueError("--arch is unknown")
# 为特定模型指定特殊的优化函数
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optimizer == 'adagrad':
optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
else:
raise ValueError("--optimizer is unknown")
loss_fn = torch.nn.MarginRankingLoss(margin=args.margin)
architecture = model.__class__.__name__
# 载入以训练的数据
if args.resume_snapshot:
state = torch.load(args.resume_snapshot)
model.load_state_dict(state['model'])
optimizer.load
|
train_epoch
|
identifier_name
|
|
util.rs
|
match file.write_all(data) {
Ok(()) => Ok(data.len()),
Err(_) => Ok(0), // signals to cURL that the writing failed
}
})?;
transfer.perform()?;
Ok(())
}
////////////////////////////////////////////////////////////////////////////////
// PathExt trait
////////////////////////////////////////////////////////////////////////////////
/// An extension trait for [`Path`] types.
///
/// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
pub trait PathExt {
fn metadata_modified(&self) -> Option<time::SystemTime>;
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>;
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for Path {
/// Returns the modified time of the file if available.
fn metadata_modified(&self) -> Option<time::SystemTime> {
fs::metadata(&self).and_then(|m| m.modified()).ok()
}
/// Returns whether the file at this path is newer than the file at the
/// given one. If either file does not exist, this method returns `false`.
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>,
{
match (self.metadata_modified(), other.as_ref().metadata_modified()) {
(Some(self_time), Some(other_time)) => self_time > other_time,
_ => false,
}
}
/// Expands the tilde in the path with the given home directory.
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix("~") {
home.as_ref().join(path)
} else {
self.to_path_buf()
}
}
/// Replaces the home directory in the path with a tilde.
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix(home) {
Self::new("~").join(path)
} else {
self.to_path_buf()
}
}
}
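// A minimal usage sketch (not part of the original file) illustrating the two
// tilde helpers above; the home directory and paths are invented for
// illustration, and the assertions reflect the implementations as written.
#[allow(dead_code)]
fn _path_ext_usage_sketch() {
    let home = Path::new("/home/user");
    // "~/.config" expands to "/home/user/.config" ...
    assert_eq!(
        Path::new("~/.config").expand_tilde(home),
        PathBuf::from("/home/user/.config")
    );
    // ... and `replace_home` maps it back to the tilde form.
    assert_eq!(
        Path::new("/home/user/.config").replace_home(home),
        PathBuf::from("~/.config")
    );
}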
////////////////////////////////////////////////////////////////////////////////
// TempPath type
////////////////////////////////////////////////////////////////////////////////
/// Holds a temporary directory or file path that is removed when dropped.
pub struct TempPath {
/// The temporary directory or file path.
path: Option<PathBuf>,
}
impl TempPath {
/// Create a new `TempPath` based on an original path; the temporary
/// file will be placed in the same directory with a deterministic name.
///
/// # Errors
///
/// If the temporary path already exists.
pub fn new(original_path: &Path) -> result::Result<Self, Self> {
let mut path = original_path.parent().unwrap().to_path_buf();
let mut file_name = ffi::OsString::from("~");
file_name.push(original_path.file_name().unwrap());
path.push(file_name);
let temp = Self { path: Some(path) };
if temp.path().exists() {
Err(temp)
} else {
Ok(temp)
}
}
/// Create a new `TempPath` based on an original path; if something already
/// exists at that temporary path it will be deleted.
pub fn new_force(original_path: &Path) -> Result<Self> {
match Self::new(original_path) {
Ok(temp) => Ok(temp),
Err(temp) => {
nuke_path(temp.path())?;
Ok(temp)
}
}
}
/// Access the underlying `Path`.
pub fn path(&self) -> &Path {
self.path.as_ref().unwrap()
}
/// Move the temporary path to a new location.
pub fn rename(mut self, new_path: &Path) -> io::Result<()> {
if let Err(err) = nuke_path(new_path) {
if err.kind() != io::ErrorKind::NotFound {
return Err(err);
}
};
if let Some(path) = &self.path {
fs::rename(path, new_path)?;
// This is so that the Drop impl doesn't try to delete a non-existent file.
self.path = None;
}
Ok(())
}
}
impl Drop for TempPath {
fn drop(&mut self) {
if let Some(path) = &self.path {
nuke_path(&path).ok();
}
}
}
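// A minimal usage sketch (not part of the original file) of the intended
// write-to-temporary-then-rename flow; the target path and contents are
// invented, and the crate-level `Result` alias is assumed to be anyhow-based
// as in the functions above.
#[allow(dead_code)]
fn _temp_path_usage_sketch(target: &Path) -> Result<()> {
    // Reserve "~<file_name>" next to the target, deleting any stale leftover.
    let temp = TempPath::new_force(target)?;
    // Write the new contents to the temporary path first ...
    fs::write(temp.path(), b"new contents")?;
    // ... then move it over the target; `Drop` never deletes anything here
    // because `rename` clears the stored path on success.
    temp.rename(target)?;
    Ok(())
}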
////////////////////////////////////////////////////////////////////////////////
// Mutex type
////////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct Mutex(File);
impl Mutex {
/// Create a new `Mutex` at the given path and attempt to acquire it.
pub fn acquire(ctx: &Context, path: &Path) -> Result<Self> {
let file = fs::OpenOptions::new()
.read(true)
.open(path)
.with_context(s!("failed to open `{}`", path.display()))?;
if let Err(e) = file.try_lock_exclusive() {
let msg = s!("failed to acquire file lock `{}`", path.display());
if e.raw_os_error() == lock_contended_error().raw_os_error() {
warning!(
ctx,
"Blocking",
&format!(
"waiting for file lock on {}",
ctx.replace_home(path).display()
)
);
file.lock_exclusive().with_context(msg)?;
} else {
return Err(e).with_context(msg);
}
}
Ok(Self(file))
}
}
impl Drop for Mutex {
fn drop(&mut self) {
self.0.unlock().ok();
}
}
////////////////////////////////////////////////////////////////////////////////
// Git module
////////////////////////////////////////////////////////////////////////////////
pub mod git {
use std::path::Path;
use git2::{
BranchType, Cred, CredentialType, Error, FetchOptions, Oid, RemoteCallbacks, Repository,
ResetType,
};
use once_cell::sync::Lazy;
use url::Url;
use anyhow::Context as ResultExt;
/// Call a function with generated fetch options.
fn with_fetch_options<T, F>(f: F) -> anyhow::Result<T>
where
F: FnOnce(FetchOptions<'_>) -> anyhow::Result<T>,
{
let mut rcb = RemoteCallbacks::new();
rcb.credentials(|_, username, allowed| {
if allowed.contains(CredentialType::SSH_KEY) {
if let Some(username) = username {
return Cred::ssh_key_from_agent(username);
}
}
if allowed.contains(CredentialType::DEFAULT) {
return Cred::default();
}
Err(Error::from_str(
"remote authentication required but none available",
))
});
let mut opts = FetchOptions::new();
opts.remote_callbacks(rcb);
f(opts)
}
/// Open a Git repository.
pub fn open(dir: &Path) -> anyhow::Result<Repository> {
let repo = Repository::open(dir)
.with_context(s!("failed to open repository at `{}`", dir.display()))?;
Ok(repo)
}
static DEFAULT_REFSPECS: Lazy<Vec<String>> = Lazy::new(|| {
vec_into![
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD"
]
});
/// Clone a Git repository.
pub fn clone(url: &Url, dir: &Path) -> anyhow::Result<Repository> {
with_fetch_options(|mut opts| {
let repo = Repository::init(dir)?;
repo.remote("origin", url.as_str())?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(repo)
})
.with_context(s!("failed to git clone `{}`", url))
}
/// Fetch a Git repository.
pub fn fetch(repo: &Repository) -> anyhow::Result<()> {
with_fetch_options(|mut opts| {
repo.find_remote("origin")
.context("failed to find remote `origin`")?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(())
})
.context("failed to git fetch")
}
/// Check out a repository at a particular revision.
pub fn checkout(repo: &Repository, oid: Oid) -> anyhow::Result<()> {
let obj = repo
.find_object(oid, None)
.with_context(s!("failed to find `{}`", oid))?;
repo.reset(&obj, ResetType::Hard, None)
.with_context(s!("failed to set HEAD to `{}`", oid))?;
repo.checkout_tree(&obj, None)
.with_context(s!("failed to checkout `{}`", oid))
|
fn _submodule_update(repo: &Repository, todo: &mut Vec<Repository>) -> Result<(), Error> {
for mut submodule in repo.submodules()? {
submodule.update(true, None)?;
todo.push(submodule.open()?);
}
Ok(())
}
let mut repos = Vec::new();
_submodule_update(&repo, &mut repos)?;
while let Some(repo) = repos.pop() {
_submodule_update(&repo, &mut repos)?;
}
Ok(())
}
fn resolve_refname(repo: &Repository, refname: &str) -> Result<Oid, Error> {
let ref_id = repo.refname_to_id(refname)?;
let obj = repo.find_object(ref_id, None)?;
let obj =
|
}
/// Recursively update Git submodules.
pub fn submodule_update(repo: &Repository) -> Result<(), Error> {
|
random_line_split
|
util.rs
|
match file.write_all(data) {
Ok(()) => Ok(data.len()),
Err(_) => Ok(0), // signals to cURL that the writing failed
}
})?;
transfer.perform()?;
Ok(())
}
////////////////////////////////////////////////////////////////////////////////
// PathExt trait
////////////////////////////////////////////////////////////////////////////////
/// An extension trait for [`Path`] types.
///
/// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
pub trait PathExt {
fn metadata_modified(&self) -> Option<time::SystemTime>;
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>;
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for Path {
/// Returns the modified time of the file if available.
fn metadata_modified(&self) -> Option<time::SystemTime> {
fs::metadata(&self).and_then(|m| m.modified()).ok()
}
/// Returns whether the file at this path is newer than the file at the
/// given one. If either file does not exist, this method returns `false`.
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>,
{
match (self.metadata_modified(), other.as_ref().metadata_modified()) {
(Some(self_time), Some(other_time)) => self_time > other_time,
_ => false,
}
}
/// Expands the tilde in the path with the given home directory.
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix("~") {
home.as_ref().join(path)
} else {
self.to_path_buf()
}
}
/// Replaces the home directory in the path with a tilde.
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix(home) {
Self::new("~").join(path)
} else {
self.to_path_buf()
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TempPath type
////////////////////////////////////////////////////////////////////////////////
/// Holds a temporary directory or file path that is removed when dropped.
pub struct
|
{
/// The temporary directory or file path.
path: Option<PathBuf>,
}
impl TempPath {
/// Create a new `TempPath` based on an original path; the temporary
/// file will be placed in the same directory with a deterministic name.
///
/// # Errors
///
/// If the temporary path already exists.
pub fn new(original_path: &Path) -> result::Result<Self, Self> {
let mut path = original_path.parent().unwrap().to_path_buf();
let mut file_name = ffi::OsString::from("~");
file_name.push(original_path.file_name().unwrap());
path.push(file_name);
let temp = Self { path: Some(path) };
if temp.path().exists() {
Err(temp)
} else {
Ok(temp)
}
}
/// Create a new `TempPath` based on an original path; if something already
/// exists at that temporary path it will be deleted.
pub fn new_force(original_path: &Path) -> Result<Self> {
match Self::new(original_path) {
Ok(temp) => Ok(temp),
Err(temp) => {
nuke_path(temp.path())?;
Ok(temp)
}
}
}
/// Access the underlying `Path`.
pub fn path(&self) -> &Path {
self.path.as_ref().unwrap()
}
/// Move the temporary path to a new location.
pub fn rename(mut self, new_path: &Path) -> io::Result<()> {
if let Err(err) = nuke_path(new_path) {
if err.kind() != io::ErrorKind::NotFound {
return Err(err);
}
};
if let Some(path) = &self.path {
fs::rename(path, new_path)?;
// This is so that the Drop impl doesn't try to delete a non-existent file.
self.path = None;
}
Ok(())
}
}
impl Drop for TempPath {
fn drop(&mut self) {
if let Some(path) = &self.path {
nuke_path(&path).ok();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Mutex type
////////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct Mutex(File);
impl Mutex {
/// Create a new `Mutex` at the given path and attempt to acquire it.
pub fn acquire(ctx: &Context, path: &Path) -> Result<Self> {
let file = fs::OpenOptions::new()
.read(true)
.open(path)
.with_context(s!("failed to open `{}`", path.display()))?;
if let Err(e) = file.try_lock_exclusive() {
let msg = s!("failed to acquire file lock `{}`", path.display());
if e.raw_os_error() == lock_contended_error().raw_os_error() {
warning!(
ctx,
"Blocking",
&format!(
"waiting for file lock on {}",
ctx.replace_home(path).display()
)
);
file.lock_exclusive().with_context(msg)?;
} else {
return Err(e).with_context(msg);
}
}
Ok(Self(file))
}
}
impl Drop for Mutex {
fn drop(&mut self) {
self.0.unlock().ok();
}
}
////////////////////////////////////////////////////////////////////////////////
// Git module
////////////////////////////////////////////////////////////////////////////////
pub mod git {
use std::path::Path;
use git2::{
BranchType, Cred, CredentialType, Error, FetchOptions, Oid, RemoteCallbacks, Repository,
ResetType,
};
use once_cell::sync::Lazy;
use url::Url;
use anyhow::Context as ResultExt;
/// Call a function with generated fetch options.
fn with_fetch_options<T, F>(f: F) -> anyhow::Result<T>
where
F: FnOnce(FetchOptions<'_>) -> anyhow::Result<T>,
{
let mut rcb = RemoteCallbacks::new();
rcb.credentials(|_, username, allowed| {
if allowed.contains(CredentialType::SSH_KEY) {
if let Some(username) = username {
return Cred::ssh_key_from_agent(username);
}
}
if allowed.contains(CredentialType::DEFAULT) {
return Cred::default();
}
Err(Error::from_str(
"remote authentication required but none available",
))
});
let mut opts = FetchOptions::new();
opts.remote_callbacks(rcb);
f(opts)
}
/// Open a Git repository.
pub fn open(dir: &Path) -> anyhow::Result<Repository> {
let repo = Repository::open(dir)
.with_context(s!("failed to open repository at `{}`", dir.display()))?;
Ok(repo)
}
static DEFAULT_REFSPECS: Lazy<Vec<String>> = Lazy::new(|| {
vec_into![
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD"
]
});
/// Clone a Git repository.
pub fn clone(url: &Url, dir: &Path) -> anyhow::Result<Repository> {
with_fetch_options(|mut opts| {
let repo = Repository::init(dir)?;
repo.remote("origin", url.as_str())?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(repo)
})
.with_context(s!("failed to git clone `{}`", url))
}
/// Fetch a Git repository.
pub fn fetch(repo: &Repository) -> anyhow::Result<()> {
with_fetch_options(|mut opts| {
repo.find_remote("origin")
.context("failed to find remote `origin`")?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(())
})
.context("failed to git fetch")
}
/// Check out a repository at a particular revision.
pub fn checkout(repo: &Repository, oid: Oid) -> anyhow::Result<()> {
let obj = repo
.find_object(oid, None)
.with_context(s!("failed to find `{}`", oid))?;
repo.reset(&obj, ResetType::Hard, None)
.with_context(s!("failed to set HEAD to `{}`", oid))?;
repo.checkout_tree(&obj, None)
.with_context(s!("failed to checkout `{}`", oid))
}
/// Recursively update Git submodules.
pub fn submodule_update(repo: &Repository) -> Result<(), Error> {
fn _submodule_update(repo: &Repository, todo: &mut Vec<Repository>) -> Result<(), Error> {
for mut submodule in repo.submodules()? {
submodule.update(true, None)?;
todo.push(submodule.open()?);
}
Ok(())
}
let mut repos = Vec::new();
_submodule_update(&repo, &mut repos)?;
while let Some(repo) = repos.pop() {
_submodule_update(&repo, &mut repos)?;
}
Ok(())
}
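// A minimal usage sketch (not part of the original file) tying the helpers
// above together into a clone-or-update flow; the URL, directory and commit
// id are supplied by the caller and invented for illustration.
#[allow(dead_code)]
fn _git_usage_sketch(url: &Url, dir: &Path, oid: Oid) -> anyhow::Result<()> {
    // Reuse an existing repository when possible, otherwise clone from scratch.
    let repo = match open(dir) {
        Ok(repo) => repo,
        Err(_) => clone(url, dir)?,
    };
    // Bring `origin` up to date, pin the work tree to the requested commit,
    // and pull in any submodules it references.
    fetch(&repo)?;
    checkout(&repo, oid)?;
    submodule_update(&repo)?;
    Ok(())
}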
fn resolve_refname(repo: &Repository, refname: &str) -> Result<Oid, Error> {
let ref_id = repo.refname_to_id(refname)?;
let obj = repo.find_object(ref_id, None)?;
let obj =
|
TempPath
|
identifier_name
|
util.rs
|
match file.write_all(data) {
Ok(()) => Ok(data.len()),
Err(_) => Ok(0), // signals to cURL that the writing failed
}
})?;
transfer.perform()?;
Ok(())
}
////////////////////////////////////////////////////////////////////////////////
// PathExt trait
////////////////////////////////////////////////////////////////////////////////
/// An extension trait for [`Path`] types.
///
/// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
pub trait PathExt {
fn metadata_modified(&self) -> Option<time::SystemTime>;
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>;
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for Path {
/// Returns the modified time of the file if available.
fn metadata_modified(&self) -> Option<time::SystemTime> {
fs::metadata(&self).and_then(|m| m.modified()).ok()
}
/// Returns whether the file at this path is newer than the file at the
/// given one. If either file does not exist, this method returns `false`.
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>,
{
match (self.metadata_modified(), other.as_ref().metadata_modified()) {
(Some(self_time), Some(other_time)) => self_time > other_time,
_ => false,
}
}
/// Expands the tilde in the path with the given home directory.
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix("~") {
home.as_ref().join(path)
} else {
self.to_path_buf()
}
}
/// Replaces the home directory in the path with a tilde.
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix(home) {
Self::new("~").join(path)
} else {
self.to_path_buf()
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TempPath type
////////////////////////////////////////////////////////////////////////////////
/// Holds a temporary directory or file path that is removed when dropped.
pub struct TempPath {
/// The temporary directory or file path.
path: Option<PathBuf>,
}
impl TempPath {
/// Create a new `TempPath` based on an original path; the temporary
/// file will be placed in the same directory with a deterministic name.
///
/// # Errors
///
/// If the temporary path already exists.
pub fn new(original_path: &Path) -> result::Result<Self, Self> {
let mut path = original_path.parent().unwrap().to_path_buf();
let mut file_name = ffi::OsString::from("~");
file_name.push(original_path.file_name().unwrap());
path.push(file_name);
let temp = Self { path: Some(path) };
if temp.path().exists() {
Err(temp)
} else {
Ok(temp)
}
}
/// Create a new `TempPath` based on an original path; if something already
/// exists at that temporary path it will be deleted.
pub fn new_force(original_path: &Path) -> Result<Self> {
match Self::new(original_path) {
Ok(temp) => Ok(temp),
Err(temp) => {
nuke_path(temp.path())?;
Ok(temp)
}
}
}
/// Access the underlying `Path`.
pub fn path(&self) -> &Path {
self.path.as_ref().unwrap()
}
/// Move the temporary path to a new location.
pub fn rename(mut self, new_path: &Path) -> io::Result<()> {
if let Err(err) = nuke_path(new_path) {
if err.kind() != io::ErrorKind::NotFound {
return Err(err);
}
};
if let Some(path) = &self.path {
fs::rename(path, new_path)?;
// This is so that the Drop impl doesn't try to delete a non-existent file.
self.path = None;
}
Ok(())
}
}
impl Drop for TempPath {
fn drop(&mut self) {
if let Some(path) = &self.path {
nuke_path(&path).ok();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Mutex type
////////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct Mutex(File);
impl Mutex {
/// Create a new `Mutex` at the given path and attempt to acquire it.
pub fn acquire(ctx: &Context, path: &Path) -> Result<Self> {
let file = fs::OpenOptions::new()
.read(true)
.open(path)
.with_context(s!("failed to open `{}`", path.display()))?;
if let Err(e) = file.try_lock_exclusive() {
let msg = s!("failed to acquire file lock `{}`", path.display());
if e.raw_os_error() == lock_contended_error().raw_os_error() {
warning!(
ctx,
"Blocking",
&format!(
"waiting for file lock on {}",
ctx.replace_home(path).display()
)
);
file.lock_exclusive().with_context(msg)?;
} else {
return Err(e).with_context(msg);
}
}
Ok(Self(file))
}
}
impl Drop for Mutex {
fn drop(&mut self) {
self.0.unlock().ok();
}
}
////////////////////////////////////////////////////////////////////////////////
// Git module
////////////////////////////////////////////////////////////////////////////////
pub mod git {
use std::path::Path;
use git2::{
BranchType, Cred, CredentialType, Error, FetchOptions, Oid, RemoteCallbacks, Repository,
ResetType,
};
use once_cell::sync::Lazy;
use url::Url;
use anyhow::Context as ResultExt;
/// Call a function with generated fetch options.
fn with_fetch_options<T, F>(f: F) -> anyhow::Result<T>
where
F: FnOnce(FetchOptions<'_>) -> anyhow::Result<T>,
{
let mut rcb = RemoteCallbacks::new();
rcb.credentials(|_, username, allowed| {
if allowed.contains(CredentialType::SSH_KEY) {
if let Some(username) = username {
return Cred::ssh_key_from_agent(username);
}
}
if allowed.contains(CredentialType::DEFAULT) {
return Cred::default();
}
Err(Error::from_str(
"remote authentication required but none available",
))
});
let mut opts = FetchOptions::new();
opts.remote_callbacks(rcb);
f(opts)
}
/// Open a Git repository.
pub fn open(dir: &Path) -> anyhow::Result<Repository> {
let repo = Repository::open(dir)
.with_context(s!("failed to open repository at `{}`", dir.display()))?;
Ok(repo)
}
static DEFAULT_REFSPECS: Lazy<Vec<String>> = Lazy::new(|| {
vec_into![
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD"
]
});
/// Clone a Git repository.
pub fn clone(url: &Url, dir: &Path) -> anyhow::Result<Repository>
|
/// Fetch a Git repository.
pub fn fetch(repo: &Repository) -> anyhow::Result<()> {
with_fetch_options(|mut opts| {
repo.find_remote("origin")
.context("failed to find remote `origin`")?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(())
})
.context("failed to git fetch")
}
/// Check out a repository at a particular revision.
pub fn checkout(repo: &Repository, oid: Oid) -> anyhow::Result<()> {
let obj = repo
.find_object(oid, None)
.with_context(s!("failed to find `{}`", oid))?;
repo.reset(&obj, ResetType::Hard, None)
.with_context(s!("failed to set HEAD to `{}`", oid))?;
repo.checkout_tree(&obj, None)
.with_context(s!("failed to checkout `{}`", oid))
}
/// Recursively update Git submodules.
pub fn submodule_update(repo: &Repository) -> Result<(), Error> {
fn _submodule_update(repo: &Repository, todo: &mut Vec<Repository>) -> Result<(), Error> {
for mut submodule in repo.submodules()? {
submodule.update(true, None)?;
todo.push(submodule.open()?);
}
Ok(())
}
let mut repos = Vec::new();
_submodule_update(&repo, &mut repos)?;
while let Some(repo) = repos.pop() {
_submodule_update(&repo, &mut repos)?;
}
Ok(())
}
fn resolve_refname(repo: &Repository, refname: &str) -> Result<Oid, Error> {
let ref_id = repo.refname_to_id(refname)?;
let obj = repo.find_object(ref_id, None)?;
let obj
|
{
with_fetch_options(|mut opts| {
let repo = Repository::init(dir)?;
repo.remote("origin", url.as_str())?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(repo)
})
.with_context(s!("failed to git clone `{}`", url))
}
|
identifier_body
|
Main.py
|
.players + 1):
link_entrances(world, player)
mark_light_world_regions(world)
else:
for player in range(1, world.players + 1):
link_inverted_entrances(world, player)
mark_dark_world_regions(world)
logger.info('Generating Item Pool.')
for player in range(1, world.players + 1):
generate_itempool(world, player)
logger.info('Calculating Access Rules.')
for player in range(1, world.players + 1):
set_rules(world, player)
logger.info('Placing Dungeon Prizes.')
fill_prizes(world)
logger.info('Placing Dungeon Items.')
shuffled_locations = None
if args.algorithm in ['balanced', 'vt26'] or args.keysanity:
shuffled_locations = world.get_unfilled_locations()
random.shuffle(shuffled_locations)
fill_dungeons_restrictive(world, shuffled_locations)
else:
fill_dungeons(world)
logger.info('Fill the world.')
if args.algorithm == 'flood':
flood_items(world) # different algo, biased towards early game progress items
elif args.algorithm == 'vt21':
distribute_items_cutoff(world, 1)
elif args.algorithm == 'vt22':
distribute_items_cutoff(world, 0.66)
elif args.algorithm == 'freshness':
|
elif args.algorithm == 'vt25':
distribute_items_restrictive(world, 0)
elif args.algorithm == 'vt26':
distribute_items_restrictive(world, gt_filler(world), shuffled_locations)
elif args.algorithm == 'balanced':
distribute_items_restrictive(world, gt_filler(world))
if world.players > 1:
logger.info('Balancing multiworld progression.')
balance_multiworld_progression(world)
logger.info('Patching ROM.')
if args.sprite is not None:
if isinstance(args.sprite, Sprite):
sprite = args.sprite
else:
sprite = Sprite(args.sprite)
else:
sprite = None
outfilebase = 'ER_%s_%s-%s-%s-%s%s_%s-%s%s%s%s%s_%s' % (world.logic, world.difficulty, world.difficulty_adjustments, world.mode, world.goal, "" if world.timer in ['none', 'display'] else "-" + world.timer, world.shuffle, world.algorithm, "-keysanity" if world.keysanity else "", "-retro" if world.retro else "", "-prog_" + world.progressive if world.progressive in ['off', 'random'] else "", "-nohints" if not world.hints else "", world.seed)
use_enemizer = args.enemizercli and (args.shufflebosses != 'none' or args.shuffleenemies or args.enemy_health != 'default' or args.enemy_damage or args.shufflepalette or args.shufflepots)
jsonout = {}
if not args.suppress_rom:
if world.players > 1:
raise NotImplementedError("Multiworld rom writes have not been implemented")
else:
player = 1
local_rom = None
if args.jsonout:
rom = JsonRom()
else:
if use_enemizer:
local_rom = LocalRom(args.rom)
rom = JsonRom()
else:
rom = LocalRom(args.rom)
patch_rom(world, player, rom)
enemizer_patch = []
if use_enemizer:
enemizer_patch = get_enemizer_patch(world, player, rom, args.rom, args.enemizercli, args.shuffleenemies, args.enemy_health, args.enemy_damage, args.shufflepalette, args.shufflepots)
if args.jsonout:
jsonout['patch'] = rom.patches
if use_enemizer:
jsonout['enemizer_%d' % player] = enemizer_patch
else:
if use_enemizer:
local_rom.patch_enemizer(rom.patches, os.path.join(os.path.dirname(args.enemizercli), "enemizerBasePatch.json"), enemizer_patch)
rom = local_rom
apply_rom_settings(rom, args.heartbeep, args.heartcolor, world.quickswap, world.fastmenu, world.disable_music, sprite)
rom.write_to_file(output_path('%s.sfc' % outfilebase))
if args.create_spoiler and not args.jsonout:
world.spoiler.to_file(output_path('%s_Spoiler.txt' % outfilebase))
if not args.skip_playthrough:
logger.info('Calculating playthrough.')
create_playthrough(world)
if args.jsonout:
print(json.dumps({**jsonout, 'spoiler': world.spoiler.to_json()}))
elif args.create_spoiler and not args.skip_playthrough:
world.spoiler.to_file(output_path('%s_Spoiler.txt' % outfilebase))
logger.info('Done. Enjoy.')
logger.debug('Total Time: %s', time.perf_counter() - start)
return world
def gt_filler(world):
if world.goal == 'triforcehunt':
return random.randint(15, 50)
return random.randint(0, 15)
def copy_world(world):
# ToDo: Not good yet
ret = World(world.players, world.shuffle, world.logic, world.mode, world.swords, world.difficulty, world.difficulty_adjustments, world.timer, world.progressive, world.goal, world.algorithm, world.place_dungeon_items, world.accessibility, world.shuffle_ganon, world.quickswap, world.fastmenu, world.disable_music, world.keysanity, world.retro, world.custom, world.customitemarray, world.boss_shuffle, world.hints)
ret.required_medallions = world.required_medallions.copy()
ret.swamp_patch_required = world.swamp_patch_required.copy()
ret.ganon_at_pyramid = world.ganon_at_pyramid.copy()
ret.powder_patch_required = world.powder_patch_required.copy()
ret.ganonstower_vanilla = world.ganonstower_vanilla.copy()
ret.treasure_hunt_count = world.treasure_hunt_count
ret.treasure_hunt_icon = world.treasure_hunt_icon
ret.sewer_light_cone = world.sewer_light_cone
ret.light_world_light_cone = world.light_world_light_cone
ret.dark_world_light_cone = world.dark_world_light_cone
ret.seed = world.seed
ret.can_access_trock_eyebridge = world.can_access_trock_eyebridge
ret.can_access_trock_front = world.can_access_trock_front
ret.can_access_trock_big_chest = world.can_access_trock_big_chest
ret.can_access_trock_middle = world.can_access_trock_middle
ret.can_take_damage = world.can_take_damage
ret.difficulty_requirements = world.difficulty_requirements
ret.fix_fake_world = world.fix_fake_world
ret.lamps_needed_for_dark_rooms = world.lamps_needed_for_dark_rooms
ret.crystals_needed_for_ganon = world.crystals_needed_for_ganon
ret.crystals_needed_for_gt = world.crystals_needed_for_gt
if world.mode != 'inverted':
for player in range(1, world.players + 1):
create_regions(ret, player)
create_dungeons(ret, player)
else:
for player in range(1, world.players + 1):
create_inverted_regions(ret, player)
create_dungeons(ret, player)
copy_dynamic_regions_and_locations(world, ret)
# copy bosses
for dungeon in world.dungeons:
for level, boss in dungeon.bosses.items():
ret.get_dungeon(dungeon.name, dungeon.player).bosses[level] = boss
for shop in world.shops:
copied_shop = ret.get_region(shop.region.name, shop.region.player).shop
copied_shop.active = shop.active
copied_shop.inventory = copy.copy(shop.inventory)
# connect copied world
for region in world.regions:
copied_region = ret.get_region(region.name, region.player)
copied_region.is_light_world = region.is_light_world
copied_region.is_dark_world = region.is_dark_world
for entrance in region.entrances:
ret.get_entrance(entrance.name, entrance.player).connect(copied_region)
# fill locations
for location in world.get_locations():
if location.item is not None:
item = Item(location.item.name, location.item.advancement, location.item.priority, location.item.type, player = location.item.player)
ret.get_location(location.name, location.player).item = item
item.location = ret.get_location(location.name, location.player)
if location.event:
ret.get_location(location.name, location.player).event = True
if location.locked:
ret.get_location(location.name, location.player).locked = True
# copy remaining itempool. No item in itempool should have an assigned location
for item in world.itempool:
ret.itempool.append(Item(item.name, item.advancement, item.priority, item.type, player = item.player))
# copy progress items in state
ret.state.prog_items = world.state.prog_items.copy()
ret.precollected_items = world.precollected_items.copy()
ret.state.stale = {player: True for player in range(1, world.players + 1)}
for player in range(1, world.players + 1):
set_rules(ret, player)
return ret
def copy_dynamic_regions_and_locations(world, ret):
for
|
distribute_items_staleness(world)
|
conditional_block
|
Main.py
|
# ToDo: Not good yet
ret = World(world.players, world.shuffle, world.logic, world.mode, world.swords, world.difficulty, world.difficulty_adjustments, world.timer, world.progressive, world.goal, world.algorithm, world.place_dungeon_items, world.accessibility, world.shuffle_ganon, world.quickswap, world.fastmenu, world.disable_music, world.keysanity, world.retro, world.custom, world.customitemarray, world.boss_shuffle, world.hints)
ret.required_medallions = world.required_medallions.copy()
ret.swamp_patch_required = world.swamp_patch_required.copy()
ret.ganon_at_pyramid = world.ganon_at_pyramid.copy()
ret.powder_patch_required = world.powder_patch_required.copy()
ret.ganonstower_vanilla = world.ganonstower_vanilla.copy()
ret.treasure_hunt_count = world.treasure_hunt_count
ret.treasure_hunt_icon = world.treasure_hunt_icon
ret.sewer_light_cone = world.sewer_light_cone
ret.light_world_light_cone = world.light_world_light_cone
ret.dark_world_light_cone = world.dark_world_light_cone
ret.seed = world.seed
ret.can_access_trock_eyebridge = world.can_access_trock_eyebridge
ret.can_access_trock_front = world.can_access_trock_front
ret.can_access_trock_big_chest = world.can_access_trock_big_chest
ret.can_access_trock_middle = world.can_access_trock_middle
ret.can_take_damage = world.can_take_damage
ret.difficulty_requirements = world.difficulty_requirements
ret.fix_fake_world = world.fix_fake_world
ret.lamps_needed_for_dark_rooms = world.lamps_needed_for_dark_rooms
ret.crystals_needed_for_ganon = world.crystals_needed_for_ganon
ret.crystals_needed_for_gt = world.crystals_needed_for_gt
if world.mode != 'inverted':
for player in range(1, world.players + 1):
create_regions(ret, player)
create_dungeons(ret, player)
else:
for player in range(1, world.players + 1):
create_inverted_regions(ret, player)
create_dungeons(ret, player)
copy_dynamic_regions_and_locations(world, ret)
# copy bosses
for dungeon in world.dungeons:
for level, boss in dungeon.bosses.items():
ret.get_dungeon(dungeon.name, dungeon.player).bosses[level] = boss
for shop in world.shops:
copied_shop = ret.get_region(shop.region.name, shop.region.player).shop
copied_shop.active = shop.active
copied_shop.inventory = copy.copy(shop.inventory)
# connect copied world
for region in world.regions:
copied_region = ret.get_region(region.name, region.player)
copied_region.is_light_world = region.is_light_world
copied_region.is_dark_world = region.is_dark_world
for entrance in region.entrances:
ret.get_entrance(entrance.name, entrance.player).connect(copied_region)
# fill locations
for location in world.get_locations():
if location.item is not None:
item = Item(location.item.name, location.item.advancement, location.item.priority, location.item.type, player = location.item.player)
ret.get_location(location.name, location.player).item = item
item.location = ret.get_location(location.name, location.player)
if location.event:
ret.get_location(location.name, location.player).event = True
if location.locked:
ret.get_location(location.name, location.player).locked = True
# copy remaining itempool. No item in itempool should have an assigned location
for item in world.itempool:
ret.itempool.append(Item(item.name, item.advancement, item.priority, item.type, player = item.player))
# copy progress items in state
ret.state.prog_items = world.state.prog_items.copy()
ret.precollected_items = world.precollected_items.copy()
ret.state.stale = {player: True for player in range(1, world.players + 1)}
for player in range(1, world.players + 1):
set_rules(ret, player)
return ret
def copy_dynamic_regions_and_locations(world, ret):
for region in world.dynamic_regions:
new_reg = Region(region.name, region.type, region.hint_text, region.player)
new_reg.world = ret
ret.regions.append(new_reg)
ret.dynamic_regions.append(new_reg)
# Note: ideally exits should be copied here, but the current use case (Take anys) does not require this
if region.shop:
new_reg.shop = Shop(new_reg, region.shop.room_id, region.shop.type, region.shop.shopkeeper_config, region.shop.replaceable)
ret.shops.append(new_reg.shop)
for location in world.dynamic_locations:
new_reg = ret.get_region(location.parent_region.name, location.parent_region.player)
new_loc = Location(location.player, location.name, location.address, location.crystal, location.hint_text, new_reg)
# todo: this is potentially dangerous. later refactor so we
# can apply dynamic region rules on top of copied world like other rules
new_loc.access_rule = location.access_rule
new_loc.always_allow = location.always_allow
new_loc.item_rule = location.item_rule
new_reg.locations.append(new_loc)
ret.clear_location_cache()
def create_playthrough(world):
# create a copy as we will modify it
old_world = world
world = copy_world(world)
# if we only check for beatable, we can do this sanity check first before writing down spheres
if world.accessibility == 'none' and not world.can_beat_game():
raise RuntimeError('Cannot beat game. Something went terribly wrong here!')
# get locations containing progress items
prog_locations = [location for location in world.get_filled_locations() if location.item.advancement]
state_cache = [None]
collection_spheres = []
state = CollectionState(world)
sphere_candidates = list(prog_locations)
logging.getLogger('').debug('Building up collection spheres.')
while sphere_candidates:
if not world.keysanity:
state.sweep_for_events(key_only=True)
sphere = []
# build up spheres of collection radius. Everything in each sphere is independent from each other in dependencies and only depends on lower spheres
for location in sphere_candidates:
if state.can_reach(location):
sphere.append(location)
for location in sphere:
sphere_candidates.remove(location)
state.collect(location.item, True, location)
collection_spheres.append(sphere)
state_cache.append(state.copy())
logging.getLogger('').debug('Calculated sphere %i, containing %i of %i progress items.', len(collection_spheres), len(sphere), len(prog_locations))
if not sphere:
logging.getLogger('').debug('The following items could not be reached: %s', ['%s (Player %d) at %s (Player %d)' % (location.item.name, location.item.player, location.name, location.player) for location in sphere_candidates])
if not world.accessibility == 'none':
raise RuntimeError('Not all progression items reachable. Something went terribly wrong here.')
else:
break
# in the second phase, we cull each sphere such that the game is still beatable, reducing each range of influence to the bare minimum required inside it
for num, sphere in reversed(list(enumerate(collection_spheres))):
to_delete = []
for location in sphere:
# we remove the item at location and check if game is still beatable
logging.getLogger('').debug('Checking if %s (Player %d) is required to beat the game.', location.item.name, location.item.player)
old_item = location.item
location.item = None
state.remove(old_item)
if world.can_beat_game(state_cache[num]):
to_delete.append(location)
else:
# still required, got to keep it around
location.item = old_item
# cull entries in spheres for spoiler walkthrough at end
for location in to_delete:
sphere.remove(location)
# we are now down to just the required progress items in collection_spheres. Unfortunately
# the previous pruning stage could potentially have made certain items dependent on others
# in the same or a later sphere (because the location had two ways to be accessed, but the item
# originally used to access it was deemed not required). So we need to do one final sphere collection pass
# to build up the correct spheres
required_locations = [item for sphere in collection_spheres for item in sphere]
state = CollectionState(world)
collection_spheres = []
while required_locations:
if not world.keysanity:
state.sweep_for_events(key_only=True)
sphere = list(filter(state.can_reach, required_locations))
for location in sphere:
required_locations.remove(location)
state.collect(location.item, True, location)
collection_spheres.append(sphere)
logging.getLogger('').debug('Calculated final sphere %i, containing %i of %i progress items.', len(collection_spheres), len(sphere), len(required_locations))
if not sphere:
raise RuntimeError('Not all required items reachable. Something went terribly wrong here.')
# store the required locations for statistical analysis
old_world.required_locations = [(location.name, location.player) for sphere in collection_spheres for location in sphere]
def flist_to_iter(node):
while node:
value, node = node
yield value
def
|
get_path
|
identifier_name
|
|
Main.py
|
if world.mode != 'inverted':
for player in range(1, world.players + 1):
create_regions(world, player)
create_dungeons(world, player)
else:
for player in range(1, world.players + 1):
create_inverted_regions(world, player)
create_dungeons(world, player)
logger.info('Shuffling the World about.')
if world.mode != 'inverted':
for player in range(1, world.players + 1):
link_entrances(world, player)
mark_light_world_regions(world)
else:
for player in range(1, world.players + 1):
link_inverted_entrances(world, player)
mark_dark_world_regions(world)
logger.info('Generating Item Pool.')
for player in range(1, world.players + 1):
generate_itempool(world, player)
logger.info('Calculating Access Rules.')
for player in range(1, world.players + 1):
set_rules(world, player)
logger.info('Placing Dungeon Prizes.')
fill_prizes(world)
logger.info('Placing Dungeon Items.')
shuffled_locations = None
if args.algorithm in ['balanced', 'vt26'] or args.keysanity:
shuffled_locations = world.get_unfilled_locations()
random.shuffle(shuffled_locations)
fill_dungeons_restrictive(world, shuffled_locations)
else:
fill_dungeons(world)
logger.info('Fill the world.')
if args.algorithm == 'flood':
flood_items(world) # different algo, biased towards early game progress items
elif args.algorithm == 'vt21':
distribute_items_cutoff(world, 1)
elif args.algorithm == 'vt22':
distribute_items_cutoff(world, 0.66)
elif args.algorithm == 'freshness':
distribute_items_staleness(world)
elif args.algorithm == 'vt25':
distribute_items_restrictive(world, 0)
elif args.algorithm == 'vt26':
distribute_items_restrictive(world, gt_filler(world), shuffled_locations)
elif args.algorithm == 'balanced':
distribute_items_restrictive(world, gt_filler(world))
if world.players > 1:
logger.info('Balancing multiworld progression.')
balance_multiworld_progression(world)
logger.info('Patching ROM.')
if args.sprite is not None:
if isinstance(args.sprite, Sprite):
sprite = args.sprite
else:
sprite = Sprite(args.sprite)
else:
sprite = None
outfilebase = 'ER_%s_%s-%s-%s-%s%s_%s-%s%s%s%s%s_%s' % (world.logic, world.difficulty, world.difficulty_adjustments, world.mode, world.goal, "" if world.timer in ['none', 'display'] else "-" + world.timer, world.shuffle, world.algorithm, "-keysanity" if world.keysanity else "", "-retro" if world.retro else "", "-prog_" + world.progressive if world.progressive in ['off', 'random'] else "", "-nohints" if not world.hints else "", world.seed)
use_enemizer = args.enemizercli and (args.shufflebosses != 'none' or args.shuffleenemies or args.enemy_health != 'default' or args.enemy_damage or args.shufflepalette or args.shufflepots)
jsonout = {}
if not args.suppress_rom:
if world.players > 1:
raise NotImplementedError("Multiworld rom writes have not been implemented")
else:
player = 1
local_rom = None
if args.jsonout:
rom = JsonRom()
else:
if use_enemizer:
local_rom = LocalRom(args.rom)
rom = JsonRom()
else:
rom = LocalRom(args.rom)
patch_rom(world, player, rom)
enemizer_patch = []
if use_enemizer:
enemizer_patch = get_enemizer_patch(world, player, rom, args.rom, args.enemizercli, args.shuffleenemies, args.enemy_health, args.enemy_damage, args.shufflepalette, args.shufflepots)
if args.jsonout:
jsonout['patch'] = rom.patches
if use_enemizer:
jsonout['enemizer_%d' % player] = enemizer_patch
else:
if use_enemizer:
local_rom.patch_enemizer(rom.patches, os.path.join(os.path.dirname(args.enemizercli), "enemizerBasePatch.json"), enemizer_patch)
rom = local_rom
apply_rom_settings(rom, args.heartbeep, args.heartcolor, world.quickswap, world.fastmenu, world.disable_music, sprite)
rom.write_to_file(output_path('%s.sfc' % outfilebase))
if args.create_spoiler and not args.jsonout:
world.spoiler.to_file(output_path('%s_Spoiler.txt' % outfilebase))
if not args.skip_playthrough:
logger.info('Calculating playthrough.')
create_playthrough(world)
if args.jsonout:
print(json.dumps({**jsonout, 'spoiler': world.spoiler.to_json()}))
elif args.create_spoiler and not args.skip_playthrough:
world.spoiler.to_file(output_path('%s_Spoiler.txt' % outfilebase))
logger.info('Done. Enjoy.')
logger.debug('Total Time: %s', time.perf_counter() - start)
return world
def gt_filler(world):
if world.goal == 'triforcehunt':
return random.randint(15, 50)
return random.randint(0, 15)
def copy_world(world):
# ToDo: Not good yet
ret = World(world.players, world.shuffle, world.logic, world.mode, world.swords, world.difficulty, world.difficulty_adjustments, world.timer, world.progressive, world.goal, world.algorithm, world.place_dungeon_items, world.accessibility, world.shuffle_ganon, world.quickswap, world.fastmenu, world.disable_music, world.keysanity, world.retro, world.custom, world.customitemarray, world.boss_shuffle, world.hints)
ret.required_medallions = world.required_medallions.copy()
ret.swamp_patch_required = world.swamp_patch_required.copy()
ret.ganon_at_pyramid = world.ganon_at_pyramid.copy()
ret.powder_patch_required = world.powder_patch_required.copy()
ret.ganonstower_vanilla = world.ganonstower_vanilla.copy()
ret.treasure_hunt_count = world.treasure_hunt_count
ret.treasure_hunt_icon = world.treasure_hunt_icon
ret.sewer_light_cone = world.sewer_light_cone
ret.light_world_light_cone = world.light_world_light_cone
ret.dark_world_light_cone = world.dark_world_light_cone
ret.seed = world.seed
ret.can_access_trock_eyebridge = world.can_access_trock_eyebridge
ret.can_access_trock_front = world.can_access_trock_front
ret.can_access_trock_big_chest = world.can_access_trock_big_chest
ret.can_access_trock_middle = world.can_access_trock_middle
ret.can_take_damage = world.can_take_damage
ret.difficulty_requirements = world.difficulty_requirements
ret.fix_fake_world = world.fix_fake_world
ret.lamps_needed_for_dark_rooms = world.lamps_needed_for_dark_rooms
ret.crystals_needed_for_ganon = world.crystals_needed_for_ganon
ret.crystals_needed_for_gt = world.crystals_needed_for_gt
if world.mode != 'inverted':
for player in range(1, world.players + 1):
create_regions(ret, player)
create_dungeons(ret, player)
else:
for player in range(1, world.players + 1):
create_inverted_regions(ret, player)
create_dungeons(ret, player)
copy_dynamic_regions_and_locations(world, ret)
# copy bosses
for dungeon in world.dungeons:
for level, boss in dungeon.b
|
start = time.perf_counter()
# initialize the world
world = World(args.multi, args.shuffle, args.logic, args.mode, args.swords, args.difficulty, args.item_functionality, args.timer, args.progressive, args.goal, args.algorithm, not args.nodungeonitems, args.accessibility, args.shuffleganon, args.quickswap, args.fastmenu, args.disablemusic, args.keysanity, args.retro, args.custom, args.customitemarray, args.shufflebosses, args.hints)
logger = logging.getLogger('')
if seed is None:
random.seed(None)
world.seed = random.randint(0, 999999999)
else:
world.seed = int(seed)
random.seed(world.seed)
world.crystals_needed_for_ganon = random.randint(0, 7) if args.crystals_ganon == 'random' else int(args.crystals_ganon)
world.crystals_needed_for_gt = random.randint(0, 7) if args.crystals_gt == 'random' else int(args.crystals_gt)
world.rom_seeds = {player: random.randint(0, 999999999) for player in range(1, world.players + 1)}
logger.info('ALttP Entrance Randomizer Version %s - Seed: %s\n\n', __version__, world.seed)
world.difficulty_requirements = difficulties[world.difficulty]
|
identifier_body
|
|
Main.py
|
.copy()
ret.treasure_hunt_count = world.treasure_hunt_count
ret.treasure_hunt_icon = world.treasure_hunt_icon
ret.sewer_light_cone = world.sewer_light_cone
ret.light_world_light_cone = world.light_world_light_cone
ret.dark_world_light_cone = world.dark_world_light_cone
ret.seed = world.seed
ret.can_access_trock_eyebridge = world.can_access_trock_eyebridge
ret.can_access_trock_front = world.can_access_trock_front
ret.can_access_trock_big_chest = world.can_access_trock_big_chest
ret.can_access_trock_middle = world.can_access_trock_middle
ret.can_take_damage = world.can_take_damage
ret.difficulty_requirements = world.difficulty_requirements
ret.fix_fake_world = world.fix_fake_world
ret.lamps_needed_for_dark_rooms = world.lamps_needed_for_dark_rooms
ret.crystals_needed_for_ganon = world.crystals_needed_for_ganon
ret.crystals_needed_for_gt = world.crystals_needed_for_gt
if world.mode != 'inverted':
for player in range(1, world.players + 1):
create_regions(ret, player)
create_dungeons(ret, player)
else:
for player in range(1, world.players + 1):
create_inverted_regions(ret, player)
create_dungeons(ret, player)
copy_dynamic_regions_and_locations(world, ret)
# copy bosses
for dungeon in world.dungeons:
for level, boss in dungeon.bosses.items():
ret.get_dungeon(dungeon.name, dungeon.player).bosses[level] = boss
for shop in world.shops:
copied_shop = ret.get_region(shop.region.name, shop.region.player).shop
copied_shop.active = shop.active
copied_shop.inventory = copy.copy(shop.inventory)
# connect copied world
for region in world.regions:
copied_region = ret.get_region(region.name, region.player)
copied_region.is_light_world = region.is_light_world
copied_region.is_dark_world = region.is_dark_world
for entrance in region.entrances:
ret.get_entrance(entrance.name, entrance.player).connect(copied_region)
# fill locations
for location in world.get_locations():
if location.item is not None:
item = Item(location.item.name, location.item.advancement, location.item.priority, location.item.type, player = location.item.player)
ret.get_location(location.name, location.player).item = item
item.location = ret.get_location(location.name, location.player)
if location.event:
ret.get_location(location.name, location.player).event = True
if location.locked:
ret.get_location(location.name, location.player).locked = True
# copy remaining itempool. No item in itempool should have an assigned location
for item in world.itempool:
ret.itempool.append(Item(item.name, item.advancement, item.priority, item.type, player = item.player))
# copy progress items in state
ret.state.prog_items = world.state.prog_items.copy()
ret.precollected_items = world.precollected_items.copy()
ret.state.stale = {player: True for player in range(1, world.players + 1)}
for player in range(1, world.players + 1):
set_rules(ret, player)
return ret
def copy_dynamic_regions_and_locations(world, ret):
for region in world.dynamic_regions:
new_reg = Region(region.name, region.type, region.hint_text, region.player)
new_reg.world = ret
ret.regions.append(new_reg)
ret.dynamic_regions.append(new_reg)
# Note: ideally exits should be copied here, but the current use case (Take anys) does not require this
if region.shop:
new_reg.shop = Shop(new_reg, region.shop.room_id, region.shop.type, region.shop.shopkeeper_config, region.shop.replaceable)
ret.shops.append(new_reg.shop)
for location in world.dynamic_locations:
new_reg = ret.get_region(location.parent_region.name, location.parent_region.player)
new_loc = Location(location.player, location.name, location.address, location.crystal, location.hint_text, new_reg)
# todo: this is potentially dangerous. later refactor so we
# can apply dynamic region rules on top of copied world like other rules
new_loc.access_rule = location.access_rule
new_loc.always_allow = location.always_allow
new_loc.item_rule = location.item_rule
new_reg.locations.append(new_loc)
ret.clear_location_cache()
def create_playthrough(world):
# create a copy as we will modify it
old_world = world
world = copy_world(world)
# if we only check for beatable, we can do this sanity check first before writing down spheres
if world.accessibility == 'none' and not world.can_beat_game():
raise RuntimeError('Cannot beat game. Something went terribly wrong here!')
# get locations containing progress items
prog_locations = [location for location in world.get_filled_locations() if location.item.advancement]
state_cache = [None]
collection_spheres = []
state = CollectionState(world)
sphere_candidates = list(prog_locations)
logging.getLogger('').debug('Building up collection spheres.')
while sphere_candidates:
if not world.keysanity:
state.sweep_for_events(key_only=True)
sphere = []
# build up spheres of collection radius. Everything in each sphere is independent from each other in dependencies and only depends on lower spheres
for location in sphere_candidates:
if state.can_reach(location):
sphere.append(location)
for location in sphere:
sphere_candidates.remove(location)
state.collect(location.item, True, location)
collection_spheres.append(sphere)
state_cache.append(state.copy())
logging.getLogger('').debug('Calculated sphere %i, containing %i of %i progress items.', len(collection_spheres), len(sphere), len(prog_locations))
if not sphere:
logging.getLogger('').debug('The following items could not be reached: %s', ['%s (Player %d) at %s (Player %d)' % (location.item.name, location.item.player, location.name, location.player) for location in sphere_candidates])
if not world.accessibility == 'none':
raise RuntimeError('Not all progression items reachable. Something went terribly wrong here.')
else:
break
# in the second phase, we cull each sphere such that the game is still beatable, reducing each range of influence to the bare minimum required inside it
for num, sphere in reversed(list(enumerate(collection_spheres))):
to_delete = []
for location in sphere:
# we remove the item at location and check if game is still beatable
logging.getLogger('').debug('Checking if %s (Player %d) is required to beat the game.', location.item.name, location.item.player)
old_item = location.item
location.item = None
state.remove(old_item)
if world.can_beat_game(state_cache[num]):
to_delete.append(location)
else:
# still required, got to keep it around
location.item = old_item
# cull entries in spheres for spoiler walkthrough at end
for location in to_delete:
sphere.remove(location)
# we are now down to just the required progress items in collection_spheres. Unfortunately
# the previous pruning stage could potentially have made certain items dependent on others
# in the same or a later sphere (because the location had two ways to be accessed, but the item
# originally used to access it was deemed not required). So we need to do one final sphere collection pass
# to build up the correct spheres
required_locations = [item for sphere in collection_spheres for item in sphere]
state = CollectionState(world)
collection_spheres = []
while required_locations:
if not world.keysanity:
state.sweep_for_events(key_only=True)
sphere = list(filter(state.can_reach, required_locations))
for location in sphere:
required_locations.remove(location)
state.collect(location.item, True, location)
collection_spheres.append(sphere)
logging.getLogger('').debug('Calculated final sphere %i, containing %i of %i progress items.', len(collection_spheres), len(sphere), len(required_locations))
if not sphere:
raise RuntimeError('Not all required items reachable. Something went terribly wrong here.')
# store the required locations for statistical analysis
old_world.required_locations = [(location.name, location.player) for sphere in collection_spheres for location in sphere]
def flist_to_iter(node):
while node:
value, node = node
yield value
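# Illustrative note (not part of the original code): flist_to_iter walks a
# nested-pair "functional list" from newest to oldest, e.g.
# list(flist_to_iter(('C', ('B', ('A', None))))) == ['C', 'B', 'A'];
# get_path below reverses that back into origin-to-destination order.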
def get_path(state, region):
reversed_path_as_flist = state.path.get(region, (region, None))
string_path_flat = reversed(list(map(str, flist_to_iter(reversed_path_as_flist))))
# Now we combine the flat string list into (region, exit) pairs
pathsiter = iter(string_path_flat)
pathpairs = zip_longest(pathsiter, pathsiter)
return list(pathpairs)
old_world.spoiler.paths = dict()
for player in range(1, world.players + 1):
old_world.spoiler.paths.update({ str(location) : get_path(state, location.parent_region) for sphere in collection_spheres for location in sphere if location.player == player})
for _, path in dict(old_world.spoiler.paths).items():
|
if any(exit == 'Pyramid Fairy' for (_, exit) in path):
|
random_line_split
|
|
misc.go
|
502
StatusServiceUnavailable = 503
StatusGatewayTimeout = 504
StatusHTTPVersionNotSupported = 505
)
var statusText = map[int]string{
StatusContinue: "Continue",
StatusSwitchingProtocols: "Switching Protocols",
StatusOK: "OK",
StatusCreated: "Created",
StatusAccepted: "Accepted",
StatusNonAuthoritativeInformation: "Non-Authoritative Information",
StatusNoContent: "No Content",
StatusResetContent: "Reset Content",
StatusPartialContent: "Partial Content",
StatusMultipleChoices: "Multiple Choices",
StatusMovedPermanently: "Moved Permanently",
StatusFound: "Found",
StatusSeeOther: "See Other",
StatusNotModified: "Not Modified",
StatusUseProxy: "Use Proxy",
StatusTemporaryRedirect: "Temporary Redirect",
StatusBadRequest: "Bad Request",
StatusUnauthorized: "Unauthorized",
StatusPaymentRequired: "Payment Required",
StatusForbidden: "Forbidden",
StatusNotFound: "Not Found",
StatusMethodNotAllowed: "Method Not Allowed",
StatusNotAcceptable: "Not Acceptable",
StatusProxyAuthenticationRequired: "Proxy Authentication Required",
StatusRequestTimeout: "Request Timeout",
StatusConflict: "Conflict",
StatusGone: "Gone",
StatusLengthRequired: "Length Required",
StatusPreconditionFailed: "Precondition Failed",
StatusRequestEntityTooLarge: "Request Entity Too Large",
StatusRequestURITooLong: "Request URI Too Long",
StatusUnsupportedMediaType: "Unsupported Media Type",
StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
StatusExpectationFailed: "Expectation Failed",
StatusInternalServerError: "Internal Server Error",
StatusNotImplemented: "Not Implemented",
StatusBadGateway: "Bad Gateway",
StatusServiceUnavailable: "Service Unavailable",
StatusGatewayTimeout: "Gateway Timeout",
StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
}
// StatusText returns a text description of an HTTP status code.
func StatusText(status int) string {
s, found := statusText[status]
if !found {
s = "Status " + strconv.Itoa(status)
}
return s
}
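// exampleStatusText is a hedged usage sketch (not part of the original file):
// known codes map to their registered reason phrases, while unknown codes fall
// back to a generic "Status <code>" string.
func exampleStatusText() []string {
	return []string{
		StatusText(StatusNotFound), // "Not Found"
		StatusText(499),            // "Status 499" (not in the table)
	}
}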
// ProtocolVersion combines HTTP major and minor protocol numbers into a single
// integer for easy comparison of protocol versions.
func ProtocolVersion(major int, minor int) int {
if minor > 999 {
minor = 999
}
return major*1000 + minor
}
// Commonly used protocol versions in the format returned by the ProtocolVersion
// function.
const (
ProtocolVersion10 = 1000 // HTTP/1.0
ProtocolVersion11 = 1001 // HTTP/1.1
)
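// exampleAtLeastHTTP11 is a hedged usage sketch (not part of the original
// file): because ProtocolVersion collapses major/minor into one integer,
// version checks become ordinary comparisons, e.g. ProtocolVersion(1, 1) ==
// ProtocolVersion11 == 1001.
func exampleAtLeastHTTP11(major, minor int) bool {
	return ProtocolVersion(major, minor) >= ProtocolVersion11
}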
// parseCookieValues parses cookies from values and adds them to m. The
// function supports the Netscape draft specification for cookies
// (http://goo.gl/1WSx3).
func parseCookieValues(values []string, m Values) os.Error {
for _, s := range values {
key := ""
begin := 0
end := 0
for i := 0; i < len(s); i++ {
switch s[i] {
case ' ', '\t':
// leading whitespace?
if begin == end {
begin = i + 1
end = begin
}
case '=':
if key == "" {
key = s[begin:end]
begin = i + 1
end = begin
} else {
end += 1
}
case ';':
if len(key) > 0 && begin < end {
value := s[begin:end]
m.Add(key, value)
}
key = ""
begin = i + 1
end = begin
default:
end = i + 1
}
}
if len(key) > 0 && begin < end {
m.Add(key, s[begin:end])
}
}
return nil
}
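// Illustrative example (not part of the original file): given the header
// values []string{"a=1; b=2"}, parseCookieValues adds "a" -> "1" and
// "b" -> "2" to the Values multimap m.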
func signature(secret, key, expiration, value string) string {
hm := hmac.NewSHA1([]byte(secret))
io.WriteString(hm, key)
hm.Write([]byte{0})
io.WriteString(hm, expiration)
hm.Write([]byte{0})
io.WriteString(hm, value)
return hex.EncodeToString(hm.Sum())
}
// SignValue returns a string containing value, an expiration time and a
// signature. The expiration time is computed from the current time and
// maxAgeSeconds. The signature is an HMAC SHA-1 signature of value, context
// and the expiration time. Use the function VerifyValue to extract the value,
// check the expiration time and verify the signature.
//
// SignValue can be used to store credentials in a cookie:
//
// var secret string // Initialized by application
// const uidCookieMaxAge = 3600 * 24 * 30
//
// // uidCookieValue returns the Set-Cookie header value containing a
// // signed and timestamped user id.
// func uidCookieValue(uid string) string {
// s := web.SignValue(secret, "uid", uidCookieMaxAge, uid)
// return web.NewCookie("uid", s).MaxAge(uidCookieMaxAge).String()
// }
//
// // requestUid returns the user id from the request cookie. An error
// // is returned if the cookie is missing, the value has expired or the
// // signature is not valid.
// func requestUid(req *web.Request) (string, os.Error) {
// return web.VerifyValue(secret, "uid", req.Cookie.Get("uid"))
// }
func SignValue(secret, context string, maxAgeSeconds int, value string) string {
expiration := strconv.Itob64(time.Seconds()+int64(maxAgeSeconds), 16)
sig := signature(secret, context, expiration, value)
return sig + "~" + expiration + "~" + value
}
var errVerificationFailure = os.NewError("verification failed")
// VerifyValue extracts a value from a string created by SignValue. An error is
// returned if the expiration time has elapsed or the signature is not correct.
func VerifyValue(secret, context string, signedValue string) (string, os.Error) {
a := strings.Split(signedValue, "~", 3)
if len(a) != 3 {
return "", errVerificationFailure
}
expiration, err := strconv.Btoi64(a[1], 16)
if err != nil || expiration < time.Seconds() {
return "", errVerificationFailure
}
expectedSig := signature(secret, context, a[1], a[2])
actualSig := a[0]
if len(actualSig) != len(expectedSig) {
return "", errVerificationFailure
}
// Time independent compare
eq := 0
for i := 0; i < len(actualSig); i++ {
eq = eq | (int(actualSig[i]) ^ int(expectedSig[i]))
}
if eq != 0 {
return "", errVerificationFailure
}
return a[2], nil
}
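// Illustrative sketch (not part of the original file): a signed value
// round-trips through VerifyValue until it expires:
//
//	signed := SignValue(secret, "uid", 3600, "42")
//	uid, err := VerifyValue(secret, "uid", signed) // uid == "42", err == nil within the hour
//
// After the hour elapses, or if the string is tampered with, VerifyValue
// returns errVerificationFailure instead.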
// Cookie is a helper for constructing Set-Cookie header values.
//
// Cookie supports the ancient Netscape draft specification for cookies
// (http://goo.gl/1WSx3) and the modern HttpOnly attribute
// (http://www.owasp.org/index.php/HttpOnly). Cookie does not attempt to
// support any RFC for cookies because the RFCs are not supported by popular
// browsers.
//
// As a convenience, the NewCookie function returns a cookie with the path
// attribute set to "/" and the httponly attribute set to true.
//
// The following example shows how to set a cookie header using Cookie:
//
// func myHandler(req *web.Request) {
// c := web.NewCookie("my-cookie-name", "my-cookie-value").String()
// w := req.Respond(web.StatusOK, web.HeaderSetCookie, c)
// io.WriteString(w, "<html><body>Hello</body></html>")
// }
type Cookie struct {
name string
value string
path string
domain string
maxAge int
secure bool
httpOnly bool
}
// NewCookie returns a new cookie with the given name and value, the path
// attribute set to "/" and the httponly attribute set to true.
func NewCookie(name, value string) *Cookie {
return &Cookie{name: name, value: value, path: "/", httpOnly: true}
}
// Path sets the cookie path attribute. The path must either be "" or start with a
// '/'. The NewCookie function initializes the path to "/". If the path is "",
// then the path attribute is not included in the header value.
func (c *Cookie) Path(path string) *Cookie { c.path = path; return c }
// Domain sets the cookie domain attribute. If the host is "", then the domain
// attribute is not included in the header value.
func (c *Cookie) Domain(domain string) *Cookie { c.domain = domain; return c }
// MaxAge specifies the maximum age for a cookie. The age is converted to an
// absolute expiration time when the header value is rendered. If the maximum
// age is 0, then the expiration time is not included in the header value
// and the browser will handle the cookie as a "session" cookie.
func (c *Cookie)
|
MaxAge
|
identifier_name
|
|
misc.go
|
Value can be used to store credentials in a cookie:
//
// var secret string // Initialized by application
// const uidCookieMaxAge = 3600 * 24 * 30
//
// // uidCookieValue returns the Set-Cookie header value containing a
// // signed and timestamped user id.
// func uidCookieValue(uid string) string {
// s := web.SignValue(secret, "uid", uidCookieMaxAge, uid)
// return web.NewCookie("uid", s).MaxAge(uidCookieMaxAge).String()
// }
//
// // requestUid returns the user id from the request cookie. An error
// // is returned if the cookie is missing, the value has expired or the
// // signature is not valid.
// func requestUid(req *web.Request) (string, os.Error) {
// return web.VerifyValue(secret, "uid", req.Cookie.Get("uid"))
// }
func SignValue(secret, context string, maxAgeSeconds int, value string) string {
expiration := strconv.Itob64(time.Seconds()+int64(maxAgeSeconds), 16)
sig := signature(secret, context, expiration, value)
return sig + "~" + expiration + "~" + value
}
var errVerificationFailure = os.NewError("verification failed")
// VerifyValue extracts a value from a string created by SignValue. An error is
// returned if the expiration time has elapsed or the signature is not correct.
func VerifyValue(secret, context string, signedValue string) (string, os.Error) {
a := strings.Split(signedValue, "~", 3)
if len(a) != 3 {
return "", errVerificationFailure
}
expiration, err := strconv.Btoi64(a[1], 16)
if err != nil || expiration < time.Seconds() {
return "", errVerificationFailure
}
expectedSig := signature(secret, context, a[1], a[2])
actualSig := a[0]
if len(actualSig) != len(expectedSig) {
return "", errVerificationFailure
}
// Time independent compare
eq := 0
for i := 0; i < len(actualSig); i++ {
eq = eq | (int(actualSig[i]) ^ int(expectedSig[i]))
}
if eq != 0 {
return "", errVerificationFailure
}
return a[2], nil
}
// Cookie is a helper for constructing Set-Cookie header values.
//
// Cookie supports the ancient Netscape draft specification for cookies
// (http://goo.gl/1WSx3) and the modern HttpOnly attribute
// (http://www.owasp.org/index.php/HttpOnly). Cookie does not attempt to
// support any RFC for cookies because the RFCs are not supported by popular
// browsers.
//
// As a convenience, the NewCookie function returns a cookie with the path
// attribute set to "/" and the httponly attribute set to true.
//
// The following example shows how to set a cookie header using Cookie:
//
// func myHandler(req *web.Request) {
// c := web.NewCookie("my-cookie-name", "my-cookie-value").String()
// w := req.Respond(web.StatusOK, web.HeaderSetCookie, c)
// io.WriteString(w, "<html><body>Hello</body></html>")
// }
type Cookie struct {
name string
value string
path string
domain string
maxAge int
secure bool
httpOnly bool
}
// NewCookie returns a new cookie with the given name and value, the path
// attribute set to "/" and the httponly attribute set to true.
func NewCookie(name, value string) *Cookie {
return &Cookie{name: name, value: value, path: "/", httpOnly: true}
}
// Path sets the cookie path attribute. The path must either be "" or start with a
// '/'. The NewCookie function initializes the path to "/". If the path is "",
// then the path attribute is not included in the header value.
func (c *Cookie) Path(path string) *Cookie { c.path = path; return c }
// Domain sets the cookie domain attribute. If the host is "", then the domain
// attribute is not included in the header value.
func (c *Cookie) Domain(domain string) *Cookie { c.domain = domain; return c }
// MaxAge specifies the maximum age for a cookie. The age is converted to an
// absolute expiration time when the header value is rendered. If the maximum
// age is 0, then the expiration time is not included in the header value
// and the browser will handle the cookie as a "session" cookie.
func (c *Cookie) MaxAge(seconds int) *Cookie { c.maxAge = seconds; return c }
// MaxAgeDays sets the maximum age for the cookie in days.
func (c *Cookie) MaxAgeDays(days int) *Cookie { return c.MaxAge(days * 60 * 60 * 24) }
// Delete sets the expiration date to a time in the past.
func (c *Cookie) Delete() *Cookie { return c.MaxAgeDays(-30).HTTPOnly(false) }
// Secure sets the secure attribute.
func (c *Cookie) Secure(secure bool) *Cookie { c.secure = secure; return c }
// HTTPOnly sets the httponly attribute. The NewCookie function
// initializes the httponly attribute to true.
func (c *Cookie) HTTPOnly(httpOnly bool) *Cookie {
c.httpOnly = httpOnly
return c
}
// String renders the Set-Cookie header value as a string.
func (c *Cookie) String() string {
var buf bytes.Buffer
buf.WriteString(c.name)
buf.WriteByte('=')
buf.WriteString(c.value)
if c.path != "" {
buf.WriteString("; path=")
buf.WriteString(c.path)
}
if c.domain != "" {
buf.WriteString("; domain=")
buf.WriteString(c.domain)
}
if c.maxAge != 0 {
buf.WriteString("; expires=")
buf.WriteString(FormatDeltaSeconds(c.maxAge))
}
if c.secure {
buf.WriteString("; secure")
}
if c.httpOnly {
buf.WriteString("; HttpOnly")
}
return buf.String()
}
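// Illustrative example (not part of the original file): with the NewCookie
// defaults, NewCookie("id", "42").String() renders as
// "id=42; path=/; HttpOnly"; adding MaxAge(60) and Secure(true) also emits the
// expires and secure attributes.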
// HTMLEscapeString returns s with special HTML characters escaped.
func HTMLEscapeString(s string) string {
escape := false
for i := 0; i < len(s); i++ {
if c := s[i]; c == '"' || c == '\'' || c == '/' || c == '&' || c == '<' || c == '>' {
escape = true
break
}
}
if !escape {
return s
}
var b bytes.Buffer
for i := 0; i < len(s); i++ {
switch c := s[i]; c {
case '"':
b.WriteString("&quot;")
case '\'':
// &apos; is not defined in the HTML standard
b.WriteString("&#39;")
case '/':
// forward slash is included as it helps end an HTML entity
b.WriteString("&#47;")
case '&':
b.WriteString("&amp;")
case '<':
b.WriteString("&lt;")
case '>':
b.WriteString("&gt;")
default:
b.WriteByte(c)
}
}
return b.String()
}
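// Illustrative example (not part of the original file):
// HTMLEscapeString(`a < b & c`) returns "a &lt; b &amp; c"; a string with no
// special characters is returned unchanged without allocating a buffer.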
// CheckXSRF implements cross-site request forgery protection. Here's how it works:
//
// CheckXSRF sets a cookie with name cookieName to a random token.
//
// The application ensures that POSTed forms include a parameter with name
// paramName and value equal to the token.
//
// POSTed forms are considered valid if the cookieName cookie is set and is
// equal to the paramName request parameter. A third party site cannot generate
// a request where the cookie and request parameter are equal because the third
// party site cannot access the cookie value.
//
// CheckXSRF returns an error if the request is not valid. It is the application's
// responsibility to respond to the request with an appropriate error.
//
// Before returning, CheckXSRF ensures that the paramName request parameter is
// set to the token. The application should use the value of the paramName
// parameter when generating hidden fields in POSTed forms.
//
// CheckXSRF also validates PUT and DELETE requests.
//
// The X-XSRFToken header can be used to specify the token in addition to the
// paramName request parameter.
//
// See http://en.wikipedia.org/wiki/Cross-site_request_forgery for information
// on cross-site request forgery.
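//
// Illustrative sketch (not part of the original file; assumes Respond accepts
// just a status code, as in the Cookie example above): a POST handler
// typically calls CheckXSRF before acting on the form:
//
//	func saveHandler(req *Request) {
//		if err := CheckXSRF(req, "xsrf", "xsrf"); err != nil {
//			req.Respond(StatusForbidden)
//			return
//		}
//		// ... use req.Param ...
//	}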
func CheckXSRF(req *Request, cookieName string, paramName string) os.Error {
const tokenLen = 8
expectedToken := req.Cookie.Get(cookieName)
// Create new XSRF token?
if len(expectedToken) != tokenLen {
p := make([]byte, tokenLen/2)
_, err := rand.Reader.Read(p)
if err != nil {
panic("twister: rand read failed")
}
expectedToken = hex.EncodeToString(p)
c := NewCookie(cookieName, expectedToken).String()
FilterRespond(req, func(status int, header Header) (int, Header) {
header.Add(HeaderSetCookie, c)
return status, header
})
}
actualToken := req.Param.Get(paramName)
if actualToken == ""
|
{
actualToken = req.Header.Get(HeaderXXSRFToken)
req.Param.Set(paramName, expectedToken)
}
|
conditional_block
|
|
misc.go
|
1
StatusPaymentRequired = 402
StatusForbidden = 403
StatusNotFound = 404
StatusMethodNotAllowed = 405
StatusNotAcceptable = 406
StatusProxyAuthenticationRequired = 407
StatusRequestTimeout = 408
StatusConflict = 409
StatusGone = 410
StatusLengthRequired = 411
StatusPreconditionFailed = 412
StatusRequestEntityTooLarge = 413
StatusRequestURITooLong = 414
StatusUnsupportedMediaType = 415
StatusRequestedRangeNotSatisfiable = 416
StatusExpectationFailed = 417
StatusInternalServerError = 500
StatusNotImplemented = 501
StatusBadGateway = 502
StatusServiceUnavailable = 503
StatusGatewayTimeout = 504
StatusHTTPVersionNotSupported = 505
)
var statusText = map[int]string{
StatusContinue: "Continue",
StatusSwitchingProtocols: "Switching Protocols",
StatusOK: "OK",
StatusCreated: "Created",
StatusAccepted: "Accepted",
StatusNonAuthoritativeInformation: "Non-Authoritative Information",
StatusNoContent: "No Content",
StatusResetContent: "Reset Content",
StatusPartialContent: "Partial Content",
StatusMultipleChoices: "Multiple Choices",
StatusMovedPermanently: "Moved Permanently",
StatusFound: "Found",
StatusSeeOther: "See Other",
StatusNotModified: "Not Modified",
StatusUseProxy: "Use Proxy",
StatusTemporaryRedirect: "Temporary Redirect",
StatusBadRequest: "Bad Request",
StatusUnauthorized: "Unauthorized",
StatusPaymentRequired: "Payment Required",
StatusForbidden: "Forbidden",
StatusNotFound: "Not Found",
StatusMethodNotAllowed: "Method Not Allowed",
StatusNotAcceptable: "Not Acceptable",
StatusProxyAuthenticationRequired: "Proxy Authentication Required",
StatusRequestTimeout: "Request Timeout",
StatusConflict: "Conflict",
StatusGone: "Gone",
StatusLengthRequired: "Length Required",
StatusPreconditionFailed: "Precondition Failed",
StatusRequestEntityTooLarge: "Request Entity Too Large",
StatusRequestURITooLong: "Request URI Too Long",
StatusUnsupportedMediaType: "Unsupported Media Type",
StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
StatusExpectationFailed: "Expectation Failed",
StatusInternalServerError: "Internal Server Error",
StatusNotImplemented: "Not Implemented",
StatusBadGateway: "Bad Gateway",
StatusServiceUnavailable: "Service Unavailable",
StatusGatewayTimeout: "Gateway Timeout",
StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
}
// StatusText returns a text description of an HTTP status code.
func StatusText(status int) string {
s, found := statusText[status]
if !found {
s = "Status " + strconv.Itoa(status)
}
return s
}
// ProtocolVersion combines HTTP major and minor protocol numbers into a single
// integer for easy comparison of protocol versions.
func ProtocolVersion(major int, minor int) int {
if minor > 999 {
minor = 999
}
return major*1000 + minor
}
// Commonly used protocol versions in format returned by the ProtocolVersion
// function.
const (
ProtocolVersion10 = 1000 // HTTP/1.0
ProtocolVersion11 = 1001 // HTTP/1.1
)
// parseCookieValues parses cookies from values and adds them to m. The
// function supports the Netscape draft specification for cookies
// (http://goo.gl/1WSx3).
func parseCookieValues(values []string, m Values) os.Error {
for _, s := range values {
key := ""
begin := 0
end := 0
for i := 0; i < len(s); i++ {
switch s[i] {
case ' ', '\t':
// leading whitespace?
if begin == end {
begin = i + 1
end = begin
}
case '=':
if key == "" {
key = s[begin:end]
begin = i + 1
end = begin
} else {
end += 1
}
case ';':
if len(key) > 0 && begin < end {
value := s[begin:end]
m.Add(key, value)
}
key = ""
begin = i + 1
end = begin
default:
end = i + 1
}
}
if len(key) > 0 && begin < end {
m.Add(key, s[begin:end])
}
}
return nil
}
func signature(secret, key, expiration, value string) string {
hm := hmac.NewSHA1([]byte(secret))
io.WriteString(hm, key)
hm.Write([]byte{0})
io.WriteString(hm, expiration)
hm.Write([]byte{0})
io.WriteString(hm, value)
return hex.EncodeToString(hm.Sum())
}
// SignValue returns a string containing value, an expiration time and a
// signature. The expiration time is computed from the current time and
// maxAgeSeconds. The signature is an HMAC SHA-1 signature of value, context
// and the expiration time. Use the function VerifyValue to extract the value,
// check the expiration time and verify the signature.
//
// SignValue can be used to store credentials in a cookie:
//
// var secret string // Initialized by application
// const uidCookieMaxAge = 3600 * 24 * 30
//
// // uidCookieValue returns the Set-Cookie header value containing a
// // signed and timestamped user id.
// func uidCookieValue(uid string) string {
// s := web.SignValue(secret, "uid", uidCookieMaxAge, uid)
// return web.NewCookie("uid", s).MaxAge(uidCookieMaxAge).String()
// }
//
// // requestUid returns the user id from the request cookie. An error
// // is returned if the cookie is missing, the value has expired or the
// // signature is not valid.
// func requestUid(req *web.Request) (string, os.Error) {
// return web.VerifyValue(secret, "uid", req.Cookie.Get("uid"))
// }
func SignValue(secret, context string, maxAgeSeconds int, value string) string {
expiration := strconv.Itob64(time.Seconds()+int64(maxAgeSeconds), 16)
sig := signature(secret, context, expiration, value)
return sig + "~" + expiration + "~" + value
}
var errVerificationFailure = os.NewError("verification failed")
// VerifyValue extracts a value from a string created by SignValue. An error is
// returned if the expiration time has elapsed or the signature is not correct.
func VerifyValue(secret, context string, signedValue string) (string, os.Error) {
a := strings.Split(signedValue, "~", 3)
if len(a) != 3 {
return "", errVerificationFailure
}
expiration, err := strconv.Btoi64(a[1], 16)
if err != nil || expiration < time.Seconds() {
return "", errVerificationFailure
}
expectedSig := signature(secret, context, a[1], a[2])
actualSig := a[0]
if len(actualSig) != len(expectedSig) {
return "", errVerificationFailure
}
// Time independent compare
eq := 0
for i := 0; i < len(actualSig); i++ {
eq = eq | (int(actualSig[i]) ^ int(expectedSig[i]))
}
if eq != 0 {
return "", errVerificationFailure
}
return a[2], nil
}
// Cookie is a helper for constructing Set-Cookie header values.
//
// Cookie supports the ancient Netscape draft specification for cookies
// (http://goo.gl/1WSx3) and the modern HttpOnly attribute
// (http://www.owasp.org/index.php/HttpOnly). Cookie does not attempt to
// support any RFC for cookies because the RFCs are not supported by popular
// browsers.
//
// As a convenience, the NewCookie function returns a cookie with the path
// attribute set to "/" and the httponly attribute set to true.
//
// The following example shows how to set a cookie header using Cookie:
//
// func myHandler(req *web.Request) {
// c := web.NewCookie("my-cookie-name", "my-cookie-value").String()
// w := req.Respond(web.StatusOK, web.HeaderSetCookie, c)
// io.WriteString(w, "<html><body>Hello</body></html>")
// }
type Cookie struct {
name string
value string
path string
domain string
maxAge int
secure bool
httpOnly bool
}
// NewCookie returns a new cookie with the given name and value, the path
// attribute set to "/" and the httponly attribute set to true.
func NewCookie(name, value string) *Cookie
|
{
return &Cookie{name: name, value: value, path: "/", httpOnly: true}
}
|
identifier_body
|
|
misc.go
|
401
StatusPaymentRequired = 402
StatusForbidden = 403
StatusNotFound = 404
StatusMethodNotAllowed = 405
StatusNotAcceptable = 406
StatusProxyAuthenticationRequired = 407
StatusRequestTimeout = 408
StatusConflict = 409
StatusGone = 410
StatusLengthRequired = 411
StatusPreconditionFailed = 412
StatusRequestEntityTooLarge = 413
StatusRequestURITooLong = 414
StatusUnsupportedMediaType = 415
StatusRequestedRangeNotSatisfiable = 416
StatusExpectationFailed = 417
StatusInternalServerError = 500
StatusNotImplemented = 501
StatusBadGateway = 502
StatusServiceUnavailable = 503
StatusGatewayTimeout = 504
StatusHTTPVersionNotSupported = 505
)
var statusText = map[int]string{
StatusContinue: "Continue",
StatusSwitchingProtocols: "Switching Protocols",
StatusOK: "OK",
StatusCreated: "Created",
StatusAccepted: "Accepted",
StatusNonAuthoritativeInformation: "Non-Authoritative Information",
StatusNoContent: "No Content",
StatusResetContent: "Reset Content",
StatusPartialContent: "Partial Content",
StatusMultipleChoices: "Multiple Choices",
|
StatusNotModified: "Not Modified",
StatusUseProxy: "Use Proxy",
StatusTemporaryRedirect: "Temporary Redirect",
StatusBadRequest: "Bad Request",
StatusUnauthorized: "Unauthorized",
StatusPaymentRequired: "Payment Required",
StatusForbidden: "Forbidden",
StatusNotFound: "Not Found",
StatusMethodNotAllowed: "Method Not Allowed",
StatusNotAcceptable: "Not Acceptable",
StatusProxyAuthenticationRequired: "Proxy Authentication Required",
StatusRequestTimeout: "Request Timeout",
StatusConflict: "Conflict",
StatusGone: "Gone",
StatusLengthRequired: "Length Required",
StatusPreconditionFailed: "Precondition Failed",
StatusRequestEntityTooLarge: "Request Entity Too Large",
StatusRequestURITooLong: "Request URI Too Long",
StatusUnsupportedMediaType: "Unsupported Media Type",
StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
StatusExpectationFailed: "Expectation Failed",
StatusInternalServerError: "Internal Server Error",
StatusNotImplemented: "Not Implemented",
StatusBadGateway: "Bad Gateway",
StatusServiceUnavailable: "Service Unavailable",
StatusGatewayTimeout: "Gateway Timeout",
StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
}
// StatusText returns a text description of an HTTP status code.
func StatusText(status int) string {
s, found := statusText[status]
if !found {
s = "Status " + strconv.Itoa(status)
}
return s
}
// ProtocolVersion combines HTTP major and minor protocol numbers into a single
// integer for easy comparison of protocol versions.
func ProtocolVersion(major int, minor int) int {
if minor > 999 {
minor = 999
}
return major*1000 + minor
}
// Commonly used protocol versions in format returned by the ProtocolVersion
// function.
const (
ProtocolVersion10 = 1000 // HTTP/1.0
ProtocolVersion11 = 1001 // HTTP/1.1
)
// parseCookieValues parses cookies from values and adds them to m. The
// function supports the Netscape draft specification for cookies
// (http://goo.gl/1WSx3).
func parseCookieValues(values []string, m Values) os.Error {
for _, s := range values {
key := ""
begin := 0
end := 0
for i := 0; i < len(s); i++ {
switch s[i] {
case ' ', '\t':
// leading whitespace?
if begin == end {
begin = i + 1
end = begin
}
case '=':
if key == "" {
key = s[begin:end]
begin = i + 1
end = begin
} else {
end += 1
}
case ';':
if len(key) > 0 && begin < end {
value := s[begin:end]
m.Add(key, value)
}
key = ""
begin = i + 1
end = begin
default:
end = i + 1
}
}
if len(key) > 0 && begin < end {
m.Add(key, s[begin:end])
}
}
return nil
}
func signature(secret, key, expiration, value string) string {
hm := hmac.NewSHA1([]byte(secret))
io.WriteString(hm, key)
hm.Write([]byte{0})
io.WriteString(hm, expiration)
hm.Write([]byte{0})
io.WriteString(hm, value)
return hex.EncodeToString(hm.Sum())
}
// SignValue returns a string containing value, an expiration time and a
// signature. The expiration time is computed from the current time and
// maxAgeSeconds. The signature is an HMAC SHA-1 signature of value, context
// and the expiration time. Use the function VerifyValue to extract the value,
// check the expiration time and verify the signature.
//
// SignValue can be used to store credentials in a cookie:
//
// var secret string // Initialized by application
// const uidCookieMaxAge = 3600 * 24 * 30
//
// // uidCookieValue returns the Set-Cookie header value containing a
// // signed and timestamped user id.
// func uidCookieValue(uid string) string {
// s := web.SignValue(secret, "uid", uidCookieMaxAge, uid)
// return web.NewCookie("uid", s).MaxAge(uidCookieMaxAge).String()
// }
//
// // requestUid returns the user id from the request cookie. An error
// // is returned if the cookie is missing, the value has expired or the
// // signature is not valid.
// func requestUid(req *web.Request) (string, os.Error) {
// return web.VerifyValue(secret, "uid", req.Cookie.Get("uid"))
// }
func SignValue(secret, context string, maxAgeSeconds int, value string) string {
expiration := strconv.Itob64(time.Seconds()+int64(maxAgeSeconds), 16)
sig := signature(secret, context, expiration, value)
return sig + "~" + expiration + "~" + value
}
var errVerificationFailure = os.NewError("verification failed")
// VerifyValue extracts a value from a string created by SignValue. An error is
// returned if the expiration time has elapsed or the signature is not correct.
func VerifyValue(secret, context string, signedValue string) (string, os.Error) {
a := strings.Split(signedValue, "~", 3)
if len(a) != 3 {
return "", errVerificationFailure
}
expiration, err := strconv.Btoi64(a[1], 16)
if err != nil || expiration < time.Seconds() {
return "", errVerificationFailure
}
expectedSig := signature(secret, context, a[1], a[2])
actualSig := a[0]
if len(actualSig) != len(expectedSig) {
return "", errVerificationFailure
}
// Time independent compare
eq := 0
for i := 0; i < len(actualSig); i++ {
eq = eq | (int(actualSig[i]) ^ int(expectedSig[i]))
}
if eq != 0 {
return "", errVerificationFailure
}
return a[2], nil
}
// Cookie is a helper for constructing Set-Cookie header values.
//
// Cookie supports the ancient Netscape draft specification for cookies
// (http://goo.gl/1WSx3) and the modern HttpOnly attribute
// (http://www.owasp.org/index.php/HttpOnly). Cookie does not attempt to
// support any RFC for cookies because the RFCs are not supported by popular
// browsers.
//
// As a convenience, the NewCookie function returns a cookie with the path
// attribute set to "/" and the httponly attribute set to true.
//
// The following example shows how to set a cookie header using Cookie:
//
// func myHandler(req *web.Request) {
// c := web.NewCookie("my-cookie-name", "my-cookie-value").String()
// w := req.Respond(web.StatusOK, web.HeaderSetCookie, c)
// io.WriteString(w, "<html><body>Hello</body></html>")
// }
type Cookie struct {
name string
value string
path string
domain string
maxAge int
secure bool
httpOnly bool
}
// NewCookie returns a new cookie with the given name and value, the path
// attribute set to "/" and the httponly attribute set to true.
func NewCookie(name, value string) *Cookie {
return &Cookie{name: name, value: value, path: "/", httpOnly: true
|
StatusMovedPermanently: "Moved Permanently",
StatusFound: "Found",
StatusSeeOther: "See Other",
|
random_line_split
|
ABCA_topK.py
|
""" returns the vertices of a graph """
return list(self.__graph_dict.keys())
def edges(self):
""" returns the edges of a graph """
return self.__generate_edges()
def num_vertices(self):
""" returns the number of vertices of a graph """
return len(self.__graph_dict.keys())
def num_edges(self):
""" returns the number of edges of a graph """
return len(self.__generate_edges())
def add_vertex(self, vertex):
""" If the vertex "vertex" is not in
self.__graph_dict, a key "vertex" with an empty
set as a value is added to the dictionary.
Otherwise nothing has to be done.
"""
if vertex not in self.__graph_dict:
self.__graph_dict[vertex] = set()
def delete_vertex(self,vertex):
if vertex not in self.__graph_dict.keys():
print("The vertex is not in the graph")
else:
for node in self.__graph_dict[vertex]:
self.__graph_dict[node].remove(vertex)
self.__graph_dict.pop(vertex)
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:
return
self.__graph_dict[vertex1].add(vertex2)
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():
self.__graph_dict[vertex2] = {vertex1}
self.__graph_dict[vertex1].add(vertex2)
else:
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2] = {vertex1}
def delete_edge(self, edge):
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict[vertex1]:
self.__graph_dict[vertex1].remove(vertex2)
#if vertex2 in self.__graph_dict.keys() and vertex1 in self.__graph_dict[vertex2]:
self.__graph_dict[vertex2].remove(vertex1)
else:
print("This edge is not in the graph.")
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbor in self.__graph_dict[vertex]:
if {neighbor, vertex} not in edges:
edges.append({vertex, neighbor})
return edges
# the bfs_dict needs to be renewed every time a node in the graph changes
def bfs(self, vertex_s):
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
nd_list = list(self.vertices())
visited = dict((node, 0) for node in nd_list)
nq = deque()
pre_dict, dist = {}, {}
nq.append(vertex_s)
visited[vertex_s]=1
dist[vertex_s] = 0
loop_counts = 0
while nq:
s = nq.popleft()
for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
loop_counts += 1
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
#if node in visited and dist[node] == dist[s] + 1: # still within the shortest path
if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path
if s not in pre_dict[node]: # if this path has NOT been recorded, let's do that now
pre_dict[node].append(s)
if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance
pre_dict[node] = [s]
dist[node] = dist[s] + 1
#print(" #loops: %d" %loop_counts)
#current_bfs[vertex_s] = pre_dict
return pre_dict
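# Illustrative example (not part of the original file): for the square graph
# with edges {a, b}, {a, c}, {b, d}, {c, d}, bfs('a') returns
# {'b': ['a'], 'c': ['a'], 'd': ['b', 'c']} -- 'd' keeps both parents because it
# lies on two distinct shortest paths from 'a'.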
def read_edgelist(self, file):
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
v1, v2 = line.strip().split()
if v1 != v2: # no self loop
self.add_edge({v1,v2})
def is_connect(self, s, t):
#current_bfs = dict()
pre_map = self.bfs(s)
if t in pre_map:
return [True, pre_map]
return [False, pre_map]
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
class Node(object):
"""Generic tree."""
def __init__(self, name='', children=None):
self.name = name
if children is None:
children = []
self.children = children
def add_child(self, child):
self.children.append(child)
####not in graph class#############
def bfs_counting(graph, root_vertex, bottom_vertex): # perform analysis twice: 1) set root_vertex = 't'; 2) set root_vertex = 's'
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
#visited = dict()
nd_list = graph.keys()
visited = dict((node, 0) for node in nd_list)
visited[bottom_vertex]=0
nq = deque()# queue for recording current nodes
pre_dict, dist, parents, node_count_dict = {}, {}, {}, {}
nq.append(root_vertex)
visited[root_vertex]=1
dist[root_vertex] = 0
parents[root_vertex]=['fake_root']
node_count_dict['fake_root']=1
while nq:
s = nq.popleft() # dequeue
node_count_dict[s] = 0
for p in parents[s]: # count is defined as the sum of counts from all parents
node_count_dict[s] += node_count_dict[p]
#for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
if not s in graph.keys():
continue
for node in graph[s]:
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
parents[node]=[s] # record 'parents' of this node
else:
parents[node].append(s) # record 'parents' of this node
pre_dict[node].append(s)
node_count_dict.pop('fake_root')
return [pre_dict, node_count_dict] # two returns: 1) tree; 2) node count dictionary
def dfs(root, total_count):
#visited = []
leaf_count = dict()
#total_count = dict()
dfs_helper(root, leaf_count, total_count)
n = leaf_count['root']
for k in total_count.keys():
total_count[k] = total_count[k]/n
return total_count
def dfs_helper(v, leaf_count, total_count):
# Set current to root of binary tree
#visited.append(v.name)
if len(v.children) == 0:
leaf_count[v.name] = 1
else:
leaf_count[v.name] = 0
for nd in v.children:
#print(nd.name)
dfs_helper(nd, leaf_count, total_count)
leaf_count[v.name] += leaf_count[nd.name]
#print(leaf_count)
total_count[nd.name] += leaf_count[nd.name]
#print(total_count)
return
def add_branch(tree_map, current_node, total_count):
|
return self.__bfs_dict
def vertices(self):
|
random_line_split
|
|
ABCA_topK.py
|
graph_dict[vertex]:
self.__graph_dict[node].remove(vertex)
self.__graph_dict.pop(vertex)
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:
return
self.__graph_dict[vertex1].add(vertex2)
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():
self.__graph_dict[vertex2] = {vertex1}
self.__graph_dict[vertex1].add(vertex2)
else:
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2] = {vertex1}
def delete_edge(self, edge):
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict[vertex1]:
self.__graph_dict[vertex1].remove(vertex2)
#if vertex2 in self.__graph_dict.keys() and vertex1 in self.__graph_dict[vertex2]:
self.__graph_dict[vertex2].remove(vertex1)
else:
print("This edge is not in the graph.")
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbor in self.__graph_dict[vertex]:
if {neighbor, vertex} not in edges:
edges.append({vertex, neighbor})
return edges
# the bfs_dict needs to be renewed every time a node in the graph changes
def bfs(self, vertex_s):
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
nd_list = list(self.vertices())
visited = dict((node, 0) for node in nd_list)
nq = deque()
pre_dict, dist = {}, {}
nq.append(vertex_s)
visited[vertex_s]=1
dist[vertex_s] = 0
loop_counts = 0
while nq:
s = nq.popleft()
for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
loop_counts += 1
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
#if node in visited and dist[node] == dist[s] + 1: # still within the shortest path
if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path
if s not in pre_dict[node]: # if this path has NOT been recorded, let's do that now
pre_dict[node].append(s)
if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance
pre_dict[node] = [s]
dist[node] = dist[s] + 1
#print(" #loops: %d" %loop_counts)
#current_bfs[vertex_s] = pre_dict
return pre_dict
def read_edgelist(self, file):
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
v1, v2 = line.strip().split()
if v1 != v2: # no self loop
self.add_edge({v1,v2})
def is_connect(self, s, t):
#current_bfs = dict()
pre_map = self.bfs(s)
if t in pre_map:
return [True, pre_map]
return [False, pre_map]
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
class Node(object):
"""Generic tree."""
def __init__(self, name='', children=None):
self.name = name
if children is None:
children = []
self.children = children
def add_child(self, child):
self.children.append(child)
####not in graph class#############
def bfs_counting(graph, root_vertex, bottom_vertex): # perform analysis twice: 1) set root_vertex = 't'; 2) set root_vertex = 's'
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
#visited = dict()
nd_list = graph.keys()
visited = dict((node, 0) for node in nd_list)
visited[bottom_vertex]=0
nq = deque()# queue for recording current nodes
pre_dict, dist, parents, node_count_dict = {}, {}, {}, {}
nq.append(root_vertex)
visited[root_vertex]=1
dist[root_vertex] = 0
parents[root_vertex]=['fake_root']
node_count_dict['fake_root']=1
while nq:
s = nq.popleft() # dequeue
node_count_dict[s] = 0
for p in parents[s]: # count is defined as the sum of counts from all parents
node_count_dict[s] += node_count_dict[p]
#for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
if not s in graph.keys():
continue
for node in graph[s]:
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
parents[node]=[s] # record 'parents' of this node
else:
parents[node].append(s) # record 'parents' of this node
pre_dict[node].append(s)
node_count_dict.pop('fake_root')
return [pre_dict, node_count_dict] # two returns: 1) tree; 2) node count dictionary
def dfs(root, total_count):
#visited = []
leaf_count = dict()
#total_count = dict()
dfs_helper(root, leaf_count, total_count)
n = leaf_count['root']
for k in total_count.keys():
total_count[k] = total_count[k]/n
return total_count
def dfs_helper(v, leaf_count, total_count):
# Set current to root of binary tree
#visited.append(v.name)
if len(v.children) == 0:
leaf_count[v.name] = 1
else:
leaf_count[v.name] = 0
for nd in v.children:
#print(nd.name)
dfs_helper(nd, leaf_count, total_count)
leaf_count[v.name] += leaf_count[nd.name]
#print(leaf_count)
total_count[nd.name] += leaf_count[nd.name]
#print(total_count)
return
def add_branch(tree_map, current_node, total_count):
|
def set_m(graph, eta):
m = int(math.log2((graph.num_vertices()**2)/(eta**2)))
print("m = %d" %m)
return m
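# Illustrative worked example (not part of the original file): for a graph with
# 1000 vertices and eta = 0.1, m = int(log2(1000**2 / 0.1**2)) = int(log2(1e8)) = 26,
# so only 26 source/target pairs are sampled.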
#@jit
def cal_bc(graph, m, s_list, t_list): # m must be much smaller than the number of edges
nd_list = list(graph.vertices())
bc_dict = dict((node, 0) for node in nd_list)
for i in range(m):
#ndl_copy = copy.copy(nd_list)
print(i)
if len(nd_list) >=2:
s = choice(nd_list)
s_list.add(s)
nd_list.remove(s)
t = choice(nd_list)
|
total_count[current_node.name] = 0
if current_node.name not in tree_map.keys():
return
children = tree_map[current_node.name]
for child in children:
child_node = Node(child)
current_node.add_child(child_node)
add_branch(tree_map, child_node, total_count)
return
|
identifier_body
|
ABCA_topK.py
|
graph_dict[vertex]:
self.__graph_dict[node].remove(vertex)
self.__graph_dict.pop(vertex)
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:
return
self.__graph_dict[vertex1].add(vertex2)
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():
self.__graph_dict[vertex2] = {vertex1}
self.__graph_dict[vertex1].add(vertex2)
else:
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2] = {vertex1}
def delete_edge(self, edge):
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict[vertex1]:
self.__graph_dict[vertex1].remove(vertex2)
#if vertex2 in self.__graph_dict.keys() and vertex1 in self.__graph_dict[vertex2]:
self.__graph_dict[vertex2].remove(vertex1)
else:
print("This edge is not in the graph.")
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbor in self.__graph_dict[vertex]:
if {neighbor, vertex} not in edges:
edges.append({vertex, neighbor})
return edges
# the bfs_dict needs to be renewed every time a node in the graph changes
def bfs(self, vertex_s):
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
nd_list = list(self.vertices())
visited = dict((node, 0) for node in nd_list)
nq = deque()
pre_dict, dist = {}, {}
nq.append(vertex_s)
visited[vertex_s]=1
dist[vertex_s] = 0
loop_counts = 0
while nq:
s = nq.popleft()
for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
loop_counts += 1
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
#if node in visited and dist[node] == dist[s] + 1: # still within the shortest path
if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path
if s not in pre_dict[node]: # if this path has NOT been recorded, let's do that now
pre_dict[node].append(s)
if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance
pre_dict[node] = [s]
dist[node] = dist[s] + 1
#print(" #loops: %d" %loop_counts)
#current_bfs[vertex_s] = pre_dict
return pre_dict
def read_edgelist(self, file):
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
v1, v2 = line.strip().split()
if v1 != v2: # no self loop
self.add_edge({v1,v2})
def
|
(self, s, t):
#current_bfs = dict()
pre_map = self.bfs(s)
if t in pre_map:
return [True, pre_map]
return [False, pre_map]
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
class Node(object):
"""Generic tree."""
def __init__(self, name='', children=None):
self.name = name
if children is None:
children = []
self.children = children
def add_child(self, child):
self.children.append(child)
####not in graph class#############
def bfs_counting(graph, root_vertex, bottom_vertex): # perform analysis twice: 1) set root_vertex = 't'; 2) set root_vertex = 's'
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
#visited = dict()
nd_list = graph.keys()
visited = dict((node, 0) for node in nd_list)
visited[bottom_vertex]=0
nq = deque()# queue for recording current nodes
pre_dict, dist, parents, node_count_dict = {}, {}, {}, {}
nq.append(root_vertex)
visited[root_vertex]=1
dist[root_vertex] = 0
parents[root_vertex]=['fake_root']
node_count_dict['fake_root']=1
while nq:
s = nq.popleft() # dequeue
node_count_dict[s] = 0
for p in parents[s]: # count is defined as the sum of counts from all parents
node_count_dict[s] += node_count_dict[p]
#for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
if not s in graph.keys():
continue
for node in graph[s]:
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
parents[node]=[s] # record 'parents' of this node
else:
parents[node].append(s) # record 'parents' of this node
pre_dict[node].append(s)
node_count_dict.pop('fake_root')
return [pre_dict, node_count_dict] # two returns: 1) tree; 2) node count dictionary
def dfs(root, total_count):
#visited = []
leaf_count = dict()
#total_count = dict()
dfs_helper(root, leaf_count, total_count)
n = leaf_count['root']
for k in total_count.keys():
total_count[k] = total_count[k]/n
return total_count
def dfs_helper(v, leaf_count, total_count):
# Set current to root of binary tree
#visited.append(v.name)
if len(v.children) == 0:
leaf_count[v.name] = 1
else:
leaf_count[v.name] = 0
for nd in v.children:
#print(nd.name)
dfs_helper(nd, leaf_count, total_count)
leaf_count[v.name] += leaf_count[nd.name]
#print(leaf_count)
total_count[nd.name] += leaf_count[nd.name]
#print(total_count)
return
def add_branch(tree_map, current_node, total_count):
total_count[current_node.name] = 0
if current_node.name not in tree_map.keys():
return
children = tree_map[current_node.name]
for child in children:
child_node = Node(child)
current_node.add_child(child_node)
add_branch(tree_map, child_node, total_count)
return
def set_m(graph, eta):
m = int(math.log2((graph.num_vertices()**2)/(eta**2)))
print("m = %d" %m)
return m
#@jit
def cal_bc(graph, m, s_list, t_list): # m must be much smaller than the number of edges
nd_list = list(graph.vertices())
bc_dict = dict((node, 0) for node in nd_list)
for i in range(m):
#ndl_copy = copy.copy(nd_list)
print(i)
if len(nd_list) >=2:
s = choice(nd_list)
s_list.add(s)
nd_list.remove(s)
t = choice(nd_list)
|
is_connect
|
identifier_name
|
ABCA_topK.py
|
graph_dict[vertex]:
self.__graph_dict[node].remove(vertex)
self.__graph_dict.pop(vertex)
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:
return
self.__graph_dict[vertex1].add(vertex2)
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():
self.__graph_dict[vertex2] = {vertex1}
self.__graph_dict[vertex1].add(vertex2)
else:
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2] = {vertex1}
def delete_edge(self, edge):
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict[vertex1]:
self.__graph_dict[vertex1].remove(vertex2)
#if vertex2 in self.__graph_dict.keys() and vertex1 in self.__graph_dict[vertex2]:
self.__graph_dict[vertex2].remove(vertex1)
else:
print("This edge is not in the graph.")
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbor in self.__graph_dict[vertex]:
if {neighbor, vertex} not in edges:
edges.append({vertex, neighbor})
return edges
# the bfs_dict needs to be renewed every time a node in the graph changes
def bfs(self, vertex_s):
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
nd_list = list(self.vertices())
visited = dict((node, 0) for node in nd_list)
nq = deque()
pre_dict, dist = {}, {}
nq.append(vertex_s)
visited[vertex_s]=1
dist[vertex_s] = 0
loop_counts = 0
while nq:
s = nq.popleft()
for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
loop_counts += 1
#if not node in visited:
if not visited[node]:
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
#if node in visited and dist[node] == dist[s] + 1: # still within the shortest path
if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path
if s not in pre_dict[node]: # if this path has NOT been recorded, let's do that now
pre_dict[node].append(s)
if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance
pre_dict[node] = [s]
dist[node] = dist[s] + 1
#print(" #loops: %d" %loop_counts)
#current_bfs[vertex_s] = pre_dict
return pre_dict
def read_edgelist(self, file):
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
v1, v2 = line.strip().split()
if v1 != v2: # no self loop
self.add_edge({v1,v2})
def is_connect(self, s, t):
#current_bfs = dict()
pre_map = self.bfs(s)
if t in pre_map:
return [True, pre_map]
return [False, pre_map]
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
class Node(object):
"""Generic tree."""
def __init__(self, name='', children=None):
self.name = name
if children is None:
children = []
self.children = children
def add_child(self, child):
self.children.append(child)
####not in graph class#############
def bfs_counting(graph, root_vertex, bottom_vertex): # perform analysis twice: 1) set root_vertex = 't'; 2) set root_vertex = 's'
"""
use bfs explore graph from a single vertex
return a shortest path tree from that vertex
"""
#visited = dict()
nd_list = graph.keys()
visited = dict((node, 0) for node in nd_list)
visited[bottom_vertex]=0
nq = deque()# queue for recording current nodes
pre_dict, dist, parents, node_count_dict = {}, {}, {}, {}
nq.append(root_vertex)
visited[root_vertex]=1
dist[root_vertex] = 0
parents[root_vertex]=['fake_root']
node_count_dict['fake_root']=1
while nq:
s = nq.popleft() # dequeue
node_count_dict[s] = 0
for p in parents[s]: # count is defined as the sum of counts from all parents
node_count_dict[s] += node_count_dict[p]
#for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'
if not s in graph.keys():
continue
for node in graph[s]:
#if not node in visited:
if not visited[node]:
|
else:
parents[node].append(s) # record 'parents' of this node
pre_dict[node].append(s)
node_count_dict.pop('fake_root')
return [pre_dict, node_count_dict] # two returns: 1) tree; 2) node count dictionary
def dfs(root, total_count):
#visited = []
leaf_count = dict()
#total_count = dict()
dfs_helper(root, leaf_count, total_count)
n = leaf_count['root']
for k in total_count.keys():
total_count[k] = total_count[k]/n
return total_count
def dfs_helper(v, leaf_count, total_count):
# Set current to root of binary tree
#visited.append(v.name)
if len(v.children) == 0:
leaf_count[v.name] = 1
else:
leaf_count[v.name] = 0
for nd in v.children:
#print(nd.name)
dfs_helper(nd, leaf_count, total_count)
leaf_count[v.name] += leaf_count[nd.name]
#print(leaf_count)
total_count[nd.name] += leaf_count[nd.name]
#print(total_count)
return
def add_branch(tree_map, current_node, total_count):
total_count[current_node.name] = 0
if current_node.name not in tree_map.keys():
return
children = tree_map[current_node.name]
for child in children:
child_node = Node(child)
current_node.add_child(child_node)
add_branch(tree_map, child_node, total_count)
return
def set_m(graph, eta):
m = int(math.log2((graph.num_vertices()**2)/(eta**2)))
print("m = %d" %m)
return m
#@jit
def cal_bc(graph, m, s_list, t_list): # m must be much smaller than the number of edges
nd_list = list(graph.vertices())
bc_dict = dict((node, 0) for node in nd_list)
for i in range(m):
#ndl_copy = copy.copy(nd_list)
print(i)
if len(nd_list) >=2:
s = choice(nd_list)
s_list.add(s)
nd_list.remove(s)
t = choice(nd_list)
|
nq.append(node) # let 'node' in queue
pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'
dist[node] = dist[s] + 1 # shortest path to 'root'
visited[node]=1 # 'node' is visited
parents[node]=[s] # record 'parents' of this node
|
conditional_block
|
zbdsqr.go
|
.Off(n-1), u); err != nil {
panic(err)
}
}
if ncc > 0 {
if err = Zlasr(Left, 'V', 'F', n, ncc, rwork.Off(0), rwork.Off(n-1), c); err != nil {
panic(err)
}
}
}
// Compute singular values to relative accuracy TOL
// (By setting TOL to be negative, algorithm will compute
// singular values to absolute accuracy ABS(TOL)*norm(input matrix))
tolmul = math.Max(ten, math.Min(hndrd, math.Pow(eps, meigth)))
tol = tolmul * eps
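// Illustrative worked example (not part of the original file; assumes meigth
// is -1/8 as in the reference LAPACK DBDSQR): for float64, eps ~ 2.2e-16, so
// eps^(-1/8) ~ 90 and tol = max(10, min(100, 90))*eps ~ 2e-14, i.e. roughly 90
// ulps of relative accuracy.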
// Compute approximate maximum, minimum singular values
smax = zero
for i = 1; i <= n; i++ {
smax = math.Max(smax, math.Abs(d.Get(i-1)))
}
for i = 1; i <= n-1; i++ {
smax = math.Max(smax, math.Abs(e.Get(i-1)))
}
sminl = zero
if tol >= zero {
// Relative accuracy desired
sminoa = math.Abs(d.Get(0))
if sminoa == zero {
goto label50
}
mu = sminoa
for i = 2; i <= n; i++ {
mu = math.Abs(d.Get(i-1)) * (mu / (mu + math.Abs(e.Get(i-1-1))))
sminoa = math.Min(sminoa, mu)
if sminoa == zero {
goto label50
}
}
label50:
;
sminoa = sminoa / math.Sqrt(float64(n))
thresh = math.Max(tol*sminoa, float64(maxitr*n*n)*unfl)
} else {
// Absolute accuracy desired
thresh = math.Max(math.Abs(tol)*smax, float64(maxitr*n*n)*unfl)
}
// Prepare for main iteration loop for the singular values
// (MAXIT is the maximum number of passes through the inner
// loop permitted before nonconvergence signalled.)
maxit = maxitr * n * n
iter = 0
oldll = -1
oldm = -1
// M points to last element of unconverged part of matrix
m = n
// Begin main iteration loop
label60:
;
// Check for convergence or exceeding iteration count
if m <= 1 {
goto label160
}
if iter > maxit {
goto label200
}
// Find diagonal block of matrix to work on
if tol < zero && math.Abs(d.Get(m-1)) <= thresh {
d.Set(m-1, zero)
}
smax = math.Abs(d.Get(m - 1))
smin = smax
for lll = 1; lll <= m-1; lll++ {
ll = m - lll
abss = math.Abs(d.Get(ll - 1))
abse = math.Abs(e.Get(ll - 1))
if tol < zero && abss <= thresh {
d.Set(ll-1, zero)
}
if abse <= thresh {
goto label80
}
smin = math.Min(smin, abss)
smax = math.Max(smax, math.Max(abss, abse))
}
ll = 0
goto label90
label80:
;
e.Set(ll-1, zero)
// Matrix splits since E(LL) = 0
if ll == m-1 {
// Convergence of bottom singular value, return to top of loop
m = m - 1
goto label60
}
label90:
;
ll = ll + 1
// E(LL) through E(M-1) are nonzero, E(LL-1) is zero
if ll == m-1 {
// 2 by 2 block, handle separately
sigmn, sigmx, sinr, cosr, sinl, cosl = Dlasv2(d.Get(m-1-1), e.Get(m-1-1), d.Get(m-1))
d.Set(m-1-1, sigmx)
e.Set(m-1-1, zero)
d.Set(m-1, sigmn)
// Compute singular vectors, if desired
if ncvt > 0 {
vt.Off(m-1, 0).CVector().Drot(ncvt, vt.Off(m-1-1, 0).CVector(), vt.Rows, vt.Rows, cosr, sinr)
}
if nru > 0 {
u.Off(0, m-1).CVector().Drot(nru, u.Off(0, m-1-1).CVector(), 1, 1, cosl, sinl)
}
if ncc > 0 {
c.Off(m-1, 0).CVector().Drot(ncc, c.Off(m-1-1, 0).CVector(), c.Rows, c.Rows, cosl, sinl)
}
m = m - 2
goto label60
}
// If working on new submatrix, choose shift direction
// (from larger end diagonal element towards smaller)
if ll > oldm || m < oldll {
if math.Abs(d.Get(ll-1)) >= math.Abs(d.Get(m-1)) {
// Chase bulge from top (big end) to bottom (small end)
idir = 1
} else {
// Chase bulge from bottom (big end) to top (small end)
idir = 2
}
}
// Apply convergence tests
if idir == 1 {
// Run convergence test in forward direction
// First apply standard test to bottom of matrix
if math.Abs(e.Get(m-1-1)) <= math.Abs(tol)*math.Abs(d.Get(m-1)) || (tol < zero && math.Abs(e.Get(m-1-1)) <= thresh) {
e.Set(m-1-1, zero)
goto label60
}
if tol >= zero {
// If relative accuracy desired,
// apply convergence criterion forward
mu = math.Abs(d.Get(ll - 1))
sminl = mu
for lll = ll; lll <= m-1; lll++ {
if math.Abs(e.Get(lll-1)) <= tol*mu {
e.Set(lll-1, zero)
goto label60
}
mu = math.Abs(d.Get(lll)) * (mu / (mu + math.Abs(e.Get(lll-1))))
sminl = math.Min(sminl, mu)
}
}
} else {
// Run convergence test in backward direction
// First apply standard test to top of matrix
if math.Abs(e.Get(ll-1)) <= math.Abs(tol)*math.Abs(d.Get(ll-1)) || (tol < zero && math.Abs(e.Get(ll-1)) <= thresh) {
e.Set(ll-1, zero)
goto label60
}
if tol >= zero {
// If relative accuracy desired,
// apply convergence criterion backward
mu = math.Abs(d.Get(m - 1))
sminl = mu
for lll = m - 1; lll >= ll; lll-- {
if math.Abs(e.Get(lll-1)) <= tol*mu {
e.Set(lll-1, zero)
goto label60
}
mu = math.Abs(d.Get(lll-1)) * (mu / (mu + math.Abs(e.Get(lll-1))))
sminl = math.Min(sminl, mu)
}
}
}
oldll = ll
oldm = m
// Compute shift. First, test if shifting would ruin relative
// accuracy, and if so set the shift to zero.
if tol >= zero && float64(n)*tol*(sminl/smax) <= math.Max(eps, hndrth*tol) {
// Use a zero shift to avoid loss of relative accuracy
shift = zero
} else {
// Compute the shift from 2-by-2 block at end of matrix
if idir == 1 {
sll = math.Abs(d.Get(ll - 1))
shift, r = Dlas2(d.Get(m-1-1), e.Get(m-1-1), d.Get(m-1))
} else {
sll = math.Abs(d.Get(m - 1))
shift, r = Dlas2(d.Get(ll-1), e.Get(ll-1), d.Get(ll))
}
// Test if shift negligible, and if so set to zero
if sll > zero {
if math.Pow(shift/sll, 2) < eps {
|
shift = zero
|
random_line_split
|
|
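The block above derives the QR shift from the trailing 2-by-2 of the bidiagonal matrix via Dlas2 (the shift is the smaller singular value) and handles a fully split 2-by-2 block via Dlasv2. As a cross-check of what those routines return, here is a small Python sketch that computes the two singular values of an upper-triangular 2-by-2 [[f, g], [0, h]] from the characteristic polynomial of B^T B. It is illustration only: the LAPACK routines use a more careful, overflow-safe formulation.

import math

def svd_2x2_upper(f, g, h):
    # Singular values of the upper-triangular 2x2 block [[f, g], [0, h]].
    # Uses trace/determinant of B^T B; dlas2/dlasv2 use a more robust scheme.
    t = f * f + g * g + h * h      # trace of B^T B
    d = (f * h) ** 2               # determinant of B^T B
    disc = math.sqrt(max(t * t - 4.0 * d, 0.0))
    smax = math.sqrt((t + disc) / 2.0)
    smin = 0.0 if smax == 0.0 else abs(f * h) / smax
    return smin, smax

# e.g. the shift used above is the smaller singular value of the trailing block:
# shift, _ = svd_2x2_upper(d[m-2], e[m-2], d[m-1])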
zbdsqr.go
|
} else if ncvt < 0 {
err = fmt.Errorf("ncvt < 0: ncvt=%v", ncvt)
} else if nru < 0 {
err = fmt.Errorf("nru < 0: nru=%v", nru)
} else if ncc < 0 {
err = fmt.Errorf("ncc < 0: ncc=%v", ncc)
} else if (ncvt == 0 && vt.Rows < 1) || (ncvt > 0 && vt.Rows < max(1, n)) {
err = fmt.Errorf("(ncvt == 0 && vt.Rows < 1) || (ncvt > 0 && vt.Rows < max(1, n)): ncvt=%v, vt.Rows=%v, n=%v", ncvt, vt.Rows, n)
} else if u.Rows < max(1, nru) {
err = fmt.Errorf("u.Rows < max(1, nru): u.Rows=%v, nru=%v", u.Rows, nru)
} else if (ncc == 0 && c.Rows < 1) || (ncc > 0 && c.Rows < max(1, n)) {
err = fmt.Errorf("(ncc == 0 && c.Rows < 1) || (ncc > 0 && c.Rows < max(1, n)): ncc=%v, c.Rows=%v, n=%v", ncc, c.Rows, n)
}
if err != nil {
gltest.Xerbla2("Zbdsqr", err)
return
}
if n == 0 {
return
}
if n == 1 {
goto label160
}
// ROTATE is true if any singular vectors desired, false otherwise
rotate = (ncvt > 0) || (nru > 0) || (ncc > 0)
// If no singular vectors desired, use qd algorithm
if !rotate {
if info, err = Dlasq1(n, d, e, rwork); err != nil {
panic(err)
}
// If INFO equals 2, dqds didn't finish, try to finish
if info != 2 {
return
}
info = 0
}
nm1 = n - 1
nm12 = nm1 + nm1
nm13 = nm12 + nm1
idir = 0
// Get machine constants
eps = Dlamch(Epsilon)
unfl = Dlamch(SafeMinimum)
// If matrix lower bidiagonal, rotate to be upper bidiagonal
// by applying Givens rotations on the left
if lower {
for i = 1; i <= n-1; i++ {
cs, sn, r = Dlartg(d.Get(i-1), e.Get(i-1))
d.Set(i-1, r)
e.Set(i-1, sn*d.Get(i))
d.Set(i, cs*d.Get(i))
rwork.Set(i-1, cs)
rwork.Set(nm1+i-1, sn)
}
// Update singular vectors if desired
if nru > 0 {
if err = Zlasr(Right, 'V', 'F', nru, n, rwork.Off(0), rwork.Off(n-1), u); err != nil {
panic(err)
}
}
if ncc > 0 {
if err = Zlasr(Left, 'V', 'F', n, ncc, rwork.Off(0), rwork.Off(n-1), c); err != nil {
panic(err)
}
}
}
// Compute singular values to relative accuracy TOL
// (By setting TOL to be negative, algorithm will compute
// singular values to absolute accuracy ABS(TOL)*norm(input matrix))
tolmul = math.Max(ten, math.Min(hndrd, math.Pow(eps, meigth)))
tol = tolmul * eps
// Compute approximate maximum, minimum singular values
smax = zero
for i = 1; i <= n; i++ {
smax = math.Max(smax, math.Abs(d.Get(i-1)))
}
for i = 1; i <= n-1; i++ {
smax = math.Max(smax, math.Abs(e.Get(i-1)))
}
sminl = zero
if tol >= zero {
// Relative accuracy desired
sminoa = math.Abs(d.Get(0))
if sminoa == zero {
goto label50
}
mu = sminoa
for i = 2; i <= n; i++ {
mu = math.Abs(d.Get(i-1)) * (mu / (mu + math.Abs(e.Get(i-1-1))))
sminoa = math.Min(sminoa, mu)
if sminoa == zero {
goto label50
}
}
label50:
;
sminoa = sminoa / math.Sqrt(float64(n))
thresh = math.Max(tol*sminoa, float64(maxitr*n*n)*unfl)
} else {
// Absolute accuracy desired
thresh = math.Max(math.Abs(tol)*smax, float64(maxitr*n*n)*unfl)
}
// Prepare for main iteration loop for the singular values
// (MAXIT is the maximum number of passes through the inner
// loop permitted before nonconvergence signalled.)
maxit = maxitr * n * n
iter = 0
oldll = -1
oldm = -1
// M points to last element of unconverged part of matrix
m = n
// Begin main iteration loop
label60:
;
// Check for convergence or exceeding iteration count
if m <= 1 {
goto label160
}
if iter > maxit {
goto label200
}
// Find diagonal block of matrix to work on
if tol < zero && math.Abs(d.Get(m-1)) <= thresh {
d.Set(m-1, zero)
}
smax = math.Abs(d.Get(m - 1))
smin = smax
for lll = 1; lll <= m-1; lll++ {
ll = m - lll
abss = math.Abs(d.Get(ll - 1))
abse = math.Abs(e.Get(ll - 1))
if tol < zero && abss <= thresh {
d.Set(ll-1, zero)
}
if abse <= thresh {
goto label80
}
smin = math.Min(smin, abss)
smax = math.Max(smax, math.Max(abss, abse))
}
ll = 0
goto label90
label80:
;
e.Set(ll-1, zero)
// Matrix splits since E(LL) = 0
if ll == m-1 {
// Convergence of bottom singular value, return to top of loop
m = m - 1
goto label60
}
label90:
;
ll = ll + 1
// E(LL) through E(M-1) are nonzero, E(LL-1) is zero
if ll == m-1 {
// 2 by 2 block, handle separately
sigmn, sigmx, sinr, cosr, sinl, cosl = Dlasv2(d.Get(m-1-1), e.Get(m-1-1), d.Get(m-1))
d.Set(m-1-1, sigmx)
e.Set(m-1-1, zero)
d.Set(m-1, sigmn)
// Compute singular vectors, if desired
if
|
{
var lower, rotate bool
var abse, abss, cosl, cosr, cs, eps, f, g, h, hndrd, hndrth, meigth, mu, negone, oldcs, oldsn, one, r, shift, sigmn, sigmx, sinl, sinr, sll, smax, smin, sminl, sminoa, sn, ten, thresh, tol, tolmul, unfl, zero float64
var i, idir, isub, iter, j, ll, lll, m, maxit, maxitr, nm1, nm12, nm13, oldll, oldm int
zero = 0.0
one = 1.0
negone = -1.0
hndrth = 0.01
ten = 10.0
hndrd = 100.0
meigth = -0.125
maxitr = 6
// Test the input parameters.
lower = uplo == Lower
if uplo != Upper && !lower {
err = fmt.Errorf("uplo != Upper && !lower: uplo=%s", uplo)
} else if n < 0 {
err = fmt.Errorf("n < 0: n=%v", n)
|
identifier_body
|
|
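The lower-to-upper bidiagonal pass above repeatedly calls Dlartg to build Givens rotations. This minimal Python sketch shows the quantities involved (c, s, r with c*f + s*g = r and -s*f + c*g = 0); the real Dlartg additionally guards against overflow/underflow and fixes particular sign conventions.

import math

def givens(f, g):
    # Plane rotation (c, s, r) such that c*f + s*g = r and -s*f + c*g = 0.
    # Sketch of what Dlartg computes, without its scaling safeguards.
    if g == 0.0:
        return 1.0, 0.0, f
    if f == 0.0:
        return 0.0, 1.0, g
    r = math.hypot(f, g)
    return f / r, g / r, r

# One step of the lower-to-upper sweep above, in the same notation
# (order matters: e[i] uses the old d[i+1] before it is overwritten):
# cs, sn, r = givens(d[i], e[i]); d[i] = r; e[i] = sn * d[i+1]; d[i+1] = cs * d[i+1]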
zbdsqr.go
|
(uplo mat.MatUplo, n, ncvt, nru, ncc int, d, e *mat.Vector, vt, u, c *mat.CMatrix, rwork *mat.Vector) (info int, err error) {
var lower, rotate bool
var abse, abss, cosl, cosr, cs, eps, f, g, h, hndrd, hndrth, meigth, mu, negone, oldcs, oldsn, one, r, shift, sigmn, sigmx, sinl, sinr, sll, smax, smin, sminl, sminoa, sn, ten, thresh, tol, tolmul, unfl, zero float64
var i, idir, isub, iter, j, ll, lll, m, maxit, maxitr, nm1, nm12, nm13, oldll, oldm int
zero = 0.0
one = 1.0
negone = -1.0
hndrth = 0.01
ten = 10.0
hndrd = 100.0
meigth = -0.125
maxitr = 6
// Test the input parameters.
lower = uplo == Lower
if uplo != Upper && !lower {
err = fmt.Errorf("uplo != Upper && !lower: uplo=%s", uplo)
} else if n < 0 {
err = fmt.Errorf("n < 0: n=%v", n)
} else if ncvt < 0 {
err = fmt.Errorf("ncvt < 0: ncvt=%v", ncvt)
} else if nru < 0 {
err = fmt.Errorf("nru < 0: nru=%v", nru)
} else if ncc < 0 {
err = fmt.Errorf("ncc < 0: ncc=%v", ncc)
} else if (ncvt == 0 && vt.Rows < 1) || (ncvt > 0 && vt.Rows < max(1, n)) {
err = fmt.Errorf("(ncvt == 0 && vt.Rows < 1) || (ncvt > 0 && vt.Rows < max(1, n)): ncvt=%v, vt.Rows=%v, n=%v", ncvt, vt.Rows, n)
} else if u.Rows < max(1, nru) {
err = fmt.Errorf("u.Rows < max(1, nru): u.Rows=%v, nru=%v", u.Rows, nru)
} else if (ncc == 0 && c.Rows < 1) || (ncc > 0 && c.Rows < max(1, n)) {
err = fmt.Errorf("(ncc == 0 && c.Rows < 1) || (ncc > 0 && c.Rows < max(1, n)): ncc=%v, c.Rows=%v, n=%v", ncc, c.Rows, n)
}
if err != nil {
gltest.Xerbla2("Zbdsqr", err)
return
}
if n == 0 {
return
}
if n == 1 {
goto label160
}
// ROTATE is true if any singular vectors desired, false otherwise
rotate = (ncvt > 0) || (nru > 0) || (ncc > 0)
// If no singular vectors desired, use qd algorithm
if !rotate {
if info, err = Dlasq1(n, d, e, rwork); err != nil {
panic(err)
}
// If INFO equals 2, dqds didn't finish, try to finish
if info != 2 {
return
}
info = 0
}
nm1 = n - 1
nm12 = nm1 + nm1
nm13 = nm12 + nm1
idir = 0
// Get machine constants
eps = Dlamch(Epsilon)
unfl = Dlamch(SafeMinimum)
// If matrix lower bidiagonal, rotate to be upper bidiagonal
// by applying Givens rotations on the left
if lower {
for i = 1; i <= n-1; i++ {
cs, sn, r = Dlartg(d.Get(i-1), e.Get(i-1))
d.Set(i-1, r)
e.Set(i-1, sn*d.Get(i))
d.Set(i, cs*d.Get(i))
rwork.Set(i-1, cs)
rwork.Set(nm1+i-1, sn)
}
// Update singular vectors if desired
if nru > 0 {
if err = Zlasr(Right, 'V', 'F', nru, n, rwork.Off(0), rwork.Off(n-1), u); err != nil {
panic(err)
}
}
if ncc > 0 {
if err = Zlasr(Left, 'V', 'F', n, ncc, rwork.Off(0), rwork.Off(n-1), c); err != nil {
panic(err)
}
}
}
// Compute singular values to relative accuracy TOL
// (By setting TOL to be negative, algorithm will compute
// singular values to absolute accuracy ABS(TOL)*norm(input matrix))
tolmul = math.Max(ten, math.Min(hndrd, math.Pow(eps, meigth)))
tol = tolmul * eps
// Compute approximate maximum, minimum singular values
smax = zero
for i = 1; i <= n; i++ {
smax = math.Max(smax, math.Abs(d.Get(i-1)))
}
for i = 1; i <= n-1; i++ {
smax = math.Max(smax, math.Abs(e.Get(i-1)))
}
sminl = zero
if tol >= zero {
// Relative accuracy desired
sminoa = math.Abs(d.Get(0))
if sminoa == zero {
goto label50
}
mu = sminoa
for i = 2; i <= n; i++ {
mu = math.Abs(d.Get(i-1)) * (mu / (mu + math.Abs(e.Get(i-1-1))))
sminoa = math.Min(sminoa, mu)
if sminoa == zero {
goto label50
}
}
label50:
;
sminoa = sminoa / math.Sqrt(float64(n))
thresh = math.Max(tol*sminoa, float64(maxitr*n*n)*unfl)
} else {
// Absolute accuracy desired
thresh = math.Max(math.Abs(tol)*smax, float64(maxitr*n*n)*unfl)
}
// Prepare for main iteration loop for the singular values
// (MAXIT is the maximum number of passes through the inner
// loop permitted before nonconvergence signalled.)
maxit = maxitr * n * n
iter = 0
oldll = -1
oldm = -1
// M points to last element of unconverged part of matrix
m = n
// Begin main iteration loop
label60:
;
// Check for convergence or exceeding iteration count
if m <= 1 {
goto label160
}
if iter > maxit {
goto label200
}
// Find diagonal block of matrix to work on
if tol < zero && math.Abs(d.Get(m-1)) <= thresh {
d.Set(m-1, zero)
}
smax = math.Abs(d.Get(m - 1))
smin = smax
for lll = 1; lll <= m-1; lll++ {
ll = m - lll
abss = math.Abs(d.Get(ll - 1))
abse = math.Abs(e.Get(ll - 1))
if tol < zero && abss <= thresh {
d.Set(ll-1, zero)
}
if abse <= thresh {
goto label80
}
smin = math.Min(smin, abss)
smax = math.Max(smax, math.Max(abss, abse))
}
ll = 0
goto label90
label80:
;
e.Set(ll-1, zero)
// Matrix splits since E(LL) = 0
if ll == m-1 {
// Convergence of bottom singular value, return to top of loop
m = m - 1
goto label60
}
label90:
;
ll = ll + 1
// E(LL) through E(M-1) are nonzero, E(LL-1) is zero
if ll == m-1 {
// 2 by 2 block, handle separately
sigmn, sigmx, sinr, cosr, sinl, cosl = Dlasv2(d.Get(m-1-1), e.Get(m-1-
|
Zbdsqr
|
identifier_name
|
|
zbdsqr.go
|
64(maxitr*n*n)*unfl)
}
// Prepare for main iteration loop for the singular values
// (MAXIT is the maximum number of passes through the inner
// loop permitted before nonconvergence signalled.)
maxit = maxitr * n * n
iter = 0
oldll = -1
oldm = -1
// M points to last element of unconverged part of matrix
m = n
// Begin main iteration loop
label60:
;
// Check for convergence or exceeding iteration count
if m <= 1 {
goto label160
}
if iter > maxit {
goto label200
}
// Find diagonal block of matrix to work on
if tol < zero && math.Abs(d.Get(m-1)) <= thresh {
d.Set(m-1, zero)
}
smax = math.Abs(d.Get(m - 1))
smin = smax
for lll = 1; lll <= m-1; lll++ {
ll = m - lll
abss = math.Abs(d.Get(ll - 1))
abse = math.Abs(e.Get(ll - 1))
if tol < zero && abss <= thresh {
d.Set(ll-1, zero)
}
if abse <= thresh {
goto label80
}
smin = math.Min(smin, abss)
smax = math.Max(smax, math.Max(abss, abse))
}
ll = 0
goto label90
label80:
;
e.Set(ll-1, zero)
// Matrix splits since E(LL) = 0
if ll == m-1 {
// Convergence of bottom singular value, return to top of loop
m = m - 1
goto label60
}
label90:
;
ll = ll + 1
// E(LL) through E(M-1) are nonzero, E(LL-1) is zero
if ll == m-1 {
// 2 by 2 block, handle separately
sigmn, sigmx, sinr, cosr, sinl, cosl = Dlasv2(d.Get(m-1-1), e.Get(m-1-1), d.Get(m-1))
d.Set(m-1-1, sigmx)
e.Set(m-1-1, zero)
d.Set(m-1, sigmn)
// Compute singular vectors, if desired
if ncvt > 0 {
vt.Off(m-1, 0).CVector().Drot(ncvt, vt.Off(m-1-1, 0).CVector(), vt.Rows, vt.Rows, cosr, sinr)
}
if nru > 0 {
u.Off(0, m-1).CVector().Drot(nru, u.Off(0, m-1-1).CVector(), 1, 1, cosl, sinl)
}
if ncc > 0 {
c.Off(m-1, 0).CVector().Drot(ncc, c.Off(m-1-1, 0).CVector(), c.Rows, c.Rows, cosl, sinl)
}
m = m - 2
goto label60
}
// If working on new submatrix, choose shift direction
// (from larger end diagonal element towards smaller)
if ll > oldm || m < oldll {
if math.Abs(d.Get(ll-1)) >= math.Abs(d.Get(m-1)) {
// Chase bulge from top (big end) to bottom (small end)
idir = 1
} else {
// Chase bulge from bottom (big end) to top (small end)
idir = 2
}
}
// Apply convergence tests
if idir == 1 {
// Run convergence test in forward direction
// First apply standard test to bottom of matrix
if math.Abs(e.Get(m-1-1)) <= math.Abs(tol)*math.Abs(d.Get(m-1)) || (tol < zero && math.Abs(e.Get(m-1-1)) <= thresh) {
e.Set(m-1-1, zero)
goto label60
}
if tol >= zero {
// If relative accuracy desired,
// apply convergence criterion forward
mu = math.Abs(d.Get(ll - 1))
sminl = mu
for lll = ll; lll <= m-1; lll++ {
if math.Abs(e.Get(lll-1)) <= tol*mu {
e.Set(lll-1, zero)
goto label60
}
mu = math.Abs(d.Get(lll)) * (mu / (mu + math.Abs(e.Get(lll-1))))
sminl = math.Min(sminl, mu)
}
}
} else {
// Run convergence test in backward direction
// First apply standard test to top of matrix
if math.Abs(e.Get(ll-1)) <= math.Abs(tol)*math.Abs(d.Get(ll-1)) || (tol < zero && math.Abs(e.Get(ll-1)) <= thresh) {
e.Set(ll-1, zero)
goto label60
}
if tol >= zero {
// If relative accuracy desired,
// apply convergence criterion backward
mu = math.Abs(d.Get(m - 1))
sminl = mu
for lll = m - 1; lll >= ll; lll-- {
if math.Abs(e.Get(lll-1)) <= tol*mu {
e.Set(lll-1, zero)
goto label60
}
mu = math.Abs(d.Get(lll-1)) * (mu / (mu + math.Abs(e.Get(lll-1))))
sminl = math.Min(sminl, mu)
}
}
}
oldll = ll
oldm = m
// Compute shift. First, test if shifting would ruin relative
// accuracy, and if so set the shift to zero.
if tol >= zero && float64(n)*tol*(sminl/smax) <= math.Max(eps, hndrth*tol) {
// Use a zero shift to avoid loss of relative accuracy
shift = zero
} else {
// Compute the shift from 2-by-2 block at end of matrix
if idir == 1 {
sll = math.Abs(d.Get(ll - 1))
shift, r = Dlas2(d.Get(m-1-1), e.Get(m-1-1), d.Get(m-1))
} else {
sll = math.Abs(d.Get(m - 1))
shift, r = Dlas2(d.Get(ll-1), e.Get(ll-1), d.Get(ll))
}
// Test if shift negligible, and if so set to zero
if sll > zero {
if math.Pow(shift/sll, 2) < eps {
shift = zero
}
}
}
// Increment iteration count
iter = iter + m - ll
// If SHIFT = 0, do simplified QR iteration
if shift == zero {
if idir == 1 {
// Chase bulge from top to bottom
// Save cosines and sines for later singular vector updates
cs = one
oldcs = one
for i = ll; i <= m-1; i++ {
cs, sn, r = Dlartg(d.Get(i-1)*cs, e.Get(i-1))
if i > ll {
e.Set(i-1-1, oldsn*r)
}
oldcs, oldsn, *d.GetPtr(i - 1) = Dlartg(oldcs*r, d.Get(i)*sn)
rwork.Set(i-ll, cs)
rwork.Set(i-ll+1+nm1-1, sn)
rwork.Set(i-ll+1+nm12-1, oldcs)
rwork.Set(i-ll+1+nm13-1, oldsn)
}
h = d.Get(m-1) * cs
d.Set(m-1, h*oldcs)
e.Set(m-1-1, h*oldsn)
// Update singular vectors
if ncvt > 0 {
if err = Zlasr(Left, 'V', 'F', m-ll+1, ncvt, rwork.Off(0), rwork.Off(n-1), vt.Off(ll-1, 0)); err != nil {
panic(err)
}
}
if nru > 0
|
{
if err = Zlasr(Right, 'V', 'F', nru, m-ll+1, rwork.Off(nm12), rwork.Off(nm13), u.Off(0, ll-1)); err != nil {
panic(err)
}
}
|
conditional_block
|
|
bignum.rs
|
5).unwrap();
assert_eq!(six.cmp(&five), ::std::cmp::Ordering::Greater);
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved
}
#[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn
|
() {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these
|
test_mod_sqrt_fn
|
identifier_name
|
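The Jacobi-symbol test vectors above were generated with Sagemath. For cross-checking a value by hand, this is a sketch of the standard binary Jacobi algorithm in plain Python, independent of the Mpi API; the two asserts reuse vectors from the tests above.

def jacobi(a, n):
    # Jacobi symbol (a/n) for odd n > 0; returns -1, 0 or 1.
    if n <= 0 or n % 2 == 0:
        raise ValueError("n must be a positive odd integer")
    a %= n
    result = 1
    while a != 0:
        while a % 2 == 0:
            a //= 2
            if n % 8 in (3, 5):   # (2/n) = -1 when n = 3 or 5 (mod 8)
                result = -result
        a, n = n, a               # quadratic reciprocity
        if a % 4 == 3 and n % 4 == 3:
            result = -result
        a %= n
    return result if n == 1 else 0

assert jacobi(5, 9) == 1
assert jacobi(50733077, 50733077) == 0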
bignum.rs
|
::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
*/
if &computed != &expected {
computed = (&n - &computed).unwrap();
}
assert_eq!(computed, expected);
}
// Tests generated by Sagemath
mod_sqrt_test("2", "7", "4");
mod_sqrt_test("5", "469289024411159", "234325000312516");
mod_sqrt_test(
"458050473005020050313790240477",
"905858848829014223214249213947",
"126474086260479574845714194337",
);
mod_sqrt_test("4", "13", "2");
mod_sqrt_test("2", "113", "62");
mod_sqrt_test(
"14432894130216089699367965001582109139186342668614313620824414613061488655",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"64346440714386899555372506097606752274599811989145306413544609746921648646",
);
mod_sqrt_test(
"2",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"29863506841820532608636271306847583140720915984413766535227954746838873278",
);
mod_sqrt_test(
"2",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x507442007322AA895340CBA4ABC2D730BFD0B16C2C79A46815F8780D2C55A2DD",
);
mod_sqrt_test(
"0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B",
);
// Primes where 2^s divides p-1 for s >= 3 which caught a bug
mod_sqrt_test("2", "17", "6");
mod_sqrt_test("2", "97", "14");
mod_sqrt_test("2", "193", "52");
mod_sqrt_test("2", "257", "60");
mod_sqrt_test("2", "65537", "4080");
mod_sqrt_test("2", "0x1200000001", "17207801277");
mod_sqrt_test(
"2",
"0x660000000000000000000000000000000000000000000000000000000000000001",
"0xce495874f10d32d28105400c73f73aafc7cbbae7cd1dfa1525f2701b3573d78c0",
);
}
#[test]
fn bignum_cmp() {
let big = Mpi::new(2147483647).unwrap();
let small = Mpi::new(2).unwrap();
assert!(big > small);
assert!(small < big);
assert!(big >= small);
assert!(small <= big);
assert!(small >= small);
assert!(big <= big);
assert!(small == small);
assert!(small != big);
}
#[test]
fn bigint_ops() {
let x = Mpi::new(100).unwrap();
let y = Mpi::new(20900).unwrap();
assert_eq!(x.as_u32().unwrap(), 100);
let z = (&x + &y).unwrap();
assert_eq!(z.as_u32().unwrap(), 21000);
let z = (&z * &y).unwrap();
assert_eq!(z, Mpi::new(438900000).unwrap());
let z = (&z - &x).unwrap();
assert_eq!(z, Mpi::new(0x1A2914BC).unwrap());
let r = (&z % 127).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let r = (&z % &Mpi::new(127).unwrap()).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let q = (&z / 53).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let q = (&z / &Mpi::new(53).unwrap()).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let nan = &z / 0;
assert!(nan.is_err());
}
const BASE58_ALPHABET: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
fn base58_encode(bits: &[u8]) -> mbedtls::Result<String> {
let zero = Mpi::new(0)?;
let mut n = Mpi::from_binary(bits)?;
let radix: i64 = 58;
let mut s = Vec::new();
while n > zero {
let (q, r) = n.divrem_int(radix)?;
n = q;
s.push(BASE58_ALPHABET[r.as_u32()? as usize]);
}
s.reverse();
Ok(String::from_utf8(s).unwrap())
}
fn base58_decode(b58: &str) -> mbedtls::Result<Vec<u8>>
|
{
let radix: i64 = 58;
let mut n = Mpi::new(0)?;
fn base58_val(b: u8) -> mbedtls::Result<usize> {
for (i, c) in BASE58_ALPHABET.iter().enumerate() {
if *c == b {
return Ok(i);
}
}
Err(mbedtls::Error::Base64InvalidCharacter)
}
for c in b58.bytes() {
let v = base58_val(c)? as i64;
n = (&n * radix)?;
n = (&n + v)?;
}
|
identifier_body
|
|
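The Base58 helpers above drive everything through Mpi::divrem_int with radix 58. The same scheme written with Python's built-in big integers may be easier to follow; like the Rust version, this simple sketch does not preserve leading zero bytes.

BASE58_ALPHABET = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def base58_encode(data: bytes) -> str:
    # Interpret the bytes as one big integer and repeatedly divide by 58.
    n = int.from_bytes(data, "big")
    out = bytearray()
    while n > 0:
        n, r = divmod(n, 58)
        out.append(BASE58_ALPHABET[r])
    return bytes(reversed(out)).decode()

def base58_decode(s: str) -> bytes:
    # Horner evaluation in base 58; raises ValueError on invalid characters.
    n = 0
    for ch in s.encode():
        n = n * 58 + BASE58_ALPHABET.index(ch)
    return n.to_bytes((n.bit_length() + 7) // 8, "big")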
bignum.rs
|
(5).unwrap();
assert_eq!(six.cmp(&five), ::std::cmp::Ordering::Greater);
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved
|
#[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn test_mod_sqrt_fn() {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
|
}
|
random_line_split
|
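The mod_sqrt tests above deliberately cover the awkward case for square roots modulo a prime: p - 1 divisible by a high power of two. A reference Tonelli-Shanks sketch in Python (assuming p is prime and a is a quadratic residue) also shows why either root x or p - x may come back, which is what the test's normalization step accounts for.

def mod_sqrt(a, p):
    # Square root of a modulo an odd prime p (Tonelli-Shanks).
    a %= p
    if a == 0:
        return 0
    if p % 4 == 3:                       # easy case, no 2-power subtlety
        return pow(a, (p + 1) // 4, p)
    q, s = p - 1, 0                      # write p - 1 = q * 2^s with q odd
    while q % 2 == 0:
        q //= 2
        s += 1
    z = 2                                # find a quadratic non-residue z
    while pow(z, (p - 1) // 2, p) != p - 1:
        z += 1
    m, c = s, pow(z, q, p)
    t, r = pow(a, q, p), pow(a, (q + 1) // 2, p)
    while t != 1:
        i, t2 = 0, t                     # least i with t^(2^i) == 1
        while t2 != 1:
            t2 = t2 * t2 % p
            i += 1
        b = pow(c, 1 << (m - i - 1), p)
        m, c = i, b * b % p
        t, r = t * c % p, r * b % p
    return r

assert mod_sqrt(2, 7) in (4, 3)          # 4^2 = 16 = 2 (mod 7)
assert mod_sqrt(2, 17) in (6, 11)        # one of the "2^s | p-1" test vectors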
bignum.rs
|
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved
}
#[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn test_mod_sqrt_fn() {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
*/
if &computed != &expected
|
{
computed = (&n - &computed).unwrap();
}
|
conditional_block
|
|
warc.py
|
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Classes writing data to WARC files
"""
import json, threading
from io import BytesIO
from warcio.statusandheaders import StatusAndHeaders
from urllib.parse import urlsplit
from datetime import datetime
from warcio.timeutils import datetime_to_iso_date
from warcio.warcwriter import WARCWriter
from .util import packageUrl
from .controller import defaultSettings, EventHandler, ControllerStart
from .behavior import Script, DomSnapshotEvent, ScreenshotEvent
from .browser import Item
class WarcHandler (EventHandler):
__slots__ = ('logger', 'writer', 'maxBodySize', 'documentRecords', 'log',
'maxLogSize', 'logEncoding', 'warcinfoRecordId')
def __init__ (self, fd,
logger,
maxBodySize=defaultSettings.maxBodySize):
self.logger = logger
self.writer = WARCWriter (fd, gzip=True)
self.maxBodySize = maxBodySize
self.logEncoding = 'utf-8'
self.log = BytesIO ()
# max log buffer size (bytes)
self.maxLogSize = 500*1024
# maps document urls to WARC record ids, required for DomSnapshotEvent
# and ScreenshotEvent
self.documentRecords = {}
# record id of warcinfo record
self.warcinfoRecordId = None
def __enter__ (self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._flushLogEntries ()
def writeRecord (self, url, kind, payload, warc_headers_dict=None, http_headers=None):
"""
Thin wrapper around writer.create_warc_record and writer.write_record.
Adds default WARC headers.
"""
d = {}
if self.warcinfoRecordId:
d['WARC-Warcinfo-ID'] = self.warcinfoRecordId
d.update (warc_headers_dict)
warc_headers_dict = d
record = self.writer.create_warc_record (url, kind, payload=payload,
warc_headers_dict=warc_headers_dict, http_headers=http_headers)
self.writer.write_record (record)
return record
def _writeRequest (self, item):
logger = self.logger.bind (reqId=item.id)
req = item.request
resp = item.response
url = urlsplit (resp['url'])
path = url.path
if url.query:
path += '?' + url.query
httpHeaders = StatusAndHeaders('{} {} HTTP/1.1'.format (req['method'], path),
item.requestHeaders, protocol='HTTP/1.1', is_http_request=True)
initiator = item.initiator
warcHeaders = {
'X-Chrome-Initiator': json.dumps (initiator),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (item.chromeRequest['wallTime'])),
}
try:
bodyTruncated = None
payload, payloadBase64Encoded = item.requestBody
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
logger.error ('requestBody missing', uuid='ee9adc58-e723-4595-9feb-312a67ead6a0')
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
payload = None
if payload:
payload = BytesIO (payload)
warcHeaders['X-Chrome-Base64Body'] = str (payloadBase64Encoded)
record = self.writeRecord (req['url'], 'request',
payload=payload, http_headers=httpHeaders,
warc_headers_dict=warcHeaders)
return record.rec_headers['WARC-Record-ID']
def _writeResponse (self, item, concurrentTo):
# fetch the body
reqId = item.id
rawBody = None
base64Encoded = False
bodyTruncated = None
if item.isRedirect:
# redirects reuse the same request, thus we cannot safely retrieve
# the body (i.e getResponseBody may return the new location’s
# body).
bodyTruncated = 'unspecified'
elif item.encodedDataLength > self.maxBodySize:
bodyTruncated = 'length'
# check body size first, since we’re loading everything into memory
self.logger.error ('body for {} too large {} vs {}'.format (reqId,
item.encodedDataLength, self.maxBodySize))
else:
try:
rawBody, base64Encoded = item.body
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
# now the response
resp = item.response
warcHeaders = {
'WARC-Concurrent-To': concurrentTo,
'WARC-IP-Address': resp.get ('remoteIPAddress', ''),
'X-Chrome-Protocol': resp.get ('protocol', ''),
'X-Chrome-FromDiskCache': str (resp.get ('fromDiskCache')),
'X-Chrome-ConnectionReused': str (resp.get ('connectionReused')),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (
item.chromeRequest['wallTime']+
(item.chromeResponse['timestamp']-item.chromeRequest['timestamp']))),
}
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
else:
warcHeaders['X-Chrome-Base64Body'] = str (base64Encoded)
httpHeaders = StatusAndHeaders('{} {}'.format (resp['status'],
item.statusText), item.responseHeaders,
protocol='HTTP/1.1')
# Content is saved decompressed and decoded, remove these headers
blacklistedHeaders = {'transfer-encoding', 'content-encoding'}
for h in blacklistedHeaders:
httpHeaders.remove_header (h)
# chrome sends nothing but utf8 encoded text. Fortunately HTTP
# headers take precedence over the document’s <meta>, thus we can
# easily override those.
contentType = resp.get ('mimeType')
if contentType:
if not base64Encoded:
contentType += '; charset=utf-8'
httpHeaders.replace_header ('content-type', contentType)
if rawBody is not None:
httpHeaders.replace_header ('content-length', '{:d}'.format (len (rawBody)))
bodyIo = BytesIO (rawBody)
else:
bodyIo = BytesIO ()
record = self.writeRecord (resp['url'], 'response',
warc_headers_dict=warcHeaders, payload=bodyIo,
http_headers=httpHeaders)
if item.resourceType == 'Document':
self.documentRecords[item.url] = record.rec_headers.get_header ('WARC-Record-ID')
def _writeScript (self, item):
writer = self.writer
encoding = 'utf-8'
self.writeRecord (packageUrl ('script/{}'.format (item.path)), 'metadata',
payload=BytesIO (str (item).encode (encoding)),
warc_headers_dict={'Content-Type': 'application/javascript; charset={}'.format (encoding)})
def _writeItem (self, item):
if item.failed:
# should have been handled by the logger already
return
concurrentTo = self._writeRequest (item)
self._writeResponse (item, concurrentTo)
def _addRefersTo (self, headers, url):
refersTo = self.documentRecords.get (url)
if refersTo:
headers['WARC-Refers-To'] = refersTo
else:
self.logger.error ('No document record found for {}'.format (url))
return headers
def _writeDomSnapshot (self, item):
writer = self.writer
warcHeaders = {'X-DOM-Snapshot': str (True),
'X-Chrome-Viewport': item.viewport,
'Content-Type': 'text/html; charset=utf-8',
}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.document),
warc_headers_dict=warcHeaders)
def _writeScreenshot (self, item):
writer = self.writer
warcHeaders = {'Content-Type': 'image/png',
'X-Crocoite-Screenshot-Y-Offset': str (item.yoff)}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.data), warc_headers_dict=warcHeaders)
def _writeControllerStart (self, item):
payload = BytesIO (json.dumps (item.payload, indent=2).encode ('utf-8'))
writer = self.writer
warcinfo = self.writeRecord (packageUrl ('warcinfo'), 'warcinfo',
warc_headers_dict={'Content-Type': 'text/plain; encoding=utf-8'},
payload=payload)
self.warcinfoRecordId = warcinfo.rec_headers['WARC-Record-ID']
|
random_line_split
|
||
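WarcHandler above builds on warcio's WARCWriter, StatusAndHeaders, create_warc_record and write_record. For orientation, here is a minimal standalone sketch of those same calls writing a single response record; the output file name, headers and payload are made-up illustration values, not part of the handler.

from io import BytesIO
from warcio.warcwriter import WARCWriter
from warcio.statusandheaders import StatusAndHeaders

# Write one gzip'd WARC file containing a single 'response' record.
with open('example.warc.gz', 'wb') as fd:
    writer = WARCWriter(fd, gzip=True)
    http_headers = StatusAndHeaders('200 OK',
                                    [('Content-Type', 'text/html; charset=utf-8')],
                                    protocol='HTTP/1.1')
    record = writer.create_warc_record('http://example.com/', 'response',
                                       payload=BytesIO(b'<html></html>'),
                                       http_headers=http_headers)
    writer.write_record(record)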
warc.py
|
IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Classes writing data to WARC files
"""
import json, threading
from io import BytesIO
from warcio.statusandheaders import StatusAndHeaders
from urllib.parse import urlsplit
from datetime import datetime
from warcio.timeutils import datetime_to_iso_date
from warcio.warcwriter import WARCWriter
from .util import packageUrl
from .controller import defaultSettings, EventHandler, ControllerStart
from .behavior import Script, DomSnapshotEvent, ScreenshotEvent
from .browser import Item
class WarcHandler (EventHandler):
__slots__ = ('logger', 'writer', 'maxBodySize', 'documentRecords', 'log',
'maxLogSize', 'logEncoding', 'warcinfoRecordId')
def __init__ (self, fd,
logger,
maxBodySize=defaultSettings.maxBodySize):
self.logger = logger
self.writer = WARCWriter (fd, gzip=True)
self.maxBodySize = maxBodySize
self.logEncoding = 'utf-8'
self.log = BytesIO ()
# max log buffer size (bytes)
self.maxLogSize = 500*1024
# maps document urls to WARC record ids, required for DomSnapshotEvent
# and ScreenshotEvent
self.documentRecords = {}
# record id of warcinfo record
self.warcinfoRecordId = None
def __enter__ (self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._flushLogEntries ()
def writeRecord (self, url, kind, payload, warc_headers_dict=None, http_headers=None):
"""
Thin wrapper around writer.create_warc_record and writer.write_record.
Adds default WARC headers.
"""
d = {}
if self.warcinfoRecordId:
d['WARC-Warcinfo-ID'] = self.warcinfoRecordId
d.update (warc_headers_dict)
warc_headers_dict = d
record = self.writer.create_warc_record (url, kind, payload=payload,
warc_headers_dict=warc_headers_dict, http_headers=http_headers)
self.writer.write_record (record)
return record
def _writeRequest (self, item):
logger = self.logger.bind (reqId=item.id)
req = item.request
resp = item.response
url = urlsplit (resp['url'])
path = url.path
if url.query:
path += '?' + url.query
httpHeaders = StatusAndHeaders('{} {} HTTP/1.1'.format (req['method'], path),
item.requestHeaders, protocol='HTTP/1.1', is_http_request=True)
initiator = item.initiator
warcHeaders = {
'X-Chrome-Initiator': json.dumps (initiator),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (item.chromeRequest['wallTime'])),
}
try:
bodyTruncated = None
payload, payloadBase64Encoded = item.requestBody
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
logger.error ('requestBody missing', uuid='ee9adc58-e723-4595-9feb-312a67ead6a0')
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
payload = None
if payload:
payload = BytesIO (payload)
warcHeaders['X-Chrome-Base64Body'] = str (payloadBase64Encoded)
record = self.writeRecord (req['url'], 'request',
payload=payload, http_headers=httpHeaders,
warc_headers_dict=warcHeaders)
return record.rec_headers['WARC-Record-ID']
def _writeResponse (self, item, concurrentTo):
# fetch the body
reqId = item.id
rawBody = None
base64Encoded = False
bodyTruncated = None
if item.isRedirect:
# redirects reuse the same request, thus we cannot safely retrieve
# the body (i.e getResponseBody may return the new location’s
# body).
bodyTruncated = 'unspecified'
elif item.encodedDataLength > self.maxBodySize:
bodyTruncated = 'length'
# check body size first, since we’re loading everything into memory
self.logger.error ('body for {} too large {} vs {}'.format (reqId,
item.encodedDataLength, self.maxBodySize))
else:
try:
rawBody, base64Encoded = item.body
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
# now the response
resp = item.response
warcHeaders = {
'WARC-Concurrent-To': concurrentTo,
'WARC-IP-Address': resp.get ('remoteIPAddress', ''),
'X-Chrome-Protocol': resp.get ('protocol', ''),
'X-Chrome-FromDiskCache': str (resp.get ('fromDiskCache')),
'X-Chrome-ConnectionReused': str (resp.get ('connectionReused')),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (
item.chromeRequest['wallTime']+
(item.chromeResponse['timestamp']-item.chromeRequest['timestamp']))),
}
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
else:
warcHeaders['X-Chrome-Base64Body'] = str (base64Encoded)
httpHeaders = StatusAndHeaders('{} {}'.format (resp['status'],
item.statusText), item.responseHeaders,
protocol='HTTP/1.1')
# Content is saved decompressed and decoded, remove these headers
blacklistedHeaders = {'transfer-encoding', 'content-encoding'}
for h in blacklistedHeaders:
httpHeaders.remove_header (h)
# chrome sends nothing but utf8 encoded text. Fortunately HTTP
# headers take precedence over the document’s <meta>, thus we can
# easily override those.
contentType = resp.get ('mimeType')
if contentType:
if not base64Encoded:
contentType += '; charset=utf-8'
httpHeaders.replace_header ('content-type', contentType)
if rawBody is not None:
httpHeaders.replace_header ('content-length', '{:d}'.format (len (rawBody)))
bodyIo = BytesIO (rawBody)
else:
bodyIo = BytesIO ()
record = self.writeRecord (resp['url'], 'response',
warc_headers_dict=warcHeaders, payload=bodyIo,
http_headers=httpHeaders)
if item.resourceType == 'Document':
self.documentRecords[item.url] = record.rec_headers.get_header ('WARC-Record-ID')
def _writeScript (self, item):
writer = self.writer
encoding = 'utf-8'
self.writeRecord (packageUrl ('script/{}'.format (item.path)), 'metadata',
payload=BytesIO (str (item).encode (encoding)),
warc_headers_dict={'Content-Type': 'application/javascript; charset={}'.format (encoding)})
def _writeItem (self, item):
if item.fa
|
_addRefersTo (self, headers, url):
refersTo = self.documentRecords.get (url)
if refersTo:
headers['WARC-Refers-To'] = refersTo
else:
self.logger.error ('No document record found for {}'.format (url))
return headers
def _writeDomSnapshot (self, item):
writer = self.writer
warcHeaders = {'X-DOM-Snapshot': str (True),
'X-Chrome-Viewport': item.viewport,
'Content-Type': 'text/html; charset=utf-8',
}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.document),
warc_headers_dict=warcHeaders)
def _writeScreenshot (self, item):
writer = self.writer
warcHeaders = {'Content-Type': 'image/png',
'X-Crocoite-Screenshot-Y-Offset': str (item.yoff)}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.data), warc_headers_dict=warcHeaders)
def _writeControllerStart (self, item):
payload = BytesIO (json.dumps (item.payload, indent=2).encode ('utf-8'))
writer = self.writer
warcinfo = self.writeRecord (packageUrl ('warcinfo'),
|
iled:
# should have been handled by the logger already
return
concurrentTo = self._writeRequest (item)
self._writeResponse (item, concurrentTo)
def
|
identifier_body
|
warc.py
|
IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Classes writing data to WARC files
"""
import json, threading
from io import BytesIO
from warcio.statusandheaders import StatusAndHeaders
from urllib.parse import urlsplit
from datetime import datetime
from warcio.timeutils import datetime_to_iso_date
from warcio.warcwriter import WARCWriter
from .util import packageUrl
from .controller import defaultSettings, EventHandler, ControllerStart
from .behavior import Script, DomSnapshotEvent, ScreenshotEvent
from .browser import Item
class WarcHandler (EventHandler):
__slots__ = ('logger', 'writer', 'maxBodySize', 'documentRecords', 'log',
'maxLogSize', 'logEncoding', 'warcinfoRecordId')
def __init__ (self, fd,
logger,
maxBodySize=defaultSettings.maxBodySize):
self.logger = logger
self.writer = WARCWriter (fd, gzip=True)
self.maxBodySize = maxBodySize
self.logEncoding = 'utf-8'
self.log = BytesIO ()
# max log buffer size (bytes)
self.maxLogSize = 500*1024
# maps document urls to WARC record ids, required for DomSnapshotEvent
# and ScreenshotEvent
self.documentRecords = {}
# record id of warcinfo record
self.warcinfoRecordId = None
def __enter__ (self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._flushLogEntries ()
def writeRecord (self, url, kind, payload, warc_headers_dict=None, http_headers=None):
"""
Thin wrapper around writer.create_warc_record and writer.write_record.
Adds default WARC headers.
"""
d = {}
if self.warcinfoRecordId:
d['WARC-Warcinfo-ID'] = self.warcinfoRecordId
d.update (warc_headers_dict)
warc_headers_dict = d
record = self.writer.create_warc_record (url, kind, payload=payload,
warc_headers_dict=warc_headers_dict, http_headers=http_headers)
self.writer.write_record (record)
return record
def _writeRequest (self, item):
logger = self.logger.bind (reqId=item.id)
req = item.request
resp = item.response
url = urlsplit (resp['url'])
path = url.path
if url.query:
path += '?' + url.query
httpHeaders = StatusAndHeaders('{} {} HTTP/1.1'.format (req['method'], path),
item.requestHeaders, protocol='HTTP/1.1', is_http_request=True)
initiator = item.initiator
warcHeaders = {
'X-Chrome-Initiator': json.dumps (initiator),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (item.chromeRequest['wallTime'])),
}
try:
bodyTruncated = None
payload, payloadBase64Encoded = item.requestBody
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
logger.error ('requestBody missing', uuid='ee9adc58-e723-4595-9feb-312a67ead6a0')
if bodyTruncated:
wa
|
if payload:
payload = BytesIO (payload)
warcHeaders['X-Chrome-Base64Body'] = str (payloadBase64Encoded)
record = self.writeRecord (req['url'], 'request',
payload=payload, http_headers=httpHeaders,
warc_headers_dict=warcHeaders)
return record.rec_headers['WARC-Record-ID']
def _writeResponse (self, item, concurrentTo):
# fetch the body
reqId = item.id
rawBody = None
base64Encoded = False
bodyTruncated = None
if item.isRedirect:
# redirects reuse the same request, thus we cannot safely retrieve
# the body (i.e getResponseBody may return the new location’s
# body).
bodyTruncated = 'unspecified'
elif item.encodedDataLength > self.maxBodySize:
bodyTruncated = 'length'
# check body size first, since we’re loading everything into memory
self.logger.error ('body for {} too large {} vs {}'.format (reqId,
item.encodedDataLength, self.maxBodySize))
else:
try:
rawBody, base64Encoded = item.body
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
# now the response
resp = item.response
warcHeaders = {
'WARC-Concurrent-To': concurrentTo,
'WARC-IP-Address': resp.get ('remoteIPAddress', ''),
'X-Chrome-Protocol': resp.get ('protocol', ''),
'X-Chrome-FromDiskCache': str (resp.get ('fromDiskCache')),
'X-Chrome-ConnectionReused': str (resp.get ('connectionReused')),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (
item.chromeRequest['wallTime']+
(item.chromeResponse['timestamp']-item.chromeRequest['timestamp']))),
}
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
else:
warcHeaders['X-Chrome-Base64Body'] = str (base64Encoded)
httpHeaders = StatusAndHeaders('{} {}'.format (resp['status'],
item.statusText), item.responseHeaders,
protocol='HTTP/1.1')
# Content is saved decompressed and decoded, remove these headers
blacklistedHeaders = {'transfer-encoding', 'content-encoding'}
for h in blacklistedHeaders:
httpHeaders.remove_header (h)
# chrome sends nothing but utf8 encoded text. Fortunately HTTP
# headers take precedence over the document’s <meta>, thus we can
# easily override those.
contentType = resp.get ('mimeType')
if contentType:
if not base64Encoded:
contentType += '; charset=utf-8'
httpHeaders.replace_header ('content-type', contentType)
if rawBody is not None:
httpHeaders.replace_header ('content-length', '{:d}'.format (len (rawBody)))
bodyIo = BytesIO (rawBody)
else:
bodyIo = BytesIO ()
record = self.writeRecord (resp['url'], 'response',
warc_headers_dict=warcHeaders, payload=bodyIo,
http_headers=httpHeaders)
if item.resourceType == 'Document':
self.documentRecords[item.url] = record.rec_headers.get_header ('WARC-Record-ID')
def _writeScript (self, item):
writer = self.writer
encoding = 'utf-8'
self.writeRecord (packageUrl ('script/{}'.format (item.path)), 'metadata',
payload=BytesIO (str (item).encode (encoding)),
warc_headers_dict={'Content-Type': 'application/javascript; charset={}'.format (encoding)})
def _writeItem (self, item):
if item.failed:
# should have been handled by the logger already
return
concurrentTo = self._writeRequest (item)
self._writeResponse (item, concurrentTo)
def _addRefersTo (self, headers, url):
refersTo = self.documentRecords.get (url)
if refersTo:
headers['WARC-Refers-To'] = refersTo
else:
self.logger.error ('No document record found for {}'.format (url))
return headers
def _writeDomSnapshot (self, item):
writer = self.writer
warcHeaders = {'X-DOM-Snapshot': str (True),
'X-Chrome-Viewport': item.viewport,
'Content-Type': 'text/html; charset=utf-8',
}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.document),
warc_headers_dict=warcHeaders)
def _writeScreenshot (self, item):
writer = self.writer
warcHeaders = {'Content-Type': 'image/png',
'X-Crocoite-Screenshot-Y-Offset': str (item.yoff)}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.data), warc_headers_dict=warcHeaders)
def _writeControllerStart (self, item):
payload = BytesIO (json.dumps (item.payload, indent=2).encode ('utf-8'))
writer = self.writer
warcinfo = self.writeRecord (packageUrl ('warcinfo'), '
|
rcHeaders['WARC-Truncated'] = bodyTruncated
payload = None
|
conditional_block
|
warc.py
|
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Classes writing data to WARC files
"""
import json, threading
from io import BytesIO
from warcio.statusandheaders import StatusAndHeaders
from urllib.parse import urlsplit
from datetime import datetime
from warcio.timeutils import datetime_to_iso_date
from warcio.warcwriter import WARCWriter
from .util import packageUrl
from .controller import defaultSettings, EventHandler, ControllerStart
from .behavior import Script, DomSnapshotEvent, ScreenshotEvent
from .browser import Item
class WarcHandler (EventHandler):
__slots__ = ('logger', 'writer', 'maxBodySize', 'documentRecords', 'log',
'maxLogSize', 'logEncoding', 'warcinfoRecordId')
def __init__ (self, fd,
logger,
maxBodySize=defaultSettings.maxBodySize):
self.logger = logger
self.writer = WARCWriter (fd, gzip=True)
self.maxBodySize = maxBodySize
self.logEncoding = 'utf-8'
self.log = BytesIO ()
# max log buffer size (bytes)
self.maxLogSize = 500*1024
# maps document urls to WARC record ids, required for DomSnapshotEvent
# and ScreenshotEvent
self.documentRecords = {}
# record id of warcinfo record
self.warcinfoRecordId = None
def __enter__ (self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._flushLogEntries ()
def writeRecord (self, url, kind, payload, warc_headers_dict=None, http_headers=None):
"""
Thin wrapper around writer.create_warc_record and writer.write_record.
Adds default WARC headers.
"""
d = {}
if self.warcinfoRecordId:
d['WARC-Warcinfo-ID'] = self.warcinfoRecordId
d.update (warc_headers_dict)
warc_headers_dict = d
record = self.writer.create_warc_record (url, kind, payload=payload,
warc_headers_dict=warc_headers_dict, http_headers=http_headers)
self.writer.write_record (record)
return record
def _writeRequest (self, item):
logger = self.logger.bind (reqId=item.id)
req = item.request
resp = item.response
url = urlsplit (resp['url'])
path = url.path
if url.query:
path += '?' + url.query
httpHeaders = StatusAndHeaders('{} {} HTTP/1.1'.format (req['method'], path),
item.requestHeaders, protocol='HTTP/1.1', is_http_request=True)
initiator = item.initiator
warcHeaders = {
'X-Chrome-Initiator': json.dumps (initiator),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (item.chromeRequest['wallTime'])),
}
try:
bodyTruncated = None
payload, payloadBase64Encoded = item.requestBody
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
logger.error ('requestBody missing', uuid='ee9adc58-e723-4595-9feb-312a67ead6a0')
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
payload = None
if payload:
payload = BytesIO (payload)
warcHeaders['X-Chrome-Base64Body'] = str (payloadBase64Encoded)
record = self.writeRecord (req['url'], 'request',
payload=payload, http_headers=httpHeaders,
warc_headers_dict=warcHeaders)
return record.rec_headers['WARC-Record-ID']
def _writeResponse (self, item, concurrentTo):
# fetch the body
reqId = item.id
rawBody = None
base64Encoded = False
bodyTruncated = None
if item.isRedirect:
# redirects reuse the same request, thus we cannot safely retrieve
# the body (i.e getResponseBody may return the new location’s
# body).
bodyTruncated = 'unspecified'
elif item.encodedDataLength > self.maxBodySize:
bodyTruncated = 'length'
# check body size first, since we’re loading everything into memory
self.logger.error ('body for {} too large {} vs {}'.format (reqId,
item.encodedDataLength, self.maxBodySize))
else:
try:
rawBody, base64Encoded = item.body
except ValueError:
# oops, don’t know what went wrong here
bodyTruncated = 'unspecified'
# now the response
resp = item.response
warcHeaders = {
'WARC-Concurrent-To': concurrentTo,
'WARC-IP-Address': resp.get ('remoteIPAddress', ''),
'X-Chrome-Protocol': resp.get ('protocol', ''),
'X-Chrome-FromDiskCache': str (resp.get ('fromDiskCache')),
'X-Chrome-ConnectionReused': str (resp.get ('connectionReused')),
'X-Chrome-Request-ID': item.id,
'WARC-Date': datetime_to_iso_date (datetime.utcfromtimestamp (
item.chromeRequest['wallTime']+
(item.chromeResponse['timestamp']-item.chromeRequest['timestamp']))),
}
if bodyTruncated:
warcHeaders['WARC-Truncated'] = bodyTruncated
else:
warcHeaders['X-Chrome-Base64Body'] = str (base64Encoded)
httpHeaders = StatusAndHeaders('{} {}'.format (resp['status'],
item.statusText), item.responseHeaders,
protocol='HTTP/1.1')
# Content is saved decompressed and decoded, remove these headers
blacklistedHeaders = {'transfer-encoding', 'content-encoding'}
for h in blacklistedHeaders:
httpHeaders.remove_header (h)
# chrome sends nothing but utf8 encoded text. Fortunately HTTP
# headers take precedence over the document’s <meta>, thus we can
# easily override those.
contentType = resp.get ('mimeType')
if contentType:
if not base64Encoded:
contentType += '; charset=utf-8'
httpHeaders.replace_header ('content-type', contentType)
if rawBody is not None:
httpHeaders.replace_header ('content-length', '{:d}'.format (len (rawBody)))
bodyIo = BytesIO (rawBody)
else:
bodyIo = BytesIO ()
record = self.writeRecord (resp['url'], 'response',
warc_headers_dict=warcHeaders, payload=bodyIo,
http_headers=httpHeaders)
if item.resourceType == 'Document':
self.documentRecords[item.url] = record.rec_headers.get_header ('WARC-Record-ID')
def _writeScri
|
em):
writer = self.writer
encoding = 'utf-8'
self.writeRecord (packageUrl ('script/{}'.format (item.path)), 'metadata',
payload=BytesIO (str (item).encode (encoding)),
warc_headers_dict={'Content-Type': 'application/javascript; charset={}'.format (encoding)})
def _writeItem (self, item):
if item.failed:
# should have been handled by the logger already
return
concurrentTo = self._writeRequest (item)
self._writeResponse (item, concurrentTo)
def _addRefersTo (self, headers, url):
refersTo = self.documentRecords.get (url)
if refersTo:
headers['WARC-Refers-To'] = refersTo
else:
self.logger.error ('No document record found for {}'.format (url))
return headers
def _writeDomSnapshot (self, item):
writer = self.writer
warcHeaders = {'X-DOM-Snapshot': str (True),
'X-Chrome-Viewport': item.viewport,
'Content-Type': 'text/html; charset=utf-8',
}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.document),
warc_headers_dict=warcHeaders)
def _writeScreenshot (self, item):
writer = self.writer
warcHeaders = {'Content-Type': 'image/png',
'X-Crocoite-Screenshot-Y-Offset': str (item.yoff)}
self._addRefersTo (warcHeaders, item.url)
self.writeRecord (item.url, 'conversion',
payload=BytesIO (item.data), warc_headers_dict=warcHeaders)
def _writeControllerStart (self, item):
payload = BytesIO (json.dumps (item.payload, indent=2).encode ('utf-8'))
writer = self.writer
warcinfo = self.writeRecord (packageUrl ('warcinfo
|
pt (self, it
|
identifier_name
|
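The _writeRequest/_writeResponse pair above decides when a body is stored in full and when the record instead carries a WARC-Truncated header: redirects never get a body (the DevTools API may return the redirect target's body instead), bodies larger than maxBodySize are skipped with reason 'length', and a failed body fetch falls back to 'unspecified'. The following standalone Go sketch restates that decision rule only; the function name and its inputs are illustrative and are not part of crocoite or warcio.

package main

import "fmt"

// truncationReason mirrors the rule used by _writeResponse above: an empty
// string means "write the body", anything else becomes the WARC-Truncated value.
func truncationReason(isRedirect bool, encodedLength, maxBodySize int64, bodyErr error) string {
	switch {
	case isRedirect:
		// redirects reuse the request id, so fetching the body is unsafe
		return "unspecified"
	case encodedLength > maxBodySize:
		// body too large to buffer in memory
		return "length"
	case bodyErr != nil:
		// body could not be retrieved at all
		return "unspecified"
	default:
		return ""
	}
}

func main() {
	fmt.Println(truncationReason(false, 2048, 1024, nil)) // length
	fmt.Println(truncationReason(true, 10, 1024, nil))    // unspecified
	fmt.Println(truncationReason(false, 10, 1024, nil))   // (empty: keep body)
}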
api_op_UpdateItem.go
|
For the complete list of reserved words, see
// Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
// in the Amazon DynamoDB Developer Guide.) To work around this, you could specify
// the following for ExpressionAttributeNames :
// - {"#P":"Percentile"}
// You could then use this substitution in an expression, as in this example:
// - #P = :val
// Tokens that begin with the : character are expression attribute values, which
// are placeholders for the actual value at runtime. For more information about
// expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
// in the Amazon DynamoDB Developer Guide.
|
// One or more values that can be substituted in an expression. Use the : (colon)
// character in an expression to dereference an attribute value. For example,
// suppose that you wanted to check whether the value of the ProductStatus
// attribute was one of the following: Available | Backordered | Discontinued You
// would first need to specify ExpressionAttributeValues as follows: {
// ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
// ":disc":{"S":"Discontinued"} } You could then use these values in an expression,
// such as this: ProductStatus IN (:avail, :back, :disc) For more information on
// expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
// in the Amazon DynamoDB Developer Guide.
ExpressionAttributeValues map[string]types.AttributeValue
// Determines the level of detail about either provisioned or on-demand throughput
// consumption that is returned in the response:
// - INDEXES - The response includes the aggregate ConsumedCapacity for the
// operation, together with ConsumedCapacity for each table and secondary index
// that was accessed. Note that some operations, such as GetItem and BatchGetItem
// , do not access any indexes at all. In these cases, specifying INDEXES will
// only return ConsumedCapacity information for table(s).
// - TOTAL - The response includes only the aggregate ConsumedCapacity for the
// operation.
// - NONE - No ConsumedCapacity details are included in the response.
ReturnConsumedCapacity types.ReturnConsumedCapacity
// Determines whether item collection metrics are returned. If set to SIZE , then
// statistics about item collections, if any, that were modified during the
// operation are returned in the response. If set to NONE (the default), no
// statistics are returned.
ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
// Use ReturnValues if you want to get the item attributes as they appear before
// or after they are successfully updated. For UpdateItem , the valid values are:
// - NONE - If ReturnValues is not specified, or if its value is NONE , then
// nothing is returned. (This setting is the default for ReturnValues .)
// - ALL_OLD - Returns all of the attributes of the item, as they appeared before
// the UpdateItem operation.
// - UPDATED_OLD - Returns only the updated attributes, as they appeared before
// the UpdateItem operation.
// - ALL_NEW - Returns all of the attributes of the item, as they appear after
// the UpdateItem operation.
// - UPDATED_NEW - Returns only the updated attributes, as they appear after the
// UpdateItem operation.
// There is no additional cost associated with requesting a return value aside
// from the small network and processing overhead of receiving a larger response.
// No read capacity units are consumed. The values returned are strongly
// consistent.
ReturnValues types.ReturnValue
// An optional parameter that returns the item attributes for an UpdateItem
// operation that failed a condition check. There is no additional cost associated
// with requesting a return value aside from the small network and processing
// overhead of receiving a larger response. No read capacity units are consumed.
ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure
// An expression that defines one or more attributes to be updated, the action to
// be performed on them, and new values for them. The following action values are
// available for UpdateExpression .
// - SET - Adds one or more attributes and values to an item. If any of these
// attributes already exist, they are replaced by the new values. You can also use
// SET to add or subtract from an attribute that is of type Number. For example:
// SET myNum = myNum + :val SET supports the following functions:
// - if_not_exists (path, operand) - if the item does not contain an attribute at
// the specified path, then if_not_exists evaluates to operand; otherwise, it
// evaluates to path. You can use this function to avoid overwriting an attribute
// that may already be present in the item.
// - list_append (operand, operand) - evaluates to a list with a new element
// added to it. You can append the new element to the start or the end of the list
// by reversing the order of the operands. These function names are
// case-sensitive.
// - REMOVE - Removes one or more attributes from an item.
// - ADD - Adds the specified value to the item, if the attribute does not
// already exist. If the attribute does exist, then the behavior of ADD depends
// on the data type of the attribute:
// - If the existing attribute is a number, and if Value is also a number, then
// Value is mathematically added to the existing attribute. If Value is a
// negative number, then it is subtracted from the existing attribute. If you use
// ADD to increment or decrement a number value for an item that doesn't exist
// before the update, DynamoDB uses 0 as the initial value. Similarly, if you use
// ADD for an existing item to increment or decrement an attribute value that
// doesn't exist before the update, DynamoDB uses 0 as the initial value. For
// example, suppose that the item you want to update doesn't have an attribute
// named itemcount , but you decide to ADD the number 3 to this attribute anyway.
// DynamoDB will create the itemcount attribute, set its initial value to 0 , and
// finally add 3 to it. The result will be a new itemcount attribute in the item,
// with a value of 3 .
// - If the existing data type is a set and if Value is also a set, then Value is
// added to the existing set. For example, if the attribute value is the set
// [1,2] , and the ADD action specified [3] , then the final attribute value is
// [1,2,3] . An error occurs if an ADD action is specified for a set attribute
// and the attribute type specified does not match the existing set type. Both sets
// must have the same primitive data type. For example, if the existing data type
// is a set of strings, the Value must also be a set of strings. The ADD action
// only supports Number and set data types. In addition, ADD can only be used on
// top-level attributes, not nested attributes.
// - DELETE - Deletes an element from a set. If a set of values is specified,
// then those values are subtracted from the old set. For example, if the attribute
// value was the set [a,b,c] and the DELETE action specifies [a,c] , then the
// final attribute value is [b] . Specifying an empty set is an error. The DELETE
// action only supports set data types. In addition, DELETE can only be used on
// top-level attributes, not nested attributes.
// You can have many actions in a single expression, such as the following: SET
// a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on
// update expressions, see Modifying Items and Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
// in the Amazon DynamoDB Developer Guide.
UpdateExpression *string
noSmithyDocumentSerde
}
// Represents the output of an UpdateItem operation.
type UpdateItemOutput struct {
// A map of attribute values as they appear before or after the UpdateItem
// operation, as determined by the ReturnValues parameter. The Attributes map is
// only present if the update was successful and ReturnValues was specified as
// something other than NONE in the request. Each element represents one attribute.
Attributes map[string]types.AttributeValue
// The capacity units consumed by the UpdateItem operation. The data returned
// includes the total provisioned throughput consumed, along with statistics for
// the table and any indexes involved in the operation
|
ExpressionAttributeNames map[string]string
|
random_line_split
|
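The documentation above describes how ExpressionAttributeNames, ExpressionAttributeValues, UpdateExpression, and ReturnValues fit together. Below is a minimal sketch of a call that uses them with the aws-sdk-go-v2 DynamoDB client; the table name, key, and attribute names are hypothetical and error handling is abbreviated.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		panic(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	out, err := client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
		TableName: aws.String("Products"), // hypothetical table
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: "123"},
		},
		// #P works around the reserved-word problem described above.
		ExpressionAttributeNames: map[string]string{"#P": "Percentile"},
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":val":  &types.AttributeValueMemberN{Value: "95"},
			":incr": &types.AttributeValueMemberN{Value: "3"},
		},
		// SET replaces #P; ADD increments itemcount, creating it at 0 if absent.
		UpdateExpression: aws.String("SET #P = :val ADD itemcount :incr"),
		ReturnValues:     types.ReturnValueUpdatedNew,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Attributes) // only the updated attributes, per UPDATED_NEW
}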
api_op_UpdateItem.go
|
them, and new values for them. The following action values are
// available for UpdateExpression .
// - SET - Adds one or more attributes and values to an item. If any of these
// attributes already exist, they are replaced by the new values. You can also use
// SET to add or subtract from an attribute that is of type Number. For example:
// SET myNum = myNum + :val SET supports the following functions:
// - if_not_exists (path, operand) - if the item does not contain an attribute at
// the specified path, then if_not_exists evaluates to operand; otherwise, it
// evaluates to path. You can use this function to avoid overwriting an attribute
// that may already be present in the item.
// - list_append (operand, operand) - evaluates to a list with a new element
// added to it. You can append the new element to the start or the end of the list
// by reversing the order of the operands. These function names are
// case-sensitive.
// - REMOVE - Removes one or more attributes from an item.
// - ADD - Adds the specified value to the item, if the attribute does not
// already exist. If the attribute does exist, then the behavior of ADD depends
// on the data type of the attribute:
// - If the existing attribute is a number, and if Value is also a number, then
// Value is mathematically added to the existing attribute. If Value is a
// negative number, then it is subtracted from the existing attribute. If you use
// ADD to increment or decrement a number value for an item that doesn't exist
// before the update, DynamoDB uses 0 as the initial value. Similarly, if you use
// ADD for an existing item to increment or decrement an attribute value that
// doesn't exist before the update, DynamoDB uses 0 as the initial value. For
// example, suppose that the item you want to update doesn't have an attribute
// named itemcount , but you decide to ADD the number 3 to this attribute anyway.
// DynamoDB will create the itemcount attribute, set its initial value to 0 , and
// finally add 3 to it. The result will be a new itemcount attribute in the item,
// with a value of 3 .
// - If the existing data type is a set and if Value is also a set, then Value is
// added to the existing set. For example, if the attribute value is the set
// [1,2] , and the ADD action specified [3] , then the final attribute value is
// [1,2,3] . An error occurs if an ADD action is specified for a set attribute
// and the attribute type specified does not match the existing set type. Both sets
// must have the same primitive data type. For example, if the existing data type
// is a set of strings, the Value must also be a set of strings. The ADD action
// only supports Number and set data types. In addition, ADD can only be used on
// top-level attributes, not nested attributes.
// - DELETE - Deletes an element from a set. If a set of values is specified,
// then those values are subtracted from the old set. For example, if the attribute
// value was the set [a,b,c] and the DELETE action specifies [a,c] , then the
// final attribute value is [b] . Specifying an empty set is an error. The DELETE
// action only supports set data types. In addition, DELETE can only be used on
// top-level attributes, not nested attributes.
// You can have many actions in a single expression, such as the following: SET
// a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on
// update expressions, see Modifying Items and Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
// in the Amazon DynamoDB Developer Guide.
UpdateExpression *string
noSmithyDocumentSerde
}
// Represents the output of an UpdateItem operation.
type UpdateItemOutput struct {
// A map of attribute values as they appear before or after the UpdateItem
// operation, as determined by the ReturnValues parameter. The Attributes map is
// only present if the update was successful and ReturnValues was specified as
// something other than NONE in the request. Each element represents one attribute.
Attributes map[string]types.AttributeValue
// The capacity units consumed by the UpdateItem operation. The data returned
// includes the total provisioned throughput consumed, along with statistics for
// the table and any indexes involved in the operation. ConsumedCapacity is only
// returned if the ReturnConsumedCapacity parameter was specified. For more
// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
// in the Amazon DynamoDB Developer Guide.
ConsumedCapacity *types.ConsumedCapacity
// Information about item collections, if any, that were affected by the UpdateItem
// operation. ItemCollectionMetrics is only returned if the
// ReturnItemCollectionMetrics parameter was specified. If the table does not have
// any local secondary indexes, this information is not returned in the response.
// Each ItemCollectionMetrics element consists of:
// - ItemCollectionKey - The partition key value of the item collection. This is
// the same as the partition key value of the item itself.
// - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
// This value is a two-element array containing a lower bound and an upper bound
// for the estimate. The estimate includes the size of all the items in the table,
// plus the size of all attributes projected into all of the local secondary
// indexes on that table. Use this estimate to measure whether a local secondary
// index is approaching its size limit. The estimate is subject to change over
// time; therefore, do not rely on the precision or accuracy of the estimate.
ItemCollectionMetrics *types.ItemCollectionMetrics
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateItem{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateItem{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpUpdateItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
return err
}
if err = addUpdateItemResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateItemValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateItem(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addValidateResponseChecksum(stack, options); err != nil {
return err
}
if err = addAcceptEncodingGzip(stack, options); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil
|
{
return err
}
|
conditional_block
|
|
api_op_UpdateItem.go
|
// Value is mathematically added to the existing attribute. If Value is a
// negative number, then it is subtracted from the existing attribute. If you use
// ADD to increment or decrement a number value for an item that doesn't exist
// before the update, DynamoDB uses 0 as the initial value. Similarly, if you use
// ADD for an existing item to increment or decrement an attribute value that
// doesn't exist before the update, DynamoDB uses 0 as the initial value. For
// example, suppose that the item you want to update doesn't have an attribute
// named itemcount , but you decide to ADD the number 3 to this attribute anyway.
// DynamoDB will create the itemcount attribute, set its initial value to 0 , and
// finally add 3 to it. The result will be a new itemcount attribute in the item,
// with a value of 3 .
// - If the existing data type is a set and if Value is also a set, then Value is
// added to the existing set. For example, if the attribute value is the set
// [1,2] , and the ADD action specified [3] , then the final attribute value is
// [1,2,3] . An error occurs if an ADD action is specified for a set attribute
// and the attribute type specified does not match the existing set type. Both sets
// must have the same primitive data type. For example, if the existing data type
// is a set of strings, the Value must also be a set of strings. The ADD action
// only supports Number and set data types. In addition, ADD can only be used on
// top-level attributes, not nested attributes.
// - DELETE - Deletes an element from a set. If a set of values is specified,
// then those values are subtracted from the old set. For example, if the attribute
// value was the set [a,b,c] and the DELETE action specifies [a,c] , then the
// final attribute value is [b] . Specifying an empty set is an error. The DELETE
// action only supports set data types. In addition, DELETE can only be used on
// top-level attributes, not nested attributes.
// You can have many actions in a single expression, such as the following: SET
// a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on
// update expressions, see Modifying Items and Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
// in the Amazon DynamoDB Developer Guide.
UpdateExpression *string
noSmithyDocumentSerde
}
// Represents the output of an UpdateItem operation.
type UpdateItemOutput struct {
// A map of attribute values as they appear before or after the UpdateItem
// operation, as determined by the ReturnValues parameter. The Attributes map is
// only present if the update was successful and ReturnValues was specified as
// something other than NONE in the request. Each element represents one attribute.
Attributes map[string]types.AttributeValue
// The capacity units consumed by the UpdateItem operation. The data returned
// includes the total provisioned throughput consumed, along with statistics for
// the table and any indexes involved in the operation. ConsumedCapacity is only
// returned if the ReturnConsumedCapacity parameter was specified. For more
// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
// in the Amazon DynamoDB Developer Guide.
ConsumedCapacity *types.ConsumedCapacity
// Information about item collections, if any, that were affected by the UpdateItem
// operation. ItemCollectionMetrics is only returned if the
// ReturnItemCollectionMetrics parameter was specified. If the table does not have
// any local secondary indexes, this information is not returned in the response.
// Each ItemCollectionMetrics element consists of:
// - ItemCollectionKey - The partition key value of the item collection. This is
// the same as the partition key value of the item itself.
// - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
// This value is a two-element array containing a lower bound and an upper bound
// for the estimate. The estimate includes the size of all the items in the table,
// plus the size of all attributes projected into all of the local secondary
// indexes on that table. Use this estimate to measure whether a local secondary
// index is approaching its size limit. The estimate is subject to change over
// time; therefore, do not rely on the precision or accuracy of the estimate.
ItemCollectionMetrics *types.ItemCollectionMetrics
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateItem{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateItem{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpUpdateItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
return err
}
if err = addUpdateItemResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateItemValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateItem(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addValidateResponseChecksum(stack, options); err != nil {
return err
}
if err = addAcceptEncodingGzip(stack, options); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func addOpUpdateItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
return stack.Serialize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
opt.Logger = o.Logger
},
},
DiscoverOperation: c.fetchOpUpdateItemDiscoverEndpoint,
EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
EndpointDiscoveryRequired: false,
}, "ResolveEndpoint", middleware.After)
}
func (c *Client) fetchOpUpdateItemDiscoverEndpoint(ctx context.Context, input interface{}, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error)
|
{
in, ok := input.(*UpdateItemInput)
if !ok {
return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
}
_ = in
identifierMap := make(map[string]string, 0)
key := fmt.Sprintf("DynamoDB.%v", identifierMap)
if v, ok := c.endpointCache.Get(key); ok {
return v, nil
}
discoveryOperationInput := &DescribeEndpointsInput{}
opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
for _, fn := range optFns {
fn(&opt)
|
identifier_body
|
|
api_op_UpdateItem.go
|
// - list_append (operand, operand) - evaluates to a list with a new element
// added to it. You can append the new element to the start or the end of the list
// by reversing the order of the operands. These function names are
// case-sensitive.
// - REMOVE - Removes one or more attributes from an item.
// - ADD - Adds the specified value to the item, if the attribute does not
// already exist. If the attribute does exist, then the behavior of ADD depends
// on the data type of the attribute:
// - If the existing attribute is a number, and if Value is also a number, then
// Value is mathematically added to the existing attribute. If Value is a
// negative number, then it is subtracted from the existing attribute. If you use
// ADD to increment or decrement a number value for an item that doesn't exist
// before the update, DynamoDB uses 0 as the initial value. Similarly, if you use
// ADD for an existing item to increment or decrement an attribute value that
// doesn't exist before the update, DynamoDB uses 0 as the initial value. For
// example, suppose that the item you want to update doesn't have an attribute
// named itemcount , but you decide to ADD the number 3 to this attribute anyway.
// DynamoDB will create the itemcount attribute, set its initial value to 0 , and
// finally add 3 to it. The result will be a new itemcount attribute in the item,
// with a value of 3 .
// - If the existing data type is a set and if Value is also a set, then Value is
// added to the existing set. For example, if the attribute value is the set
// [1,2] , and the ADD action specified [3] , then the final attribute value is
// [1,2,3] . An error occurs if an ADD action is specified for a set attribute
// and the attribute type specified does not match the existing set type. Both sets
// must have the same primitive data type. For example, if the existing data type
// is a set of strings, the Value must also be a set of strings. The ADD action
// only supports Number and set data types. In addition, ADD can only be used on
// top-level attributes, not nested attributes.
// - DELETE - Deletes an element from a set. If a set of values is specified,
// then those values are subtracted from the old set. For example, if the attribute
// value was the set [a,b,c] and the DELETE action specifies [a,c] , then the
// final attribute value is [b] . Specifying an empty set is an error. The DELETE
// action only supports set data types. In addition, DELETE can only be used on
// top-level attributes, not nested attributes.
// You can have many actions in a single expression, such as the following: SET
// a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on
// update expressions, see Modifying Items and Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
// in the Amazon DynamoDB Developer Guide.
UpdateExpression *string
noSmithyDocumentSerde
}
// Represents the output of an UpdateItem operation.
type UpdateItemOutput struct {
// A map of attribute values as they appear before or after the UpdateItem
// operation, as determined by the ReturnValues parameter. The Attributes map is
// only present if the update was successful and ReturnValues was specified as
// something other than NONE in the request. Each element represents one attribute.
Attributes map[string]types.AttributeValue
// The capacity units consumed by the UpdateItem operation. The data returned
// includes the total provisioned throughput consumed, along with statistics for
// the table and any indexes involved in the operation. ConsumedCapacity is only
// returned if the ReturnConsumedCapacity parameter was specified. For more
// information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
// in the Amazon DynamoDB Developer Guide.
ConsumedCapacity *types.ConsumedCapacity
// Information about item collections, if any, that were affected by the UpdateItem
// operation. ItemCollectionMetrics is only returned if the
// ReturnItemCollectionMetrics parameter was specified. If the table does not have
// any local secondary indexes, this information is not returned in the response.
// Each ItemCollectionMetrics element consists of:
// - ItemCollectionKey - The partition key value of the item collection. This is
// the same as the partition key value of the item itself.
// - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
// This value is a two-element array containing a lower bound and an upper bound
// for the estimate. The estimate includes the size of all the items in the table,
// plus the size of all attributes projected into all of the local secondary
// indexes on that table. Use this estimate to measure whether a local secondary
// index is approaching its size limit. The estimate is subject to change over
// time; therefore, do not rely on the precision or accuracy of the estimate.
ItemCollectionMetrics *types.ItemCollectionMetrics
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateItem{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateItem{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpUpdateItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
return err
}
if err = addUpdateItemResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateItemValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateItem(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addValidateResponseChecksum(stack, options); err != nil {
return err
}
if err = addAcceptEncodingGzip(stack, options); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func addOpUpdateItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
return stack.Serialize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
opt.Logger = o.Logger
},
},
DiscoverOperation: c.fetchOpUpdateItemDiscoverEndpoint,
EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
EndpointDiscoveryRequired: false,
}, "ResolveEndpoint", middleware.After)
}
func (c *Client)
|
fetchOpUpdateItemDiscoverEndpoint
|
identifier_name
|
|
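addOpUpdateItemDiscoverEndpointMiddleware above inserts the DiscoverEndpoint step after ResolveEndpoint, and fetchOpUpdateItemDiscoverEndpoint consults the client's endpoint cache before falling back to DescribeEndpoints. Because EndpointDiscoveryRequired is false for UpdateItem, discovery only runs when it is enabled on the client. A minimal opt-in sketch, assuming default credentials and configuration are available:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		panic(err)
	}
	// Enabling discovery lets the DiscoverEndpoint middleware resolve and
	// cache account-specific endpoints via DescribeEndpoints.
	client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.EndpointDiscovery.EnableEndpointDiscovery = aws.EndpointDiscoveryEnabled
	})
	_ = client // use client.UpdateItem(...) as usual
}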
lib.rs
|
allow_sgx_debug_mode): bool;
}
}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
// the integritee-service wants to register his enclave
#[weight = (<T as Config>::WeightInfo::register_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn register_enclave(origin, ra_report: Vec<u8>, worker_url: Vec<u8>) -> DispatchResult {
log::info!("teerex: called into runtime call register_enclave()");
let sender = ensure_signed(origin)?;
ensure!(ra_report.len() <= MAX_RA_REPORT_LEN, <Error<T>>::RaReportTooLong);
ensure!(worker_url.len() <= MAX_URL_LEN, <Error<T>>::EnclaveUrlTooLong);
log::info!("teerex: parameter lenght ok");
#[cfg(not(feature = "skip-ias-check"))]
let enclave = Self::verify_report(&sender, ra_report)
.map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if !<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from within the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if !<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
///The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
///The enclave is not registered
EnclaveIsNotRegistered,
///The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
///The shard doesn't match the enclave
WrongMrenclaveForShard,
///The worker url is too long
EnclaveUrlTooLong,
///The RA report is too long
RaReportTooLong,
///The enclave doesn't exists
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else
|
{
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
}
|
conditional_block
|
|
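The conditional block above is the bookkeeping side of add_enclave: a sender that is already registered keeps its index, a new sender gets count+1 guarded by an explicit overflow check, and the registry slot at that index is (re)written. A small standalone Go sketch of the same insert-or-update pattern, with plain maps standing in for the pallet storage items (names and types here are illustrative only):

package main

import (
	"errors"
	"fmt"
	"math"
)

type registry struct {
	index map[string]uint64 // sender -> slot (EnclaveIndex)
	slots map[uint64]string // slot -> enclave data (EnclaveRegistry)
	count uint64            // EnclaveCount
}

func (r *registry) addEnclave(sender, enclave string) error {
	idx, ok := r.index[sender]
	if !ok {
		if r.count == math.MaxUint64 {
			return errors.New("overflow adding new enclave to registry")
		}
		r.count++
		idx = r.count
		r.index[sender] = idx
	}
	// an existing sender simply overwrites its previous registry entry
	r.slots[idx] = enclave
	return nil
}

func main() {
	r := &registry{index: map[string]uint64{}, slots: map[uint64]string{}}
	_ = r.addEnclave("alice", "enclave-v1")
	_ = r.addEnclave("alice", "enclave-v2") // update keeps index 1
	fmt.Println(r.count, r.slots[1])        // 1 enclave-v2
}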
lib.rs
|
());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from within the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if !<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
///The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
///The enclave is not registered
EnclaveIsNotRegistered,
///The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
///The shard doesn't match the enclave
WrongMrenclaveForShard,
///The worker url is too long
EnclaveUrlTooLong,
///The RA report is too long
RaReportTooLong,
///The enclave doesn't exists
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else {
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
};
<EnclaveRegistry<T>>::insert(enclave_idx, &enclave);
Ok(())
}
fn remove_enclave(sender: &T::AccountId) -> DispatchResult {
ensure!(
<EnclaveIndex<T>>::contains_key(sender),
<Error<T>>::InexistentEnclave
);
let index_to_remove = <EnclaveIndex<T>>::take(sender);
let enclaves_count = Self::enclave_count();
let new_enclaves_count = enclaves_count
.checked_sub(1)
.ok_or("[Teerex]: Underflow removing an enclave from the registry")?;
Self::swap_and_pop(index_to_remove, new_enclaves_count + 1)?;
<EnclaveCount>::put(new_enclaves_count);
Ok(())
}
/// Our list implementation would introduce holes in our list if we try to remove elements from the middle.
/// As the order of the enclave entries is not important, we use the swap and pop method to remove elements from
/// the registry.
fn swap_and_pop(index_to_remove: u64, new_enclaves_count: u64) -> DispatchResult {
if index_to_remove != new_enclaves_count {
let last_enclave = <EnclaveRegistry<T>>::get(&new_enclaves_count);
<EnclaveRegistry<T>>::insert(index_to_remove, &last_enclave);
<EnclaveIndex<T>>::insert(last_enclave.pubkey, index_to_remove);
}
<EnclaveRegistry<T>>::remove(new_enclaves_count);
Ok(())
}
fn unregister_silent_workers(now: T::Moment) {
let minimum = (now - T::MaxSilenceTime::get()).saturated_into::<u64>();
let silent_workers = <EnclaveRegistry<T>>::iter()
.filter(|e| e.1.timestamp < minimum)
.map(|e| e.1.pubkey);
for index in silent_workers {
let result = Self::remove_enclave(&index);
match result {
Ok(_) => {
log::info!("Unregister enclave because silent worker : {:?}", index);
Self::deposit_event(RawEvent::RemovedEnclave(index));
}
Err(e) => {
log::error!("Cannot unregister enclave : {:?}", e);
}
};
}
}
#[cfg(not(feature = "skip-ias-check"))]
fn verify_report(
sender: &T::AccountId,
ra_report: Vec<u8>,
) -> Result<SgxReport, sp_runtime::DispatchError> {
let report = verify_ias_report(&ra_report)
.map_err(|_| <Error<T>>::RemoteAttestationVerificationFailed)?;
log::info!("RA Report: {:?}", report);
let enclave_signer = T::AccountId::decode(&mut &report.pubkey[..])
.map_err(|_| <Error<T>>::EnclaveSignerDecodeError)?;
ensure!(
sender == &enclave_signer,
<Error<T>>::SenderIsNotAttestedEnclave
);
// TODO: activate state checks as soon as we've fixed our setup
// ensure!((report.status == SgxStatus::Ok) | (report.status == SgxStatus::ConfigurationNeeded),
// "RA status is insufficient");
// log::info!("teerex: status is acceptable");
Self::ensure_timestamp_within_24_hours(report.timestamp)?;
Ok(report)
}
#[cfg(not(feature = "skip-ias-check"))]
fn ensure_timestamp_within_24_hours(report_timestamp: u64) -> DispatchResult
|
{
use sp_runtime::traits::CheckedSub;
let elapsed_time = <timestamp::Pallet<T>>::get()
.checked_sub(&T::Moment::saturated_from(report_timestamp))
.ok_or("Underflow while calculating elapsed time since report creation")?;
if elapsed_time < T::MomentsPerDay::get() {
Ok(())
} else {
Err(<Error<T>>::RemoteAttestationTooOld.into())
}
}
|
identifier_body
|
|
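The identifier_body above implements the freshness rule for remote-attestation reports: the elapsed time since the report timestamp is computed with a checked subtraction, and the report is rejected as RemoteAttestationTooOld once it is MomentsPerDay or older. A minimal Go sketch of the same check, using time.Duration in place of the runtime's Moment type (names here are illustrative, not part of the pallet):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errRemoteAttestationTooOld = errors.New("remote attestation too old")

// ensureFresh accepts a report only if less than maxAge has elapsed since it
// was produced, mirroring ensure_timestamp_within_24_hours above.
func ensureFresh(reportTime, now time.Time, maxAge time.Duration) error {
	if now.Sub(reportTime) < maxAge {
		return nil
	}
	return errRemoteAttestationTooOld
}

func main() {
	now := time.Now()
	fmt.Println(ensureFresh(now.Add(-2*time.Hour), now, 24*time.Hour))  // <nil>
	fmt.Println(ensureFresh(now.Add(-36*time.Hour), now, 24*time.Hour)) // error
}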
lib.rs
|
map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if !<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from within the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if !<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
/// The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
/// The enclave is not registered
EnclaveIsNotRegistered,
/// The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
/// The shard doesn't match the enclave
WrongMrenclaveForShard,
/// The worker url is too long
EnclaveUrlTooLong,
/// The RA report is too long
RaReportTooLong,
/// The enclave doesn't exist
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else {
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
};
<EnclaveRegistry<T>>::insert(enclave_idx, &enclave);
Ok(())
}
fn remove_enclave(sender: &T::AccountId) -> DispatchResult {
ensure!(
<EnclaveIndex<T>>::contains_key(sender),
<Error<T>>::InexistentEnclave
);
let index_to_remove = <EnclaveIndex<T>>::take(sender);
let enclaves_count = Self::enclave_count();
let new_enclaves_count = enclaves_count
.checked_sub(1)
.ok_or("[Teerex]: Underflow removing an enclave from the registry")?;
Self::swap_and_pop(index_to_remove, new_enclaves_count + 1)?;
<EnclaveCount>::put(new_enclaves_count);
Ok(())
}
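// A hedged illustration of the swap-and-pop removal described in the doc comment
// below (the body of `swap_and_pop` itself is not shown here). The idea: move the
// last registry entry into the slot being vacated, repoint that enclave's
// `EnclaveIndex`, and drop the now-duplicated last slot so the registry stays dense.
// The `pubkey` field on `Enclave` is an assumed name used only for this sketch.
#[allow(dead_code)]
fn swap_and_pop_sketch(index_to_remove: u64, last_index: u64) -> DispatchResult {
    if index_to_remove != last_index {
        // Move the last entry into the freed slot and update its reverse index.
        let last_enclave = <EnclaveRegistry<T>>::get(last_index);
        <EnclaveIndex<T>>::insert(&last_enclave.pubkey, index_to_remove);
        <EnclaveRegistry<T>>::insert(index_to_remove, &last_enclave);
    }
    // The last slot is now stale either way; remove it.
    <EnclaveRegistry<T>>::remove(last_index);
    Ok(())
}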
/// Removing elements from the middle of our list implementation would introduce holes.
/// As the order of the enclave entries is not important, we use the swap-and-pop method to remove elements from
/// the registry.
fn
|
swap_and_pop
|
identifier_name
|
|
lib.rs
|
uate associated types
pub type AccountId<T> = <T as frame_system::Config>::AccountId;
pub type BalanceOf<T> = <<T as Config>::Currency as Currency<AccountId<T>>>::Balance;
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
pub struct Request {
pub shard: ShardIdentifier,
pub cyphertext: Vec<u8>,
}
decl_event!(
pub enum Event<T>
where
<T as system::Config>::AccountId,
{
AddedEnclave(AccountId, Vec<u8>),
RemovedEnclave(AccountId),
UpdatedIpfsHash(ShardIdentifier, u64, Vec<u8>),
Forwarded(ShardIdentifier),
ShieldFunds(Vec<u8>),
UnshieldedFunds(AccountId),
|
CallConfirmed(AccountId, H256),
BlockConfirmed(AccountId, H256),
}
);
decl_storage! {
trait Store for Module<T: Config> as Teerex {
// Simple lists are not supported in runtime modules as theoretically O(n)
// operations can be executed while only being charged O(1), see substrate
// Kitties tutorial Chapter 2, Tracking all Kitties.
// watch out: we start indexing with 1 instead of zero in order to
// avoid ambiguity between Null and 0
pub EnclaveRegistry get(fn enclave): map hasher(blake2_128_concat) u64 => Enclave<T::AccountId, Vec<u8>>;
pub EnclaveCount get(fn enclave_count): u64;
pub EnclaveIndex get(fn enclave_index): map hasher(blake2_128_concat) T::AccountId => u64;
pub LatestIpfsHash get(fn latest_ipfs_hash) : map hasher(blake2_128_concat) ShardIdentifier => Vec<u8>;
// enclave index of the worker that recently committed an update
pub WorkerForShard get(fn worker_for_shard) : map hasher(blake2_128_concat) ShardIdentifier => u64;
pub ConfirmedCalls get(fn confirmed_calls): map hasher(blake2_128_concat) H256 => u64;
//pub ConfirmedBlocks get(fn confirmed_blocks): map hasher(blake2_128_concat) H256 => u64;
pub AllowSGXDebugMode get(fn allow_sgx_debug_mode) config(allow_sgx_debug_mode): bool;
}
}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
// the integritee-service wants to register its enclave
#[weight = (<T as Config>::WeightInfo::register_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn register_enclave(origin, ra_report: Vec<u8>, worker_url: Vec<u8>) -> DispatchResult {
log::info!("teerex: called into runtime call register_enclave()");
let sender = ensure_signed(origin)?;
ensure!(ra_report.len() <= MAX_RA_REPORT_LEN, <Error<T>>::RaReportTooLong);
ensure!(worker_url.len() <= MAX_URL_LEN, <Error<T>>::EnclaveUrlTooLong);
log::info!("teerex: parameter lenght ok");
#[cfg(not(feature = "skip-ias-check"))]
let enclave = Self::verify_report(&sender, ra_report)
.map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if !<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this, on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key, as the balance on this account is exclusively managed from within the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
|
random_line_split
|
|
mod.rs
|
pub trait ChangeSet {
fn as_ref(&self) -> Option<&ViewChanges>;
/// Provides mutable reference to changes. The implementation for a `RawAccessMut` type
/// should always return `Some(_)`.
fn as_mut(&mut self) -> Option<&mut ViewChanges>;
}
/// No-op implementation used in `Snapshot`.
impl ChangeSet for () {
fn as_ref(&self) -> Option<&ViewChanges> {
None
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesRef {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesMut<'_> {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
Some(&mut *self)
}
}
/// Allows reading data from the database.
///
/// This trait rarely needs to be used directly; [`Access`] is a more high-level trait
/// encompassing access to the database.
///
/// [`Access`]: trait.Access.html
pub trait RawAccess: Clone {
/// Type of the `changes()` that will be applied to the database.
type Changes: ChangeSet;
/// Reference to a `Snapshot`.
fn snapshot(&self) -> &dyn Snapshot;
/// Returns changes related to specific `address` compared to the `snapshot()`.
fn changes(&self, address: &IndexAddress) -> Self::Changes;
}
/// Allows mutating data in indexes.
///
/// This is a marker trait that is used as a bound for mutable operations on indexes.
/// It can be used in the same way for high-level database objects:
///
/// # Example
///
/// ```
/// use exonum_merkledb::{access::{Access, RawAccessMut}, ListIndex, MapIndex};
///
/// pub struct Schema<T: Access> {
/// list: ListIndex<T::Base, String>,
/// map: MapIndex<T::Base, u64, u64>,
/// }
///
/// impl<T: Access> Schema<T>
/// where
/// T::Base: RawAccessMut,
/// {
/// pub fn mutate(&mut self) {
/// self.list.push("foo".to_owned());
/// self.map.put(&1, 2);
/// }
/// }
/// ```
pub trait RawAccessMut: RawAccess {}
impl<'a, T> RawAccessMut for T where T: RawAccess<Changes = ChangesMut<'a>> {}
/// Converts index access to a readonly presentation. The conversion operation is cheap.
pub trait AsReadonly: RawAccess {
/// Readonly version of the access.
type Readonly: RawAccess;
/// Performs the conversion.
fn as_readonly(&self) -> Self::Readonly;
}
/// Represents address of the index in the database.
///
/// # Examples
///
/// `IndexAddress` can be used implicitly, since `&str` and `(&str, &impl BinaryKey)` can both
/// be converted into an address.
///
/// ```
/// use exonum_merkledb::{access::AccessExt, IndexAddress, TemporaryDB, Database};
///
/// let db = TemporaryDB::new();
/// let fork = db.fork();
///
/// // Using a string address:
/// let map = fork.get_map::<_, String, u8>("map");
/// // Using an address within an index family:
/// let list = fork.get_list::<_, String>(("index", &3_u32));
/// // Using `IndexAddress` explicitly:
/// let addr = IndexAddress::with_root("data").append_bytes(&vec![1, 2, 3]);
/// let set = fork.get_value_set::<_, u64>(addr);
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
pub struct IndexAddress {
pub(super) name: String,
pub(super) bytes: Option<Vec<u8>>,
}
impl IndexAddress {
/// Creates empty `IndexAddress`.
pub fn new() -> Self {
Self::default()
}
/// Creates new `IndexAddress` with specified `root` name.
pub fn with_root<S: Into<String>>(root: S) -> Self {
Self {
name: root.into(),
bytes: None,
}
}
/// Returns name part of `IndexAddress`.
pub fn name(&self) -> &str {
&self.name
}
/// Returns bytes part of `IndexAddress`.
pub fn bytes(&self) -> Option<&[u8]> {
self.bytes.as_ref().map(Vec::as_slice)
}
/// Returns a tuple consisting of `name` and `bytes` concatenated with the provided `key`.
/// This is used to obtain a single value (serialized as a byte array) from the database.
pub(crate) fn keyed<'a>(&self, key: &'a [u8]) -> (&str, Cow<'a, [u8]>) {
(
&self.name,
match self.bytes {
None => Cow::Borrowed(key),
Some(ref bytes) => {
let bytes = concat_keys!(bytes, key);
bytes.into()
}
},
)
}
/// Prepends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let prefixed = addr.prepend_name("prefix");
/// assert_eq!(prefixed.name(), "prefix.foo");
/// ```
pub fn prepend_name<'a>(self, prefix: impl Into<Cow<'a, str>>) -> Self {
let prefix = prefix.into();
Self {
name: if self.name.is_empty() {
prefix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[prefix.as_ref(), ".", self.name()].concat()
},
bytes: self.bytes,
}
}
/// Appends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let suffixed = addr.append_name("suffix");
/// assert_eq!(suffixed.name(), "foo.suffix");
/// ```
pub fn append_name<'a>(self, suffix: impl Into<Cow<'a, str>>) -> Self {
let suffix = suffix.into();
Self {
name: if self.name.is_empty() {
suffix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[self.name(), ".", suffix.as_ref()].concat()
},
bytes: self.bytes,
}
}
/// Appends a bytes part to `IndexAddress`.
pub fn append_bytes<K: BinaryKey + ?Sized>(self, suffix: &K) -> Self {
let name = self.name;
let bytes = if let Some(ref bytes) = self.bytes {
concat_keys!(bytes, suffix)
} else {
concat_keys!(suffix)
};
Self {
name,
bytes: Some(bytes),
}
}
/// Full address with a separator between `name` and `bytes` represented as byte array.
pub fn fully_qualified_name(&self) -> Vec<u8> {
if let Some(bytes) = self.bytes() {
concat_keys!(self.name(), INDEX_NAME_SEPARATOR, bytes)
} else {
concat_keys!(self.name())
}
}
}
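// A hedged sketch of how the pieces above compose: `append_bytes` fills the `bytes`
// part, and `fully_qualified_name` joins `name` and `bytes` with the separator for
// use as a storage-level identifier. It assumes `u32` implements `BinaryKey`, as the
// integer key types in the doc examples above do; the module name is made up.
#[cfg(test)]
mod index_address_sketch {
    use super::*;

    #[test]
    fn name_and_bytes_parts_compose() {
        let addr = IndexAddress::with_root("wallets")
            .append_name("balances")
            .append_bytes(&1_u32);
        // The name part grows with dots; the bytes part holds the encoded key.
        assert_eq!(addr.name(), "wallets.balances");
        assert!(addr.bytes().is_some());
        // `fully_qualified_name` is the name, the separator, then the bytes part.
        assert!(addr.fully_qualified_name().starts_with(b"wallets.balances"));
    }
}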
impl<'a> From<&'a str> for IndexAddress {
fn from(name: &'a str) -> Self {
Self::with_root(name)
}
}
impl From<String> for IndexAddress {
fn from(name: String) -> Self {
Self::with_root(name)
}
}
// TODO should we have this impl in public interface? ECR-2834
impl<'a, K: BinaryKey + ?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
fn key_bytes<K: BinaryKey + ?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified
|
random_line_split
|
||
mod.rs
|
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesRef {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesMut<'_> {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
Some(&mut *self)
}
}
/// Allows reading data from the database.
///
/// This trait rarely needs to be used directly; [`Access`] is a more high-level trait
/// encompassing access to the database.
///
/// [`Access`]: trait.Access.html
pub trait RawAccess: Clone {
/// Type of the `changes()` that will be applied to the database.
type Changes: ChangeSet;
/// Reference to a `Snapshot`.
fn snapshot(&self) -> &dyn Snapshot;
/// Returns changes related to specific `address` compared to the `snapshot()`.
fn changes(&self, address: &IndexAddress) -> Self::Changes;
}
/// Allows mutating data in indexes.
///
/// This is a marker trait that is used as a bound for mutable operations on indexes.
/// It can be used in the same way for high-level database objects:
///
/// # Example
///
/// ```
/// use exonum_merkledb::{access::{Access, RawAccessMut}, ListIndex, MapIndex};
///
/// pub struct Schema<T: Access> {
/// list: ListIndex<T::Base, String>,
/// map: MapIndex<T::Base, u64, u64>,
/// }
///
/// impl<T: Access> Schema<T>
/// where
/// T::Base: RawAccessMut,
/// {
/// pub fn mutate(&mut self) {
/// self.list.push("foo".to_owned());
/// self.map.put(&1, 2);
/// }
/// }
/// ```
pub trait RawAccessMut: RawAccess {}
impl<'a, T> RawAccessMut for T where T: RawAccess<Changes = ChangesMut<'a>> {}
/// Converts index access to a readonly presentation. The conversion operation is cheap.
pub trait AsReadonly: RawAccess {
/// Readonly version of the access.
type Readonly: RawAccess;
/// Performs the conversion.
fn as_readonly(&self) -> Self::Readonly;
}
/// Represents address of the index in the database.
///
/// # Examples
///
/// `IndexAddress` can be used implicitly, since `&str` and `(&str, &impl BinaryKey)` can both
/// be converted into an address.
///
/// ```
/// use exonum_merkledb::{access::AccessExt, IndexAddress, TemporaryDB, Database};
///
/// let db = TemporaryDB::new();
/// let fork = db.fork();
///
/// // Using a string address:
/// let map = fork.get_map::<_, String, u8>("map");
/// // Using an address within an index family:
/// let list = fork.get_list::<_, String>(("index", &3_u32));
/// // Using `IndexAddress` explicitly:
/// let addr = IndexAddress::with_root("data").append_bytes(&vec![1, 2, 3]);
/// let set = fork.get_value_set::<_, u64>(addr);
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
pub struct IndexAddress {
pub(super) name: String,
pub(super) bytes: Option<Vec<u8>>,
}
impl IndexAddress {
/// Creates empty `IndexAddress`.
pub fn new() -> Self {
Self::default()
}
/// Creates new `IndexAddress` with specified `root` name.
pub fn with_root<S: Into<String>>(root: S) -> Self {
Self {
name: root.into(),
bytes: None,
}
}
/// Returns name part of `IndexAddress`.
pub fn name(&self) -> &str {
&self.name
}
/// Returns bytes part of `IndexAddress`.
pub fn bytes(&self) -> Option<&[u8]> {
self.bytes.as_ref().map(Vec::as_slice)
}
/// Returns a tuple consisting of `name` and `bytes` concatenated with the provided `key`.
/// This is used to obtain a single value (serialized as a byte array) from the database.
pub(crate) fn keyed<'a>(&self, key: &'a [u8]) -> (&str, Cow<'a, [u8]>) {
(
&self.name,
match self.bytes {
None => Cow::Borrowed(key),
Some(ref bytes) => {
let bytes = concat_keys!(bytes, key);
bytes.into()
}
},
)
}
/// Prepends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let prefixed = addr.prepend_name("prefix");
/// assert_eq!(prefixed.name(), "prefix.foo");
/// ```
pub fn prepend_name<'a>(self, prefix: impl Into<Cow<'a, str>>) -> Self {
let prefix = prefix.into();
Self {
name: if self.name.is_empty() {
prefix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[prefix.as_ref(), ".", self.name()].concat()
},
bytes: self.bytes,
}
}
/// Appends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let suffixed = addr.append_name("suffix");
/// assert_eq!(suffixed.name(), "foo.suffix");
/// ```
pub fn append_name<'a>(self, suffix: impl Into<Cow<'a, str>>) -> Self {
let suffix = suffix.into();
Self {
name: if self.name.is_empty() {
suffix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[self.name(), ".", suffix.as_ref()].concat()
},
bytes: self.bytes,
}
}
/// Appends a bytes part to `IndexAddress`.
pub fn append_bytes<K: BinaryKey + ?Sized>(self, suffix: &K) -> Self {
let name = self.name;
let bytes = if let Some(ref bytes) = self.bytes {
concat_keys!(bytes, suffix)
} else {
concat_keys!(suffix)
};
Self {
name,
bytes: Some(bytes),
}
}
/// Full address with a separator between `name` and `bytes` represented as byte array.
pub fn fully_qualified_name(&self) -> Vec<u8> {
if let Some(bytes) = self.bytes() {
concat_keys!(self.name(), INDEX_NAME_SEPARATOR, bytes)
} else {
concat_keys!(self.name())
}
}
}
impl<'a> From<&'a str> for IndexAddress {
fn from(name: &'a str) -> Self {
Self::with_root(name)
}
}
impl From<String> for IndexAddress {
fn from(name: String) -> Self {
Self::with_root(name)
}
}
// TODO should we have this impl in public interface? ECR-2834
impl<'a, K: BinaryKey + ?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
fn key_bytes<K: BinaryKey + ?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
|
{
None
}
|
identifier_body
|
|
mod.rs
|
Key + ?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
fn key_bytes<K: BinaryKey + ?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
let changes = index_access.changes(&address);
Self {
index_access,
changes,
address,
}
}
fn snapshot(&self) -> &dyn Snapshot {
self.index_access.snapshot()
}
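// Read path shared by `get_bytes` and `contains_raw_key` below: the in-memory
// changes overlay is consulted first (a recorded `Put` or `Delete` wins); if the
// change set marks the view as emptied (`is_empty()`), the key is reported as
// absent without touching the snapshot; otherwise the lookup falls through to the
// snapshot using the address-qualified key produced by `IndexAddress::keyed`.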
fn get_bytes(&self, key: &[u8]) -> Option<Vec<u8>> {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(ref v) => return Some(v.clone()),
Change::Delete => return None,
}
}
if changes.is_empty() {
return None;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().get(name, &key)
}
fn contains_raw_key(&self, key: &[u8]) -> bool {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(..) => return true,
Change::Delete => return false,
}
}
if changes.is_empty() {
return false;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().contains(name, &key)
}
fn iter_bytes(&self, from: &[u8]) -> BytesIter<'_> {
use std::collections::Bound::*;
let (name, key) = self.address.keyed(from);
let prefix = self.address.bytes.clone().unwrap_or_else(|| vec![]);
let changes_iter = self
.changes
.as_ref()
.map(|changes| changes.data.range::<[u8], _>((Included(from), Unbounded)));
let is_empty = self.changes.as_ref().map_or(false, ViewChanges::is_empty);
if is_empty {
// The view was cleared, so ignore the snapshot and iterate only over the accumulated changes
Box::new(ChangesIter::new(changes_iter.unwrap()))
} else {
Box::new(ForkIter::new(
Box::new(SnapshotIter::new(self.snapshot(), name, prefix, &key)),
changes_iter,
))
}
}
/// Returns a value of *any* type corresponding to the key of *any* type.
pub fn get<K, V>(&self, key: &K) -> Option<V>
where
K: BinaryKey + ?Sized,
V: BinaryValue,
{
self.get_bytes(&key_bytes(key)).map(|v| {
BinaryValue::from_bytes(Cow::Owned(v)).expect("Error while deserializing value")
})
}
/// Returns `true` if the index contains a value of *any* type for the specified key of
/// *any* type.
pub fn contains<K>(&self, key: &K) -> bool
where
K: BinaryKey + ?Sized,
{
self.contains_raw_key(&key_bytes(key))
}
/// Returns an iterator over the entries of the index in ascending order. The iterator element
/// type is *any* key-value pair. An argument `subprefix` allows specifying a subset of keys
/// for iteration.
pub fn iter<P, K, V>(&self, subprefix: &P) -> Iter<'_, K, V>
where
P: BinaryKey + ?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
Iter {
base_iter: self.iter_bytes(&iter_prefix),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Returns an iterator over the entries of the index in ascending order starting from the
/// specified key. The iterator element type is *any* key-value pair. An argument `subprefix`
/// allows specifying a subset of keys for iteration.
pub fn iter_from<P, F, K, V>(&self, subprefix: &P, from: &F) -> Iter<'_, K, V>
where
P: BinaryKey,
F: BinaryKey + ?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
let iter_from = key_bytes(from);
Iter {
base_iter: self.iter_bytes(&iter_from),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Crutch to be able to create metadata for indexes not present in the storage.
///
/// # Return value
///
/// Returns whether the changes were saved.
pub(crate) fn put_or_forget<K, V>(&mut self, key: &K, value: V) -> bool
where
K: BinaryKey + ?Sized,
V: BinaryValue,
{
if let Some(changes) = self.changes.as_mut() {
changes
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
true
} else {
false
}
}
}
impl<T: RawAccessMut> View<T> {
/// Inserts a key-value pair into the fork.
pub fn put<K, V>(&mut self, key: &K, value: V)
where
K: BinaryKey + ?Sized,
V: BinaryValue,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
}
/// Removes a key from the view.
pub fn remove<K>(&mut self, key: &K)
where
K: BinaryKey + ?Sized,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Delete);
}
/// Clears the view removing all its elements.
pub fn clear(&mut self) {
self.changes.as_mut().unwrap().clear();
}
}
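// A hedged usage sketch of the raw `View` API above. It assumes a `TemporaryDB`
// backend and that `&Fork` implements `RawAccess` with mutable changes, which is how
// the higher-level indexes drive `View` internally; user code would normally go
// through `AccessExt` or concrete indexes rather than `View::new`.
#[cfg(test)]
mod view_usage_sketch {
    use super::*;
    use crate::{Database, TemporaryDB};

    #[test]
    fn put_then_get_roundtrip() {
        let db = TemporaryDB::new();
        let fork = db.fork();
        let mut view = View::new(&fork, "sketch_index");
        // The write lands in the fork's change set, so the read below sees it
        // without flushing anything to the database.
        view.put(&1_u64, "hello".to_owned());
        assert_eq!(view.get::<u64, String>(&1), Some("hello".to_owned()));
        assert!(view.contains(&1_u64));
    }
}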
/// Iterator over entries in a snapshot limited to a specific view.
struct SnapshotIter<'a> {
inner: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
}
impl<'a> fmt::Debug for SnapshotIter<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SnapshotIter")
.field("prefix", &self.prefix)
.field("ended", &self.ended)
.finish()
}
}
impl<'a> SnapshotIter<'a> {
fn new(snapshot: &'a dyn Snapshot, name: &str, prefix: Vec<u8>, from: &[u8]) -> Self {
debug_assert!(from.starts_with(&prefix));
SnapshotIter {
inner: snapshot.iter(name, from),
prefix,
ended: false,
}
}
}
impl BytesIterator for SnapshotIter<'_> {
fn next(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let next = self.inner.next();
match next {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let peeked = self.inner.peek();
match peeked {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ =>
|
{
self.ended = true;
None
}
|
conditional_block
|
|
mod.rs
|
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
let changes = index_access.changes(&address);
Self {
index_access,
changes,
address,
}
}
fn snapshot(&self) -> &dyn Snapshot {
self.index_access.snapshot()
}
fn get_bytes(&self, key: &[u8]) -> Option<Vec<u8>> {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(ref v) => return Some(v.clone()),
Change::Delete => return None,
}
}
if changes.is_empty() {
return None;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().get(name, &key)
}
fn contains_raw_key(&self, key: &[u8]) -> bool {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(..) => return true,
Change::Delete => return false,
}
}
if changes.is_empty() {
return false;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().contains(name, &key)
}
fn iter_bytes(&self, from: &[u8]) -> BytesIter<'_> {
use std::collections::Bound::*;
let (name, key) = self.address.keyed(from);
let prefix = self.address.bytes.clone().unwrap_or_else(|| vec![]);
let changes_iter = self
.changes
.as_ref()
.map(|changes| changes.data.range::<[u8], _>((Included(from), Unbounded)));
let is_empty = self.changes.as_ref().map_or(false, ViewChanges::is_empty);
if is_empty {
// The view was cleared, so ignore the snapshot and iterate only over the accumulated changes
Box::new(ChangesIter::new(changes_iter.unwrap()))
} else {
Box::new(ForkIter::new(
Box::new(SnapshotIter::new(self.snapshot(), name, prefix, &key)),
changes_iter,
))
}
}
/// Returns a value of *any* type corresponding to the key of *any* type.
pub fn get<K, V>(&self, key: &K) -> Option<V>
where
K: BinaryKey + ?Sized,
V: BinaryValue,
{
self.get_bytes(&key_bytes(key)).map(|v| {
BinaryValue::from_bytes(Cow::Owned(v)).expect("Error while deserializing value")
})
}
/// Returns `true` if the index contains a value of *any* type for the specified key of
/// *any* type.
pub fn contains<K>(&self, key: &K) -> bool
where
K: BinaryKey + ?Sized,
{
self.contains_raw_key(&key_bytes(key))
}
/// Returns an iterator over the entries of the index in ascending order. The iterator element
/// type is *any* key-value pair. An argument `subprefix` allows specifying a subset of keys
/// for iteration.
pub fn iter<P, K, V>(&self, subprefix: &P) -> Iter<'_, K, V>
where
P: BinaryKey + ?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
Iter {
base_iter: self.iter_bytes(&iter_prefix),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Returns an iterator over the entries of the index in ascending order starting from the
/// specified key. The iterator element type is *any* key-value pair. An argument `subprefix`
/// allows specifying a subset of keys for iteration.
pub fn iter_from<P, F, K, V>(&self, subprefix: &P, from: &F) -> Iter<'_, K, V>
where
P: BinaryKey,
F: BinaryKey + ?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
let iter_from = key_bytes(from);
Iter {
base_iter: self.iter_bytes(&iter_from),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Crutch to be able to create metadata for indexes not present in the storage.
///
/// # Return value
///
/// Returns whether the changes were saved.
pub(crate) fn put_or_forget<K, V>(&mut self, key: &K, value: V) -> bool
where
K: BinaryKey + ?Sized,
V: BinaryValue,
{
if let Some(changes) = self.changes.as_mut() {
changes
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
true
} else {
false
}
}
}
impl<T: RawAccessMut> View<T> {
/// Inserts a key-value pair into the fork.
pub fn put<K, V>(&mut self, key: &K, value: V)
where
K: BinaryKey + ?Sized,
V: BinaryValue,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
}
/// Removes a key from the view.
pub fn remove<K>(&mut self, key: &K)
where
K: BinaryKey + ?Sized,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Delete);
}
/// Clears the view removing all its elements.
pub fn clear(&mut self) {
self.changes.as_mut().unwrap().clear();
}
}
/// Iterator over entries in a snapshot limited to a specific view.
struct SnapshotIter<'a> {
inner: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
}
impl<'a> fmt::Debug for SnapshotIter<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SnapshotIter")
.field("prefix", &self.prefix)
.field("ended", &self.ended)
.finish()
}
}
impl<'a> SnapshotIter<'a> {
fn new(snapshot: &'a dyn Snapshot, name: &str, prefix: Vec<u8>, from: &[u8]) -> Self {
debug_assert!(from.starts_with(&prefix));
SnapshotIter {
inner: snapshot.iter(name, from),
prefix,
ended: false,
}
}
}
impl BytesIterator for SnapshotIter<'_> {
fn next(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let next = self.inner.next();
match next {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let peeked = self.inner.peek();
match peeked {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
}
struct ChangesIter<'a, T: Iterator + 'a> {
inner: Peekable<T>,
_lifetime: PhantomData<&'a ()>,
}
/// Iterator over a set of changes.
impl<'a, T> ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn new(iterator: T) -> Self {
ChangesIter {
inner: iterator.peekable(),
_lifetime: PhantomData,
}
}
}
impl<'a, T> BytesIterator for ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn next(&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.next() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
fn
|
peek
|
identifier_name
|
|
fitters.py
|
result of a fit.
:param fit_result: The output from fit
:param axes: The Matplotlib axes to add the fit to
:param x: The values of X at which to visualize the model
:returns: A list of matplotlib artists. **This is important:**
plots will not be properly cleared if this isn't provided
"""
y = self.predict(fit_result, x)
if normalize is not None:
y = normalize(y)
result = axes.plot(x, y, color,
lw=linewidth, alpha=alpha,
scalex=False, scaley=False)
return result
def _sigma_to_weights(self, dy):
if dy is not None:
return 1. / np.asarray(dy) ** 2
@property
def options(self):
"""
A dictionary of the current setting of each model hyperparameter.
Hyperparameters are defined in subclasses by creating class-level
:mod:`Option <glue.core.simpleforms>` attributes. This attribute
dict maps ``{hyperparameter_name: current_value}``
"""
result = []
for typ in type(self).mro():
result.extend(k for k, v in typ.__dict__.items()
if isinstance(v, Option))
return dict((o, getattr(self, o)) for o in result)
def summarize(self, fit_result, x, y, dy=None):
"""
Return a textual summary of the fit.
:param fit_result: The return value from :meth:`fit`
:param x: The x values passed to :meth:`fit`
:returns: A description of the fit result
:rtype: str
"""
return str(fit_result)
@property
def constraints(self):
"""
A dict of the constraints on each parameter in :attr:`param_names`.
Each value is itself a dict with 3 items:
:key value: The default value
:key fixed: True / False, indicating whether the parameter is fixed
:key limits: [min, max] or None, indicating lower/upper limits
"""
result = {}
for p in self.param_names:
result[p] = dict(value=None, fixed=False, limits=None)
result[p].update(self._constraints.get(p, {}))
return result
def set_constraint(self, parameter_name, value=None,
fixed=None, limits=None):
"""
Update a constraint.
:param parameter_name: name of the parameter to update
:type parameter_name: str
:param value: Set the default value (optional)
:param limits: Set the limits to [min, max] (optional)
:param fixed: Set whether the parameter is fixed (optional)
"""
c = self._constraints.setdefault(parameter_name, {})
if value is not None:
c['value'] = value
if fixed is not None:
c['fixed'] = fixed
if limits is not None:
c['limits'] = limits
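# A hypothetical usage sketch of the constraint API above (the fitter class and its
# parameter names are placeholders, not part of this module):
#
#     fitter = SomeGaussianLikeFitter()
#     fitter.set_constraint('mean', value=5.0, fixed=True)
#     fitter.set_constraint('amplitude', limits=[0, 10])
#     fitter.constraints['mean']   # -> {'value': 5.0, 'fixed': True, 'limits': None}
#     fitter.build_and_fit(x, y)   # forwards the constraints dict to fit()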
def build_and_fit(self, x, y, dy=None):
"""
Builds the arguments to :meth:`fit` from the data and current settings, then calls that method
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if dy is not None:
dy = np.asarray(dy).ravel()
return self.fit(x, y, dy=dy,
constraints=self.constraints,
**self.options)
def fit(self, x, y, dy, constraints, **options):
"""
Fit the model to data.
*This must be overridden by a subclass.*
:param x: The x values of the data
:type x: :class:`numpy.ndarray`
:param y: The y values of the data
:type y: :class:`numpy.ndarray`
:param dy: 1 sigma uncertainties on each datum (optional)
:type dy: :class:`numpy.ndarray`
:param constraints: The current value of the ``constraints`` property
:param options: kwargs for model hyperparameters.
:returns: An object representing the fit result.
"""
raise NotImplementedError()
def predict(self, fit_result, x):
"""
Evaluate the model at a set of locations.
**This must be overridden in a subclass.**
:param fit_result: The result from the fit method
:param x: Locations to evaluate model at
:type x: :class:`numpy.ndarray`
:returns: model(x)
:rtype: :class:`numpy.ndarray`
"""
raise NotImplementedError()
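# A minimal, illustrative subclass (not part of this module) showing the three pieces
# a concrete fitter supplies: a class-level Option hyperparameter, `fit`, and
# `predict`. `IntOption` is assumed to live in glue.core.simpleforms, as the
# `options` docstring above suggests.
import numpy as np
from glue.core.simpleforms import IntOption


class PolynomialSketchFitter(BaseFitter1D):
    label = "Polynomial (sketch)"
    # Picked up by the `options` property because it is a class-level Option.
    degree = IntOption(min=0, max=5, default=3, label="Polynomial Degree")

    def fit(self, x, y, dy, constraints, degree=3):
        # Reuse the base-class helper to turn 1-sigma errors into weights.
        w = self._sigma_to_weights(dy)
        return np.polyfit(x, y, degree, w=w)

    def predict(self, fit_result, x):
        return np.polyval(fit_result, x)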
class AstropyFitter1D(BaseFitter1D):
"""
A base class for wrapping :mod:`astropy.modeling`.
Subclasses must override :attr:`model_cls` and :attr:`fitting_cls`
to point to the desired Astropy :mod:`model <astropy.modeling>`
and :mod:`fitter <astropy.modeling.fitting>` classes.
In addition, they should override :attr:`label` with a better label,
and :meth:`parameter_guesses` to generate initial guesses
"""
model_cls = None
"""class describing the model"""
fitting_cls = None
"""class to fit the model"""
label = "Base Astropy Fitter"
"""UI Label"""
@property
def param_names(self):
return self.model_cls.param_names
def predict(self, fit_result, x):
model, _ = fit_result
return model(x)
def summarize(self, fit_result, x, y, dy=None):
model, fitter = fit_result
result = [_report_fitter(fitter), ""]
pnames = list(sorted(model.param_names))
maxlen = max(map(len, pnames))
result.extend("%s = %e" % (p.ljust(maxlen), getattr(model, p).value)
for p in pnames)
return "\n".join(result)
def fit(self, x, y, dy, constraints):
m, f = self._get_model_fitter(x, y, dy, constraints)
dy = self._sigma_to_weights(dy)
return f(m, x, y, weights=dy), f
def _get_model_fitter(self, x, y, dy, constraints):
if self.model_cls is None or self.fitting_cls is None:
raise NotImplementedError("Model or fitting class is unspecified.")
params = dict((k, v['value']) for k, v in constraints.items())
# update unset parameters with guesses from data
for k, v in self.parameter_guesses(x, y, dy).items():
if params[k] is not None or constraints[k]['fixed']:
continue
params[k] = v
m = self.model_cls(**params)
f = self.fitting_cls()
for param_name, constraint in constraints.items():
param = getattr(m, param_name)
if constraint['fixed']:
param.fixed = True
if constraint['limits']:
param.min, param.max = constraint['limits']
return m, f
def parameter_guesses(self, x, y, dy):
"""
Provide initial guesses for each model parameter.
**The base implementation does nothing, and should be overridden**
:param x: X values of the data
:type x: :class:`numpy.ndarray`
:param y: Y values of the data
:type y: :class:`numpy.ndarray`
:param dy: uncertainties on Y (assumed to be 1 sigma)
:type dy: :class:`numpy.ndarray`
:returns: A dict mapping ``{parameter_name: value guess}`` for each
parameter
"""
return {}
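# An illustrative wiring of a concrete AstropyFitter1D subclass (the class name is a
# placeholder and the imports assume astropy.modeling is installed): point
# `model_cls` and `fitting_cls` at an astropy model/fitter pair and supply guesses.
from astropy.modeling import models, fitting


class GaussianSketchFitter(AstropyFitter1D):
    label = "Gaussian (sketch)"
    model_cls = models.Gaussian1D
    fitting_cls = fitting.LevMarLSQFitter

    def parameter_guesses(self, x, y, dy):
        # Moment-based guesses; `_gaussian_parameter_estimates` is defined just below.
        return _gaussian_parameter_estimates(x, y, dy)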
def _gaussian_parameter_estimates(x, y, dy):
amplitude = np.percentile(y, 95)
y = np.maximum(y / y.sum(), 0)
mean = (x * y).sum()
stddev = np.sqrt((y * (x - mean) ** 2).sum())
return dict(mean=mean, stddev=stddev, amplitude=amplitude)
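# A small worked example of the moment-based estimates above (values are approximate
# and assume the synthetic data shown; for illustration only):
#
#     x = np.linspace(-5, 5, 201)
#     y = 4 * np.exp(-(x - 1.0) ** 2 / (2 * 0.5 ** 2))
#     _gaussian_parameter_estimates(x, y, None)
#     # amplitude comes from the 95th percentile of y (a rough stand-in for the peak),
#     # mean ~ 1.0 and stddev ~ 0.5 from the normalized first and second moments.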
class BasicGaussianFitter(BaseFitter1D):
"""
Fallback Gaussian fitter, for astropy < 0.3.
If :mod:`astropy.modeling` is installed, this class is replaced by
:class:`SimpleAstropyGaussianFitter`
"""
label = "Gaussian"
def _errorfunc(self, params, x, y, dy):
yp = self.eval(x, *params)
result = (yp - y)
if dy is not None:
result /= dy
return result
|
return np.exp(-(x - mean) ** 2 / (2 * stddev ** 2)) * amplitude
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
def fit(self, x, y, dy, constraints):
from scipy import optimize
init_values = _gaussian_parameter_estimates(x, y, dy)
init_values =
|
@staticmethod
def eval(x, amplitude, mean, stddev):
|
random_line_split
|
fitters.py
|
of a fit.
:param fit_result: The output from fit
:param axes: The Matplotlib axes to add the fit to
:param x: The values of X at which to visualize the model
:returns: A list of matplotlib artists. **This is important:**
plots will not be properly cleared if this isn't provided
"""
y = self.predict(fit_result, x)
if normalize is not None:
y = normalize(y)
result = axes.plot(x, y, color,
lw=linewidth, alpha=alpha,
scalex=False, scaley=False)
return result
def _sigma_to_weights(self, dy):
if dy is not None:
return 1. / np.asarray(dy) ** 2
@property
def options(self):
"""
A dictionary of the current setting of each model hyperparameter.
Hyperparameters are defined in subclasses by creating class-level
:mod:`Option <glue.core.simpleforms>` attributes. This attribute
dict maps ``{hyperparameter_name: current_value}``
"""
result = []
for typ in type(self).mro():
result.extend(k for k, v in typ.__dict__.items()
if isinstance(v, Option))
return dict((o, getattr(self, o)) for o in result)
def summarize(self, fit_result, x, y, dy=None):
"""
Return a textual summary of the fit.
:param fit_result: The return value from :meth:`fit`
:param x: The x values passed to :meth:`fit`
:returns: A description of the fit result
:rtype: str
"""
return str(fit_result)
@property
def constraints(self):
"""
A dict of the constraints on each parameter in :attr:`param_names`.
Each value is itself a dict with 3 items:
:key value: The default value
:key fixed: True / False, indicating whether the parameter is fixed
:key limits: [min, max] or None, indicating lower/upper limits
"""
result = {}
for p in self.param_names:
result[p] = dict(value=None, fixed=False, limits=None)
result[p].update(self._constraints.get(p, {}))
return result
def set_constraint(self, parameter_name, value=None,
fixed=None, limits=None):
"""
Update a constraint.
:param parameter_name: name of the parameter to update
:type parameter_name: str
:param value: Set the default value (optional)
:param limits: Set the limits to [min, max] (optional)
:param fixed: Set whether the parameter is fixed (optional)
"""
c = self._constraints.setdefault(parameter_name, {})
if value is not None:
c['value'] = value
if fixed is not None:
c['fixed'] = fixed
if limits is not None:
c['limits'] = limits
def build_and_fit(self, x, y, dy=None):
"""
Builds the arguments to :meth:`fit` from the data and current settings, then calls that method
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if dy is not None:
dy = np.asarray(dy).ravel()
return self.fit(x, y, dy=dy,
constraints=self.constraints,
**self.options)
def fit(self, x, y, dy, constraints, **options):
"""
Fit the model to data.
*This must be overridden by a subclass.*
:param x: The x values of the data
:type x: :class:`numpy.ndarray`
:param y: The y values of the data
:type y: :class:`numpy.ndarray`
:param dy: 1 sigma uncertainties on each datum (optional)
:type dy: :class:`numpy.ndarray`
:param constraints: The current value of the ``constraints`` property
:param options: kwargs for model hyperparameters.
:returns: An object representing the fit result.
"""
raise NotImplementedError()
def predict(self, fit_result, x):
"""
Evaluate the model at a set of locations.
**This must be overridden in a subclass.**
:param fit_result: The result from the fit method
:param x: Locations to evaluate model at
:type x: :class:`numpy.ndarray`
:returns: model(x)
:rtype: :class:`numpy.ndarray`
"""
raise NotImplementedError()
class AstropyFitter1D(BaseFitter1D):
"""
A base class for wrapping :mod:`astropy.modeling`.
Subclasses must override :attr:`model_cls` and :attr:`fitting_cls`
to point to the desired Astropy :mod:`model <astropy.modeling>`
and :mod:`fitter <astropy.modeling.fitting>` classes.
In addition, they should override :attr:`label` with a better label,
and :meth:`parameter_guesses` to generate initial guesses
"""
model_cls = None
"""class describing the model"""
fitting_cls = None
"""class to fit the model"""
label = "Base Astropy Fitter"
"""UI Label"""
@property
def param_names(self):
return self.model_cls.param_names
def predict(self, fit_result, x):
model, _ = fit_result
return model(x)
def summarize(self, fit_result, x, y, dy=None):
model, fitter = fit_result
result = [_report_fitter(fitter), ""]
pnames = list(sorted(model.param_names))
maxlen = max(map(len, pnames))
result.extend("%s = %e" % (p.ljust(maxlen), getattr(model, p).value)
for p in pnames)
return "\n".join(result)
def fit(self, x, y, dy, constraints):
m, f = self._get_model_fitter(x, y, dy, constraints)
dy = self._sigma_to_weights(dy)
return f(m, x, y, weights=dy), f
def _get_model_fitter(self, x, y, dy, constraints):
if self.model_cls is None or self.fitting_cls is None:
raise NotImplementedError("Model or fitting class is unspecified.")
params = dict((k, v['value']) for k, v in constraints.items())
# update unset parameters with guesses from data
for k, v in self.parameter_guesses(x, y, dy).items():
if params[k] is not None or constraints[k]['fixed']:
continue
params[k] = v
m = self.model_cls(**params)
f = self.fitting_cls()
for param_name, constraint in constraints.items():
param = getattr(m, param_name)
if constraint['fixed']:
param.fixed = True
if constraint['limits']:
param.min, param.max = constraint['limits']
return m, f
def
|
(self, x, y, dy):
"""
Provide initial guesses for each model parameter.
**The base implementation does nothing, and should be overridden**
:param x: X values of the data
:type x: :class:`numpy.ndarray`
:param y: Y values of the data
:type y: :class:`numpy.ndarray`
:param dy: uncertainties on Y (assumed to be 1 sigma)
:type dy: :class:`numpy.ndarray`
:returns: A dict mapping ``{parameter_name: value guess}`` for each
parameter
"""
return {}
def _gaussian_parameter_estimates(x, y, dy):
amplitude = np.percentile(y, 95)
y = np.maximum(y / y.sum(), 0)
mean = (x * y).sum()
stddev = np.sqrt((y * (x - mean) ** 2).sum())
return dict(mean=mean, stddev=stddev, amplitude=amplitude)
class BasicGaussianFitter(BaseFitter1D):
"""
Fallback Gaussian fitter, for astropy < 0.3.
If :mod:`astropy.modeling` is installed, this class is replaced by
:class:`SimpleAstropyGaussianFitter`
"""
label = "Gaussian"
def _errorfunc(self, params, x, y, dy):
yp = self.eval(x, *params)
result = (yp - y)
if dy is not None:
result /= dy
return result
@staticmethod
def eval(x, amplitude, mean, stddev):
return np.exp(-(x - mean) ** 2 / (2 * stddev ** 2)) * amplitude
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
def fit(self, x, y, dy, constraints):
from scipy import optimize
init_values = _gaussian_parameter_estimates(x, y, dy)
init_values =
|
parameter_guesses
|
identifier_name
|
fitters.py
|
def plot(self, fit_result, axes, x, linewidth=None, alpha=None, color=None, normalize=None):
"""
Plot the result of a fit.
:param fit_result: The output from fit
:param axes: The Matplotlib axes to add the fit to
:param x: The values of X at which to visualize the model
:returns: A list of matplotlib artists. **This is important:**
plots will not be properly cleared if this isn't provided
"""
y = self.predict(fit_result, x)
if normalize is not None:
y = normalize(y)
result = axes.plot(x, y, color,
lw=linewidth, alpha=alpha,
scalex=False, scaley=False)
return result
def _sigma_to_weights(self, dy):
if dy is not None:
return 1. / np.asarray(dy) ** 2
@property
def options(self):
"""
A dictionary of the current setting of each model hyperparameter.
Hyperparameters are defined in subclasses by creating class-level
:mod:`Option <glue.core.simpleforms>` attributes. This attribute
dict maps ``{hyperparameter_name: current_value}``
"""
result = []
for typ in type(self).mro():
result.extend(k for k, v in typ.__dict__.items()
if isinstance(v, Option))
return dict((o, getattr(self, o)) for o in result)
def summarize(self, fit_result, x, y, dy=None):
"""
Return a textual summary of the fit.
:param fit_result: The return value from :meth:`fit`
:param x: The x values passed to :meth:`fit`
:returns: A description of the fit result
:rtype: str
"""
return str(fit_result)
@property
def constraints(self):
"""
A dict of the constraints on each parameter in :attr:`param_names`.
Each value is itself a dict with 3 items:
:key value: The default value
:key fixed: True / False, indicating whether the parameter is fixed
:key limits: [min, max] or None, indicating lower/upper limits
"""
result = {}
for p in self.param_names:
result[p] = dict(value=None, fixed=False, limits=None)
result[p].update(self._constraints.get(p, {}))
return result
def set_constraint(self, parameter_name, value=None,
fixed=None, limits=None):
"""
Update a constraint.
:param parameter_name: name of the parameter to update
:type parameter_name: str
:param value: Set the default value (optional)
:param limits: Set the limits to [min, max] (optional)
:param fixed: Set whether the parameter is fixed (optional)
"""
c = self._constraints.setdefault(parameter_name, {})
if value is not None:
c['value'] = value
if fixed is not None:
c['fixed'] = fixed
if limits is not None:
c['limits'] = limits
def build_and_fit(self, x, y, dy=None):
"""
Builds the arguments to :meth:`fit` from the data and current settings, then calls that method
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if dy is not None:
dy = np.asarray(dy).ravel()
return self.fit(x, y, dy=dy,
constraints=self.constraints,
**self.options)
def fit(self, x, y, dy, constraints, **options):
"""
Fit the model to data.
*This must be overridden by a subclass.*
:param x: The x values of the data
:type x: :class:`numpy.ndarray`
:param y: The y values of the data
:type y: :class:`numpy.ndarray`
:param dy: 1 sigma uncertainties on each datum (optional)
:type dy: :class:`numpy.ndarray`
:param constraints: The current value of the ``constraints`` property
:param options: kwargs for model hyperparameters.
:returns: An object representing the fit result.
"""
raise NotImplementedError()
def predict(self, fit_result, x):
"""
Evaluate the model at a set of locations.
**This must be overridden in a subclass.**
:param fit_result: The result from the fit method
:param x: Locations to evaluate model at
:type x: :class:`numpy.ndarray`
:returns: model(x)
:rtype: :class:`numpy.ndarray`
"""
raise NotImplementedError()
class AstropyFitter1D(BaseFitter1D):
"""
A base class for wrapping :mod:`astropy.modeling`.
Subclasses must override :attr:`model_cls` and :attr:`fitting_cls`
to point to the desired Astropy :mod:`model <astropy.modeling>`
and :mod:`fitter <astropy.modeling.fitting>` classes.
In addition, they should override :attr:`label` with a better label,
and :meth:`parameter_guesses` to generate initial guesses
"""
model_cls = None
"""class describing the model"""
fitting_cls = None
"""class to fit the model"""
label = "Base Astropy Fitter"
"""UI Label"""
@property
def param_names(self):
return self.model_cls.param_names
def predict(self, fit_result, x):
model, _ = fit_result
return model(x)
def summarize(self, fit_result, x, y, dy=None):
model, fitter = fit_result
result = [_report_fitter(fitter), ""]
pnames = list(sorted(model.param_names))
maxlen = max(map(len, pnames))
result.extend("%s = %e" % (p.ljust(maxlen), getattr(model, p).value)
for p in pnames)
return "\n".join(result)
def fit(self, x, y, dy, constraints):
m, f = self._get_model_fitter(x, y, dy, constraints)
dy = self._sigma_to_weights(dy)
return f(m, x, y, weights=dy), f
def _get_model_fitter(self, x, y, dy, constraints):
if self.model_cls is None or self.fitting_cls is None:
raise NotImplementedError("Model or fitting class is unspecified.")
params = dict((k, v['value']) for k, v in constraints.items())
# update unset parameters with guesses from data
for k, v in self.parameter_guesses(x, y, dy).items():
if params[k] is not None or constraints[k]['fixed']:
continue
params[k] = v
m = self.model_cls(**params)
f = self.fitting_cls()
for param_name, constraint in constraints.items():
param = getattr(m, param_name)
if constraint['fixed']:
param.fixed = True
if constraint['limits']:
param.min, param.max = constraint['limits']
return m, f
def parameter_guesses(self, x, y, dy):
"""
Provide initial guesses for each model parameter.
**The base implementation does nothing, and should be overridden**
:param x: X values of the data
:type x: :class:`numpy.ndarray`
:param y: Y values of the data
:type y: :class:`numpy.ndarray`
:param dy: uncertainties on Y (assumed to be 1 sigma)
:type dy: :class:`numpy.ndarray`
:returns: A dict mapping ``{parameter_name: value guess}`` for each
parameter
"""
return {}
def _gaussian_parameter_estimates(x, y, dy):
amplitude = np.percentile(y, 95)
y = np.maximum(y / y.sum(), 0)
mean = (x * y).sum()
stddev = np.sqrt((y * (x - mean) ** 2).sum())
return dict(mean=mean, stddev=stddev, amplitude=amplitude)
class BasicGaussianFitter(BaseFitter1D):
"""
Fallback Gaussian fitter, for astropy < 0.3.
If :mod:`astropy.modeling` is installed, this class is replaced by
:class:`SimpleAstropyGaussianFitter`
"""
label = "Gaussian"
def _errorfunc(self, params, x, y, dy):
yp = self.eval(x, *params)
result = (yp - y)
if dy is not None:
result /= dy
return result
@staticmethod
def eval(x, amplitude, mean, stddev):
return np.exp(-(x - mean) ** 2 / (2 * stddev ** 2)) * amplitude
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
def
|
setattr(self, k, v)
|
conditional_block
|
|
fitters.py
|
of a fit.
:param fit_result: The output from fit
:param axes: The Matplotlib axes to add the fit to
:param x: The values of X at which to visualize the model
:returns: A list of matplotlib artists. **This is important:**
plots will not be properly cleared if this isn't provided
"""
y = self.predict(fit_result, x)
if normalize is not None:
y = normalize(y)
result = axes.plot(x, y, color,
lw=linewidth, alpha=alpha,
scalex=False, scaley=False)
return result
def _sigma_to_weights(self, dy):
if dy is not None:
return 1. / np.asarray(dy) ** 2
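# Example (illustrative): dy = [0.5, 2.0] -> weights [4.0, 0.25], i.e.
# standard inverse-variance weighting; returns None when dy is None.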
@property
def options(self):
"""
A dictionary of the current setting of each model hyperparameter.
Hyperparameters are defined in subclasses by creating class-level
:mod:`Option <glue.core.simpleforms>` attributes. This attribute
dict maps ``{hyperparameter_name: current_value}``
"""
result = []
for typ in type(self).mro():
result.extend(k for k, v in typ.__dict__.items()
if isinstance(v, Option))
return dict((o, getattr(self, o)) for o in result)
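# Illustrative only (not in the original source): a subclass exposes a
# hyperparameter by declaring a class-level Option. Assuming an IntOption
# form from glue.core.simpleforms, a hypothetical polynomial fitter could do:
#
#     class ExamplePolynomialFitter(BaseFitter1D):
#         degree = IntOption(min=0, max=5, default=3, label="Degree")
#
# ``fitter.options`` would then report {'degree': 3} until the value changes.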
def summarize(self, fit_result, x, y, dy=None):
"""
Return a textual summary of the fit.
:param fit_result: The return value from :meth:`fit`
:param x: The x values passed to :meth:`fit`
:returns: A description of the fit result
:rtype: str
"""
return str(fit_result)
@property
def constraints(self):
"""
A dict of the constraints on each parameter in :attr:`param_names`.
Each value is itself a dict with 3 items:
:key value: The default value
:key fixed: True / False, indicating whether the parameter is fixed
:key limits: [min, max] or None, indicating lower/upper limits
"""
result = {}
for p in self.param_names:
result[p] = dict(value=None, fixed=False, limits=None)
result[p].update(self._constraints.get(p, {}))
return result
def set_constraint(self, parameter_name, value=None,
fixed=None, limits=None):
"""
Update a constraint.
:param parameter_name: name of the parameter to update
:type parameter_name: str
:param value: Set the default value (optional)
:param limits: Set the limits to [min, max] (optional)
:param fixed: Set whether the parameter is fixed (optional)
"""
c = self._constraints.setdefault(parameter_name, {})
if value is not None:
c['value'] = value
if fixed is not None:
c['fixed'] = fixed
if limits is not None:
c['limits'] = limits
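# Example usage (illustrative, not in the original source):
#
#     fitter.set_constraint('mean', value=5.0, fixed=True)
#     fitter.constraints['mean']
#     # -> {'value': 5.0, 'fixed': True, 'limits': None}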
def build_and_fit(self, x, y, dy=None):
"""
Build the arguments for :meth:`fit` (flattened arrays, constraints, and options), then call it.
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if dy is not None:
dy = np.asarray(dy).ravel()
return self.fit(x, y, dy=dy,
constraints=self.constraints,
**self.options)
def fit(self, x, y, dy, constraints, **options):
"""
Fit the model to data.
*This must be overridden by a subclass.*
:param x: The x values of the data
:type x: :class:`numpy.ndarray`
:param y: The y values of the data
:type y: :class:`numpy.ndarray`
:param dy: 1 sigma uncertainties on each datum (optional)
:type dy: :class:`numpy.ndarray`
:param constraints: The current value of the ``constraints`` property
:param options: kwargs for model hyperparameters.
:returns: An object representing the fit result.
"""
raise NotImplementedError()
def predict(self, fit_result, x):
"""
Evaluate the model at a set of locations.
**This must be overridden in a subclass.**
:param fit_result: The result from the fit method
:param x: Locations to evaluate model at
:type x: :class:`numpy.ndarray`
:returns: model(x)
:rtype: :class:`numpy.ndarray`
"""
raise NotImplementedError()
class AstropyFitter1D(BaseFitter1D):
"""
A base class for wrapping :mod:`astropy.modeling`.
Subclasses must override :attr:`model_cls` and :attr:`fitting_cls`
to point to the desired Astropy :mod:`model <astropy.modeling>`
and :mod:`fitter <astropy.modeling.fitting>` classes.
In addition, they should override :attr:`label` with a better label,
and :meth:`parameter_guesses` to generate initial guesses.
"""
model_cls = None
"""class describing the model"""
fitting_cls = None
"""class to fit the model"""
label = "Base Astropy Fitter"
"""UI Label"""
@property
def param_names(self):
return self.model_cls.param_names
def predict(self, fit_result, x):
model, _ = fit_result
return model(x)
def summarize(self, fit_result, x, y, dy=None):
model, fitter = fit_result
result = [_report_fitter(fitter), ""]
pnames = list(sorted(model.param_names))
maxlen = max(map(len, pnames))
result.extend("%s = %e" % (p.ljust(maxlen), getattr(model, p).value)
for p in pnames)
return "\n".join(result)
def fit(self, x, y, dy, constraints):
m, f = self._get_model_fitter(x, y, dy, constraints)
dy = self._sigma_to_weights(dy)
return f(m, x, y, weights=dy), f
def _get_model_fitter(self, x, y, dy, constraints):
if self.model_cls is None or self.fitting_cls is None:
raise NotImplementedError("Model or fitting class is unspecified.")
params = dict((k, v['value']) for k, v in constraints.items())
# update unset parameters with guesses from data
for k, v in self.parameter_guesses(x, y, dy).items():
if params[k] is not None or constraints[k]['fixed']:
continue
params[k] = v
m = self.model_cls(**params)
f = self.fitting_cls()
for param_name, constraint in constraints.items():
param = getattr(m, param_name)
if constraint['fixed']:
param.fixed = True
if constraint['limits']:
param.min, param.max = constraint['limits']
return m, f
def parameter_guesses(self, x, y, dy):
|
def _gaussian_parameter_estimates(x, y, dy):
amplitude = np.percentile(y, 95)
y = np.maximum(y / y.sum(), 0)
mean = (x * y).sum()
stddev = np.sqrt((y * (x - mean) ** 2).sum())
return dict(mean=mean, stddev=stddev, amplitude=amplitude)
class BasicGaussianFitter(BaseFitter1D):
"""
Fallback Gaussian fitter, for astropy < 0.3.
If :mod:`astropy.modeling` is installed, this class is replaced by
:class:`SimpleAstropyGaussianFitter`
"""
label = "Gaussian"
def _errorfunc(self, params, x, y, dy):
yp = self.eval(x, *params)
result = (yp - y)
if dy is not None:
result /= dy
return result
@staticmethod
def eval(x, amplitude, mean, stddev):
return np.exp(-(x - mean) ** 2 / (2 * stddev ** 2)) * amplitude
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
def fit(self, x, y, dy, constraints):
from scipy import optimize
init_values = _gaussian_parameter_estimates(x, y, dy)
init_values =
|
"""
Provide initial guesses for each model parameter.
**The base implementation does nothing, and should be overridden**
:param x: X values of the data
:type x: :class:`numpy.ndarray`
:param y: Y values of the data
:type y: :class:`numpy.ndarray`
:param dy: uncertainties on Y (assumed to be 1 sigma)
:type dy: :class:`numpy.ndarray`
:returns: A dict mapping ``{parameter_name: value guess}`` for each
parameter
"""
return {}
|
identifier_body
|
kmp.py
|
if iter>=self.em_num_min_steps:
if LL[iter]-LL[iter-1]<self.em_max_diffLL or iter==self.em_num_max_steps-1:
print('EM converged after', iter, 'iterations.')
return
print('Max no. of iterations reached')
return
def computeGamma(self,data):
L = np.zeros((self.num_states,data.shape[1]))
for i in range(self.num_states):
L[i,:] = self.priors[i]*gaussPDF(data,self.mu[:,i],self.sigma[i,:,:])
L_axis0_sum = np.sum(L,axis=0)
gamma = np.divide(L, np.repeat(L_axis0_sum.reshape(1,L_axis0_sum.shape[0]), self.num_states, axis=0))
return L,gamma
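# Note (illustrative): ``gaussPDF`` is assumed to be a project helper that
# evaluates a multivariate normal density for each column of ``data``; a
# minimal sketch of such a helper, under that assumption:
#
#     def gaussPDF(data, mu, sigma):
#         data = np.atleast_2d(data)
#         sigma = np.atleast_2d(sigma)
#         diff = data - np.asarray(mu).reshape(-1, 1)
#         k = data.shape[0]
#         inv = np.linalg.inv(sigma)
#         norm = np.sqrt(((2 * np.pi) ** k) * abs(np.linalg.det(sigma)))
#         return np.exp(-0.5 * np.sum(diff * (inv @ diff), axis=0)) / norm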
#######################################################################################################################
class KMP: #Assumptions: Input is only time; All dofs of output are continuous TODO: Generalize
def __init__(self, input_dofs, robot_dofs, demo_dt, ndemos, data_address):
self.input_dofs = input_dofs
self.robot_dofs = robot_dofs
self.ndemos = ndemos
self.demo_dt = demo_dt
self.norm_data = self.loadData(addr=data_address) # Fetching the data from the saved location
# self.norm_data = self.normalizeData(self.training_data) # Making all demos have the same length
self.demo_duration = 200 * self.demo_dt #self.training_data[0].shape[0] * self.demo_dt
self.data = self.combineData(self.norm_data)
self.gmm_model = GMM(num_vars=(self.input_dofs+self.robot_dofs), data=self.data)
self.model_num_datapts = int(self.demo_duration/self.gmm_model.dt)
self.data_out, self.sigma_out, _ = self.GMR(np.array(range(1,self.model_num_datapts+1)) * self.gmm_model.dt)
####### DEBUGGING ##############
# plt.scatter(self.data[1,:],self.data[2,:])
# for i in range(self.gmm_model.num_states):
# plt.plot(self.gmm_model.mu[1,i],self.gmm_model.mu[2,i], 'ro')
# plt.show()
##################################
self.ref_traj = []
for i in range(self.model_num_datapts):
self.ref_traj.append(ReferenceTrajectoryPoint(t=(i+1)*self.gmm_model.dt, mu=self.data_out[:,i], sigma=self.sigma_out[i,:,:]))
####### DEBUGGING ##############
# ref_path = extractPath(self.ref_traj)
# print(ref_path)
# print(len(ref_path), " ", len(ref_path[0]))
# plt.scatter(ref_path[:, 0], ref_path[:, 1])
# plt.title('Reference Path')
# plt.show()
##################################
print('KMP Initialized with Reference Trajectory')
###################################
def loadData(self, addr):
data = []
for i in range(self.ndemos):
data.append(np.loadtxt(open(addr + str(i + 1) + ""), delimiter=","))
# data is saved as a list of nparrays [nparray(Demo1),nparray(Demo2),...]
return data
#########################
def normalizeData(self, data):
dofs = self.input_dofs + self.robot_dofs
dt = self.demo_dt
sum = 0
for i in range(len(data)):
sum += len(data[i])
mean = sum / len(data)
alpha = []
for i in range(len(data)):
alpha.append(len(data[i]) / mean)
alpha_mean = np.mean(alpha)
mean_t = int(mean)
# normalize the data so that all demos contain same number of data points
ndata = []
for i in range(len(alpha)):
demo_ndata = np.empty((0, dofs))
for j in range(mean_t): # Number of phase steps is same as number of time steps in nominal trajectory, because for nominal traj alpha is 1
z = j * alpha[i] * dt
corr_timestep = z / dt
whole = int(corr_timestep)
frac = corr_timestep - whole
row = []
for k in range(dofs):
if whole == (data[i].shape[0] - 1):
row.append(data[i][whole][k])
else:
row.append(data[i][whole][k] + frac * (data[i][whole + 1][k] - data[i][whole][k]))
demo_ndata = np.append(demo_ndata, [row], axis=0)
ndata.append(demo_ndata)
return ndata
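# The loop above resamples every demo onto ``mean_t`` time steps by linear
# interpolation; a compact sketch of the same idea for one demo and one dof
# (illustrative, variable names are hypothetical):
#
#     t_old = np.arange(demo.shape[0])
#     t_new = np.linspace(0, demo.shape[0] - 1, mean_t)
#     resampled = np.interp(t_new, t_old, demo[:, 0])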
############################
def combineData(self,data):
# total_time_steps = 0
# for i in range(len(data)):
# total_time_steps = total_time_steps + data[i].shape[0]
# time = np.array(range(total_time_steps)) * self.demo_dt
positions = data[0].T
for i in range(1,len(data)):
positions = np.append(positions,data[i].T,axis=1)
# data = np.vstack((time,positions))
return positions
#############################
def GMR(self, data_in_raw):
data_in = data_in_raw.reshape((1,data_in_raw.shape[0]))
num_datapts = data_in.shape[1]
num_varout = self.robot_dofs
diag_reg_factor = 1e-8
mu_tmp = np.zeros((num_varout, self.gmm_model.num_states))
exp_data = np.zeros((num_varout, num_datapts))
exp_sigma = np.zeros((num_datapts, num_varout, num_varout))
H = np.empty((self.gmm_model.num_states, num_datapts))
for t in range(num_datapts):
# Compute activation weights
for i in range(self.gmm_model.num_states):
H[i,t] = self.gmm_model.priors[i] * gaussPDF((data_in[:,t].reshape((-1,1))), self.gmm_model.mu[0:self.input_dofs,i], self.gmm_model.sigma[i,0:self.input_dofs,0:self.input_dofs])
# print(gaussPDF(data_in[:,t].reshape((-1,1)), self.gmm_model.mu[0:self.input_dofs,i], self.gmm_model.sigma[0:self.input_dofs,0:self.input_dofs,i]))
H[:,t] = H[:,t]/(sum(H[:,t]) + 1e-10)
# print(H)
# Compute conditional means
for i in range(self.gmm_model.num_states):
mu_tmp[:,i] = self.gmm_model.mu[self.input_dofs:(self.input_dofs+self.robot_dofs),i] + self.gmm_model.sigma[i,0:self.input_dofs,self.input_dofs:(self.input_dofs+self.robot_dofs)]/self.gmm_model.sigma[i,0:self.input_dofs,0:self.input_dofs] * (data_in[:,t].reshape((-1,1)) - self.gmm_model.mu[0:self.input_dofs,i])
exp_data[:,t] = exp_data[:,t] + H[i,t] * mu_tmp[:,i]
# print("Mu_tmp: ",mu_tmp[:,i])
# print(H[i,t] * mu_tmp[:,i])
# Compute conditional covariance
for i in range(self.gmm_model.num_states):
sigma_tmp = self.gmm_model.sigma[i,self.input_dofs:(self.input_dofs+self.robot_dofs),self.input_dofs:(self.input_dofs+self.robot_dofs)] - self.gmm_model.sigma[i,self.input_dofs:(self.input_dofs+self.robot_dofs),0:self.input_dofs]/self.gmm_model.sigma[i,0:self.input_dofs,0:self.input_dofs] * self.gmm_model.sigma[i,0:self.input_dofs,self.input_dofs:(self.input_dofs+self.robot_dofs)]
# print(sigma_tmp)
exp_sigma[t,:,:] = exp_sigma[t,:,:] + H[i,t] * (sigma_tmp + mu_tmp[:,i]*mu_tmp[:,i].T)
# print(exp_sigma[t,:,:])
exp_sigma[t,:,:] = exp_sigma[t,:,:] - exp_data[:,t] * exp_data[:,t].T + np.eye(num_varout) * diag_reg_factor
return exp_data, exp_sigma, H
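# Output shapes (for the time-only input assumed by this class): exp_data is
# (robot_dofs, num_datapts), exp_sigma is (num_datapts, robot_dofs, robot_dofs),
# and H holds the per-component activation weights, (num_states, num_datapts).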
def setParams(self, dt, lamda, kh):
self.dt = dt
self.len = int(self.demo_duration/dt)
self.lamda = lamda
self.kh = kh
##################################
def addViaPts(self, via_pts, via_pt_var):
# Search for closest point in ref trajectory
self.new_ref_traj = copy.deepcopy(self.ref_traj)
replace_ind = 0
num_phases = len(via_pts)
phase_size = len(self.ref_traj)/num_phases
for via_pt_ind,via_pt in enumerate(via_pts):
min_dist = float('Inf')
for i in range(math.ceil(via_pt_ind*phase_size), math.floor((via_pt_ind+1)*phase_size)):
dist = distBWPts(self.ref_traj[i].mu[0:2],via_pt)
# print("dist: ", dist)
if dist<min_dist:
|
min_dist = dist
# print("min_dist: ", min_dist)
replace_ind = i
|
conditional_block
|